code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import torch
import torch.nn as nn
import numpy as np
import math
import skimage.metrics as metrics
import scipy.ndimage.filters as filters
from volsim.simulation_dataset import *
from lpips.models.dist_model import DistModel as LPIPS_Model
from lsim.distance_model import DistanceModel as LSIM2D_Model
class Metric(nn.Module):
    """Wraps classical and learned similarity metrics behind one interface.

    mode selects the metric; "LPIPS" and "LSIM2D" additionally load a
    pretrained comparison network onto the GPU.
    """

    def __init__(self, mode: str):
        super(Metric, self).__init__()
        valid_modes = ["MSE", "MSE(blurred)", "MSE(fft)", "SSIM", "PSNR",
                       "MI", "CORR", "LPIPS", "LSIM2D"]
        assert (mode in valid_modes), "Unknown metric mode!"
        self.mode = mode
        self.name = mode
        # metrics are inference-only: switch to eval mode right away
        self.eval()
        if mode == "LPIPS":
            self.model = LPIPS_Model()
            self.model.initialize(model='net-lin', net='alex', use_gpu=True, spatial=False)
        elif mode == "LSIM2D":
            self.model = LSIM2D_Model(baseType="lsim", isTrain=False, useGPU=True)
            self.model.load("src/lsim/models/LSiM.pth")
def forward(self, x:dict) -> torch.Tensor:
full = x["data"]
idxA = x["indexA"][0,x["idxMin"]:x["idxMax"]].long() #only use index of first batch element for entire batch
idxB = x["indexB"][0,x["idxMin"]:x["idxMax"]].long()
dataA = torch.index_select(full, 1, idxA)
dataB = torch.index_select(full, 1, idxB)
dataA = dataA.view(-1,full.shape[2],full.shape[3],full.shape[4],full.shape[5])
dataB = dataB.view(-1,full.shape[2],full.shape[3],full.shape[4],full.shape[5])
dataA = dataA.numpy()
dataB = dataB.numpy()
dataAInt = dataA.astype(np.uint8)
dataBInt = dataB.astype(np.uint8)
distance = np.empty(dataA.shape[0])
for i in range(dataA.shape[0]):
if self.mode == "MSE":
distance[i] = metrics.mean_squared_error(dataA[i], dataB[i])
elif self.mode == "MSE(blurred)":
tempA = filters.gaussian_filter(dataA[i], 2)
tempB = filters.gaussian_filter(dataB[i], 2)
distance[i] = metrics.mean_squared_error(tempA, tempB)
elif self.mode == "MSE(fft)":
tempA = np.abs(np.fft.fftn(dataA[i]))
tempB = np.abs(np.fft.fftn(dataB[i]))
distance[i] = metrics.mean_squared_error(tempA, tempB)
elif self.mode == "SSIM":
distance[i] = 1 - metrics.structural_similarity(dataA[i], dataB[i], data_range=255.0, multichannel=True) #invert as distance measure
elif self.mode == "PSNR":
psnr = -metrics.peak_signal_noise_ratio(dataA[i], dataB[i], data_range=255) #invert as distance measure
distance[i] = psnr if not math.isinf(psnr) else -999
elif self.mode == "MI":
distance[i] = np.mean(metrics.variation_of_information(dataAInt[i], dataBInt[i]))
elif self.mode == "CORR":
tempA = dataA[i].reshape(-1)
tempB = dataB[i].reshape(-1)
stacked = np.stack([tempA, tempB], axis=0)
corr = np.corrcoef(stacked)[0,1]
if np.isnan(corr):
distance[i] = 1 # handle undefined correlation for zero variance
else:
distance[i] = 1 - np.abs(corr) #invert as distance measure
elif self.mode == "LPIPS":
minA = np.min(dataA[i])
maxA = np.max(dataA[i])
rescaledA = 2 * ( (dataA[i] - minA) / (maxA - minA) ) - 1
rescaledA = torch.from_numpy(rescaledA).cuda()
minB = np.min(dataB[i])
maxB = np.max(dataB[i])
rescaledB = 2 * ( (dataB[i] - minB) / (maxB - minB) ) - 1
rescaledB = torch.from_numpy(rescaledB).cuda()
xPermA = rescaledA.permute(0,3,1,2)
xPermB = rescaledB.permute(0,3,1,2)
xResult = self.model(xPermA, xPermB)
yPermA = rescaledA.permute(1,3,0,2)
yPermB = rescaledB.permute(1,3,0,2)
yResult = self.model(yPermA, yPermB)
zPermA = rescaledA.permute(2,3,0,1)
zPermB = rescaledB.permute(2,3,0,1)
zResult = self.model(zPermA, yPermB)
distance[i] = np.mean( (xResult + yResult + zResult) / 3 )
elif self.mode == "LSIM2D":
tensA = torch.from_numpy(dataA[i]).cuda()
tensB = torch.from_numpy(dataB[i]).cuda()
xPermA = tensA.permute(0,3,1,2)[None,...]
xPermB = tensB.permute(0,3,1,2)[None,...]
xDict = {"reference": xPermA, "other": xPermB}
xResult = self.model(xDict).cpu().numpy()
yPermA = tensA.permute(1,3,0,2)[None,...]
yPermB = tensB.permute(1,3,0,2)[None,...]
yDict = {"reference": yPermA, "other": yPermB}
yResult = self.model(yDict).cpu().numpy()
zPermA = tensA.permute(2,3,0,1)[None,...]
zPermB = tensB.permute(2,3,0,1)[None,...]
zDict = {"reference": zPermA, "other": zPermB}
zResult = self.model(zDict).cpu().numpy()
distance[i] = np.mean( (xResult + yResult + zResult) / 3 )
return torch.from_numpy(distance).float().view(full.shape[0], -1)
# input two numpy arrays with shape [width, height, depth, channel] or shape
# [batch, width, height, depth, channel] where channel = 1 or channel = 3
# and return a distance of shape [1] or [batch]
# If true, normalize performs a normalization to the models native data range jointly for the full data batch
# If true, interpolate performs a spatial interpolation to the models native data size jointly for the full data batch
def computeDistance(self, input1:np.ndarray, input2:np.ndarray, normalize:bool, interpolate:bool) -> np.ndarray:
    """Compute per-pair distances between two volume stacks via forward()."""
    assert (not self.training), "Distance computation should happen in evaluation mode!"
    assert (input1.shape == input2.shape), "Input shape mismatch!"
    # add a batch dimension for single-volume (4D) inputs
    in1 = input1[None,...] if input1.ndim == 4 else input1
    in2 = input2[None,...] if input2.ndim == 4 else input2
    # NOTE(review): TransformsInference/Params come from the star import of
    # volsim.simulation_dataset -- verify parameter semantics there
    data_transform = TransformsInference("single", 0, Params(dataScaleInference=-1, dataConvertMode="none", dataNormQuant=1.0, dataNormMin=0.0, dataNormMax=255.0))
    if not normalize:
        data_transform.normalize = "none"
    if not interpolate:
        data_transform.outputSize = -1
    data = np.concatenate([in1, in2], axis=0) # stack along param dimension
    dataDict = {"data": data, "path": None, "distance": None, "indexA" : None, "indexB" : None, "idxMin" : None, "idxMax" : None}
    data = data_transform(dataDict)["data"]
    nPairs = in1.shape[0]
    distance = torch.from_numpy(np.zeros(nPairs, dtype=np.float32))
    # pair i compares stacked entry i (from input1) with entry nPairs + i (from input2)
    indexA = torch.from_numpy(np.arange(nPairs, dtype=np.int32))
    indexB = torch.from_numpy(np.arange(nPairs, dtype=np.int32) + nPairs)
    path = np.array([""]*nPairs)
    sample = {"data": data[None,...], "path": path, "distance": distance[None,...],
        "indexA" : indexA[None,...], "indexB" : indexB[None,...], "idxMin" : 0, "idxMax" : nPairs}
    # run the metric and flatten the [1, nPairs] result to [nPairs]
    output = self(sample)
    output = output.cpu().detach().view(-1).numpy()
return output | [
"scipy.ndimage.filters.gaussian_filter",
"torch.from_numpy",
"numpy.array",
"skimage.metrics.peak_signal_noise_ratio",
"numpy.arange",
"numpy.mean",
"skimage.metrics.variation_of_information",
"skimage.metrics.structural_similarity",
"numpy.fft.fftn",
"numpy.max",
"numpy.stack",
"skimage.metri... | [((1205, 1238), 'torch.index_select', 'torch.index_select', (['full', '(1)', 'idxA'], {}), '(full, 1, idxA)\n', (1223, 1238), False, 'import torch\n'), ((1255, 1288), 'torch.index_select', 'torch.index_select', (['full', '(1)', 'idxB'], {}), '(full, 1, idxB)\n', (1273, 1288), False, 'import torch\n'), ((1628, 1652), 'numpy.empty', 'np.empty', (['dataA.shape[0]'], {}), '(dataA.shape[0])\n', (1636, 1652), True, 'import numpy as np\n'), ((6499, 6533), 'numpy.concatenate', 'np.concatenate', (['[in1, in2]'], {'axis': '(0)'}), '([in1, in2], axis=0)\n', (6513, 6533), True, 'import numpy as np\n'), ((7011, 7034), 'numpy.array', 'np.array', (["([''] * nPairs)"], {}), "([''] * nPairs)\n", (7019, 7034), True, 'import numpy as np\n'), ((662, 675), 'lpips.models.dist_model.DistModel', 'LPIPS_Model', ([], {}), '()\n', (673, 675), True, 'from lpips.models.dist_model import DistModel as LPIPS_Model\n'), ((822, 879), 'lsim.distance_model.DistanceModel', 'LSIM2D_Model', ([], {'baseType': '"""lsim"""', 'isTrain': '(False)', 'useGPU': '(True)'}), "(baseType='lsim', isTrain=False, useGPU=True)\n", (834, 879), True, 'from lsim.distance_model import DistanceModel as LSIM2D_Model\n'), ((6813, 6847), 'numpy.zeros', 'np.zeros', (['nPairs'], {'dtype': 'np.float32'}), '(nPairs, dtype=np.float32)\n', (6821, 6847), True, 'import numpy as np\n'), ((6883, 6916), 'numpy.arange', 'np.arange', (['nPairs'], {'dtype': 'np.int32'}), '(nPairs, dtype=np.int32)\n', (6892, 6916), True, 'import numpy as np\n'), ((1758, 1804), 'skimage.metrics.mean_squared_error', 'metrics.mean_squared_error', (['dataA[i]', 'dataB[i]'], {}), '(dataA[i], dataB[i])\n', (1784, 1804), True, 'import skimage.metrics as metrics\n'), ((6952, 6985), 'numpy.arange', 'np.arange', (['nPairs'], {'dtype': 'np.int32'}), '(nPairs, dtype=np.int32)\n', (6961, 6985), True, 'import numpy as np\n'), ((1875, 1911), 'scipy.ndimage.filters.gaussian_filter', 'filters.gaussian_filter', (['dataA[i]', '(2)'], {}), '(dataA[i], 2)\n', 
(1898, 1911), True, 'import scipy.ndimage.filters as filters\n'), ((1936, 1972), 'scipy.ndimage.filters.gaussian_filter', 'filters.gaussian_filter', (['dataB[i]', '(2)'], {}), '(dataB[i], 2)\n', (1959, 1972), True, 'import scipy.ndimage.filters as filters\n'), ((2003, 2043), 'skimage.metrics.mean_squared_error', 'metrics.mean_squared_error', (['tempA', 'tempB'], {}), '(tempA, tempB)\n', (2029, 2043), True, 'import skimage.metrics as metrics\n'), ((2224, 2264), 'skimage.metrics.mean_squared_error', 'metrics.mean_squared_error', (['tempA', 'tempB'], {}), '(tempA, tempB)\n', (2250, 2264), True, 'import skimage.metrics as metrics\n'), ((5256, 5282), 'torch.from_numpy', 'torch.from_numpy', (['distance'], {}), '(distance)\n', (5272, 5282), False, 'import torch\n'), ((2117, 2138), 'numpy.fft.fftn', 'np.fft.fftn', (['dataA[i]'], {}), '(dataA[i])\n', (2128, 2138), True, 'import numpy as np\n'), ((2171, 2192), 'numpy.fft.fftn', 'np.fft.fftn', (['dataB[i]'], {}), '(dataB[i])\n', (2182, 2192), True, 'import numpy as np\n'), ((2337, 2427), 'skimage.metrics.structural_similarity', 'metrics.structural_similarity', (['dataA[i]', 'dataB[i]'], {'data_range': '(255.0)', 'multichannel': '(True)'}), '(dataA[i], dataB[i], data_range=255.0,\n multichannel=True)\n', (2366, 2427), True, 'import skimage.metrics as metrics\n'), ((2514, 2581), 'skimage.metrics.peak_signal_noise_ratio', 'metrics.peak_signal_noise_ratio', (['dataA[i]', 'dataB[i]'], {'data_range': '(255)'}), '(dataA[i], dataB[i], data_range=255)\n', (2545, 2581), True, 'import skimage.metrics as metrics\n'), ((2652, 2668), 'math.isinf', 'math.isinf', (['psnr'], {}), '(psnr)\n', (2662, 2668), False, 'import math\n'), ((2753, 2811), 'skimage.metrics.variation_of_information', 'metrics.variation_of_information', (['dataAInt[i]', 'dataBInt[i]'], {}), '(dataAInt[i], dataBInt[i])\n', (2785, 2811), True, 'import skimage.metrics as metrics\n'), ((2967, 2999), 'numpy.stack', 'np.stack', (['[tempA, tempB]'], {'axis': '(0)'}), '([tempA, 
tempB], axis=0)\n', (2975, 2999), True, 'import numpy as np\n'), ((3068, 3082), 'numpy.isnan', 'np.isnan', (['corr'], {}), '(corr)\n', (3076, 3082), True, 'import numpy as np\n'), ((3023, 3043), 'numpy.corrcoef', 'np.corrcoef', (['stacked'], {}), '(stacked)\n', (3034, 3043), True, 'import numpy as np\n'), ((3333, 3349), 'numpy.min', 'np.min', (['dataA[i]'], {}), '(dataA[i])\n', (3339, 3349), True, 'import numpy as np\n'), ((3373, 3389), 'numpy.max', 'np.max', (['dataA[i]'], {}), '(dataA[i])\n', (3379, 3389), True, 'import numpy as np\n'), ((3551, 3567), 'numpy.min', 'np.min', (['dataB[i]'], {}), '(dataB[i])\n', (3557, 3567), True, 'import numpy as np\n'), ((3591, 3607), 'numpy.max', 'np.max', (['dataB[i]'], {}), '(dataB[i])\n', (3597, 3607), True, 'import numpy as np\n'), ((4249, 4291), 'numpy.mean', 'np.mean', (['((xResult + yResult + zResult) / 3)'], {}), '((xResult + yResult + zResult) / 3)\n', (4256, 4291), True, 'import numpy as np\n'), ((3229, 3241), 'numpy.abs', 'np.abs', (['corr'], {}), '(corr)\n', (3235, 3241), True, 'import numpy as np\n'), ((5195, 5237), 'numpy.mean', 'np.mean', (['((xResult + yResult + zResult) / 3)'], {}), '((xResult + yResult + zResult) / 3)\n', (5202, 5237), True, 'import numpy as np\n'), ((3492, 3519), 'torch.from_numpy', 'torch.from_numpy', (['rescaledA'], {}), '(rescaledA)\n', (3508, 3519), False, 'import torch\n'), ((3710, 3737), 'torch.from_numpy', 'torch.from_numpy', (['rescaledB'], {}), '(rescaledB)\n', (3726, 3737), False, 'import torch\n'), ((4359, 4385), 'torch.from_numpy', 'torch.from_numpy', (['dataA[i]'], {}), '(dataA[i])\n', (4375, 4385), False, 'import torch\n'), ((4417, 4443), 'torch.from_numpy', 'torch.from_numpy', (['dataB[i]'], {}), '(dataB[i])\n', (4433, 4443), False, 'import torch\n')] |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# for overlaying images:
from matplotlib import offsetbox
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.axes_grid1 import make_axes_locatable
## Plotting functions ------------------------------------------------------
def plot2D(X=np.ndarray([]), label=np.array([]),
           figsize=(10, 10), title=None,
           col_map=plt.cm.Spectral, **kwargs):
    """Scatter-plot a 2D embedding, optionally colored by labels.

    X: (n, 2) array of coordinates.
    label: per-point labels. String labels or <= 10 unique values are
        treated as categories (one scatter call per class); otherwise a
        continuous colormap with a colorbar is used.
    col_map: a matplotlib colormap, or a list of colors (one per class).
    """
    if len(label) > 0 and X.shape[0] != len(label):
        raise ValueError("Number of rows in X must equal length of label, if given.")
    ulabs = np.sort(np.unique(label))
    plt.figure(figsize=figsize)
    if isinstance(title, str):
        plt.title(title)
    if len(label) == 0:
        plt.scatter(X[:, 0], X[:, 1])
    elif any([isinstance(lab, str) for lab in ulabs]) or len(ulabs) <= 10:
        for i, lab in enumerate(ulabs):
            # idiom fix: isinstance instead of comparing type objects
            if isinstance(col_map, list):
                plt.scatter(X[label == lab, 0], X[label == lab, 1],
                            edgecolor='black', linewidth=0.1,
                            label=str(lab), c=col_map[i], **kwargs)
            else:
                plt.scatter(X[label == lab, 0], X[label == lab, 1],
                            edgecolor='black', linewidth=0.1,
                            label=str(lab), **kwargs)
        # plt.legend()
    else:
        plt.scatter(X[:, 0], X[:, 1],
                    edgecolor='black', linewidth=0.1,
                    cmap=col_map, c=label, **kwargs)
        plt.colorbar(shrink=0.8)
    # plt.axes().set_aspect('equal')
    return
def plot3D(X=np.ndarray([]), label=np.array([]), title=None,
           figsize=(12, 10), phi=20, theta=60,
           col_map=plt.cm.Spectral, col_bar=True):
    """Scatter-plot a 3D embedding with equalized axes.

    X: (n, 3) array of coordinates.
    label: per-point labels; string categories are mapped to integer codes.
    phi/theta: elevation/azimuth of the initial view.
    Returns the scatter handle.
    """
    if len(label) > 0 and X.shape[0] != len(label):
        raise ValueError("Number of rows in X must equal length of label, if given.")
    ulabs = np.unique(label)
    if any([isinstance(lab, str) for lab in ulabs]):
        # BUG FIX: the old comprehension grouped codes by category and so
        # lost the original point order; map each label in place instead
        code = {cat: i for i, cat in enumerate(ulabs)}
        label = [code[lab] for lab in label]
    fig = plt.figure(figsize=figsize)
    if isinstance(title, str):
        plt.suptitle(title)
    ax = fig.add_subplot(111, projection='3d')
    if len(label) == 0:
        # BUG FIX: keep the handle so the final `return p` does not raise
        p = ax.scatter(X[:, 0], X[:, 1], X[:, 2])
    else:
        p = ax.scatter(X[:, 0], X[:, 1], X[:, 2],
                       c=label, s=50, cmap=col_map,
                       edgecolor='black', linewidth=0.1)
        if col_bar:
            fig.colorbar(p, shrink=0.7)
    # equalize axis extents around the data mid-point
    max_range = np.array([X[:, 0].max() - X[:, 0].min(),
                          X[:, 1].max() - X[:, 1].min(),
                          X[:, 2].max() - X[:, 2].min()]).max() / 2.0
    mid_x = (X[:, 0].max() + X[:, 0].min()) * 0.5
    mid_y = (X[:, 1].max() + X[:, 1].min()) * 0.5
    mid_z = (X[:, 2].max() + X[:, 2].min()) * 0.5
    ax.set_xlim3d(mid_x - max_range, mid_x + max_range)
    ax.set_ylim3d(mid_y - max_range, mid_y + max_range)
    ax.set_zlim3d(mid_z - max_range, mid_z + max_range)
    ax.view_init(phi, theta)
    # NOTE(review): newer matplotlib rejects non-'auto' aspect on 3D axes --
    # confirm the installed version supports this call
    ax.set_aspect(1.0)
    return p
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot2D_with_images(X, labels, images, title=None, figsize=(10, 8)):
    """Plot a 2D embedding with text labels and image thumbnails.

    Coordinates are min-max scaled to [0, 1]; a thumbnail is drawn only
    when the point is not too close to a previously shown one.
    """
    lo, hi = np.min(X, 0), np.max(X, 0)
    X = (X - lo) / (hi - lo)
    plt.figure(figsize=figsize)
    ax = plt.subplot(111)
    for i in range(X.shape[0]):
        plt.text(X[i, 0], X[i, 1], str(labels[i]),
                 color=plt.cm.tab10(labels[i] / 10.),
                 fontdict={'weight': 'bold', 'size': 16})
    if hasattr(offsetbox, 'AnnotationBbox'):
        # only print thumbnails with matplotlib > 1.0
        shown_images = np.array([[1., 1.]])  # just something big
        for i in range(X.shape[0]):
            sq_dist = np.sum((X[i] - shown_images) ** 2, 1)
            if np.min(sq_dist) < 4e-3:
                # don't show points that are too close
                continue
            shown_images = np.r_[shown_images, [X[i]]]
            thumbnail = offsetbox.AnnotationBbox(
                offsetbox.OffsetImage(images[i], cmap=plt.cm.gray_r),
                X[i])
            ax.add_artist(thumbnail)
    plt.xticks([])
    plt.yticks([])
    if title is not None:
        plt.title(title)
#----------------------------------------------------------------------
## Plot list of 2D embeddings:
def plot_embdeddings(X_lst, color, name_lst=None, title="",
                     figsize=(15, 8), fontsize=14, color_map=None,
                     axis_equal=True, discrete_colors=False, ncol=5, **kwargs):
    """Plot a grid of 2D embeddings, ncol plots per row.

    X_lst: list of (n, 2) arrays.
    color: one color vector per embedding, or a single vector reused for all.
    discrete_colors: if True, each unique color value gets its own scatter
        call using a discretized Spectral palette.
    """
    # NOTE: `ncol` is a named parameter, so it can never appear in **kwargs;
    # the old `if 'ncol' in kwargs` branch was dead code and has been removed.
    # add_subplot requires integer grid sizes, hence the int() cast.
    nrow = int(np.ceil(len(X_lst) / ncol))
    if (len(color) != len(X_lst)) and (len(color) == X_lst[0].shape[0]):
        # a single per-point color vector was given: reuse it everywhere
        color = [list(color)] * len(X_lst)
    if color_map is None and not discrete_colors:
        color_map = plt.cm.Spectral
    elif color_map is None and discrete_colors:
        cmap = mpl.cm.get_cmap('Spectral')
        max_num_col = max([len(np.unique(c)) for c in color])
        color_map = [mpl.colors.rgb2hex(cmap(x))
                     for x in np.linspace(0, 1, max_num_col)]
    fig = plt.figure(figsize=figsize)
    plt.suptitle(title, fontsize=fontsize)
    for i, X in enumerate(X_lst):
        icol = color[i]
        ax = fig.add_subplot(nrow, ncol, 1 + i)
        if discrete_colors:
            for k, col in enumerate(np.unique(icol)):
                idx = (np.array(icol) == col)
                plt.scatter(X[idx, 0], X[idx, 1], label=str(col),
                            c=color_map[k], **kwargs)
        else:
            plt.scatter(X[:, 0], X[:, 1], c=icol, cmap=color_map, **kwargs)
        if name_lst is not None:
            name = name_lst[i]
            plt.title(name, fontsize=fontsize)
        if axis_equal:
            plt.axis('equal')
    return
# import plotly.plotly as py
# import plotly.graph_objs as go
def plotly_3D(x, y, z, label=None, title="",
              size=3, colors='Viridis'):
    """Build a plotly 3D scatter figure.

    Returns the plotly Figure, or False when plotly is not installed.
    """
    # BUG FIX: the old code called __import__(module_name) with an undefined
    # `module_name` (always a NameError, never an ImportError) and then used
    # `plotly` without ever importing it. Import it properly instead.
    try:
        import plotly.graph_objs
    except ImportError:
        return False
    if label is not None:
        trace1 = plotly.graph_objs.Scatter3d(
            x=x, y=y, z=z, mode='markers',
            marker=dict(
                size=size,
                color=label,  # set color to an array/list of desired values
                colorscale=colors,  # choose a colorscale
                opacity=0.8
            )
        )
    else:
        trace1 = plotly.graph_objs.Scatter3d(
            x=x, y=y, z=z,
            mode='markers',
            marker=dict(
                color='rgb(127, 127, 127)',
                size=size,
                opacity=0.8,
                line=dict(
                    color='rgb(204, 204, 204)',
                    width=0.5)
            )
        )
    data = [trace1]
    layout = plotly.graph_objs.Layout(margin=dict(l=0, r=0, b=0, t=0))
    fig = plotly.graph_objs.Figure(data=data, layout=layout)
    return fig
# fig = plt.figure(figsize=(17, 6))
# fig.suptitle(r"bandwidth $\log(q_0)$ vs $\log(\sigma_{tsne}$)")
# ax = fig.add_subplot(1, 2, 1, projection='3d')
# p = ax.scatter(
# X[:, 0], X[:, 1],X[:, 2],
# c=np.log(q0), s=50, cmap=plt.cm.RdBu,
# edgecolor='black', linewidth=0.1)
# fig.colorbar(p)
# ax = fig.add_subplot(1, 2, 2, projection='3d')
# p = ax.scatter(
# X[:, 0], X[:, 1],X[:, 2],
# c=np.log(sigma.ravel()), s=50, cmap=plt.cm.RdBu,
# edgecolor='black', linewidth=0.1)
# fig.colorbar(p)
# Do the following once only
# import colorlover as cl
# import matplotlib as mpl
# import plotly.plotly as py
# from matplotlib.colors import LogNorm
# from mpl_toolkits.mplot3d import Axes3D
# import plotly
# plotly.tools.set_credentials_file(username='nlhuong', api_key='blablabla')
# fig = plotly_3D(X[:, 0], X[:, 1], X[:, 2], label = color, size = 4,
# colors='Rainbow')
# py.iplot(fig, filename='Swiss Roll Uniform') | [
"matplotlib.offsetbox.OffsetImage",
"numpy.array",
"numpy.max",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.scatter",
"numpy.min",
"matplotlib.pyplot.axis",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.title",
"matplotlib.pyplot.suptitle",
"nu... | [((324, 338), 'numpy.ndarray', 'np.ndarray', (['[]'], {}), '([])\n', (334, 338), True, 'import numpy as np\n'), ((346, 358), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (354, 358), True, 'import numpy as np\n'), ((630, 657), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (640, 657), True, 'import matplotlib.pyplot as plt\n'), ((1602, 1616), 'numpy.ndarray', 'np.ndarray', (['[]'], {}), '([])\n', (1612, 1616), True, 'import numpy as np\n'), ((1624, 1636), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1632, 1636), True, 'import numpy as np\n'), ((1910, 1926), 'numpy.unique', 'np.unique', (['label'], {}), '(label)\n', (1919, 1926), True, 'import numpy as np\n'), ((2077, 2104), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (2087, 2104), True, 'import matplotlib.pyplot as plt\n'), ((3372, 3399), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (3382, 3399), True, 'import matplotlib.pyplot as plt\n'), ((3409, 3425), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (3420, 3425), True, 'import matplotlib.pyplot as plt\n'), ((5245, 5272), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (5255, 5272), True, 'import matplotlib.pyplot as plt\n'), ((5277, 5315), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {'fontsize': 'fontsize'}), '(title, fontsize=fontsize)\n', (5289, 5315), True, 'import matplotlib.pyplot as plt\n'), ((608, 624), 'numpy.unique', 'np.unique', (['label'], {}), '(label)\n', (617, 624), True, 'import numpy as np\n'), ((697, 713), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (706, 713), True, 'import matplotlib.pyplot as plt\n'), ((746, 775), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {}), '(X[:, 0], X[:, 1])\n', (757, 775), True, 'import matplotlib.pyplot as plt\n'), 
((2144, 2163), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {}), '(title)\n', (2156, 2163), True, 'import matplotlib.pyplot as plt\n'), ((3302, 3314), 'numpy.min', 'np.min', (['X', '(0)'], {}), '(X, 0)\n', (3308, 3314), True, 'import numpy as np\n'), ((3316, 3328), 'numpy.max', 'np.max', (['X', '(0)'], {}), '(X, 0)\n', (3322, 3328), True, 'import numpy as np\n'), ((3744, 3766), 'numpy.array', 'np.array', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (3752, 3766), True, 'import numpy as np\n'), ((4232, 4246), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4242, 4246), True, 'import matplotlib.pyplot as plt\n'), ((4248, 4262), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (4258, 4262), True, 'import matplotlib.pyplot as plt\n'), ((4297, 4313), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (4306, 4313), True, 'import matplotlib.pyplot as plt\n'), ((1365, 1466), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'edgecolor': '"""black"""', 'linewidth': '(0.1)', 'cmap': 'col_map', 'c': 'label'}), "(X[:, 0], X[:, 1], edgecolor='black', linewidth=0.1, cmap=\n col_map, c=label, **kwargs)\n", (1376, 1466), True, 'import matplotlib.pyplot as plt\n'), ((1510, 1534), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'shrink': '(0.8)'}), '(shrink=0.8)\n', (1522, 1534), True, 'import matplotlib.pyplot as plt\n'), ((3842, 3879), 'numpy.sum', 'np.sum', (['((X[i] - shown_images) ** 2)', '(1)'], {}), '((X[i] - shown_images) ** 2, 1)\n', (3848, 3879), True, 'import numpy as np\n'), ((5029, 5056), 'matplotlib.cm.get_cmap', 'mpl.cm.get_cmap', (['"""Spectral"""'], {}), "('Spectral')\n", (5044, 5056), True, 'import matplotlib as mpl\n'), ((5702, 5765), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'c': 'icol', 'cmap': 'color_map'}), '(X[:, 0], X[:, 1], c=icol, cmap=color_map, **kwargs)\n', (5713, 5765), True, 'import matplotlib.pyplot as plt\n'), ((5842, 5876), 
'matplotlib.pyplot.title', 'plt.title', (['name'], {'fontsize': 'fontsize'}), '(name, fontsize=fontsize)\n', (5851, 5876), True, 'import matplotlib.pyplot as plt\n'), ((5912, 5929), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (5920, 5929), True, 'import matplotlib.pyplot as plt\n'), ((3532, 3562), 'matplotlib.pyplot.cm.tab10', 'plt.cm.tab10', (['(labels[i] / 10.0)'], {}), '(labels[i] / 10.0)\n', (3544, 3562), True, 'import matplotlib.pyplot as plt\n'), ((3895, 3907), 'numpy.min', 'np.min', (['dist'], {}), '(dist)\n', (3901, 3907), True, 'import numpy as np\n'), ((4116, 4168), 'matplotlib.offsetbox.OffsetImage', 'offsetbox.OffsetImage', (['images[i]'], {'cmap': 'plt.cm.gray_r'}), '(images[i], cmap=plt.cm.gray_r)\n', (4137, 4168), False, 'from matplotlib import offsetbox\n'), ((5486, 5501), 'numpy.unique', 'np.unique', (['icol'], {}), '(icol)\n', (5495, 5501), True, 'import numpy as np\n'), ((5199, 5229), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'max_num_col'], {}), '(0, 1, max_num_col)\n', (5210, 5229), True, 'import numpy as np\n'), ((5527, 5541), 'numpy.array', 'np.array', (['icol'], {}), '(icol)\n', (5535, 5541), True, 'import numpy as np\n'), ((5088, 5100), 'numpy.unique', 'np.unique', (['c'], {}), '(c)\n', (5097, 5100), True, 'import numpy as np\n')] |
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import norm
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
# Plot the empirical distribution of IBEX-35 daily log returns together with
# a fitted Gaussian, plus a zoomed inset highlighting the fat right tail.
sns.set_style("darkgrid")
df = pd.read_csv("ibex_35.csv")
df = df.dropna()
# daily log returns: log(P_t) - log(P_{t-1})
log_returns = np.diff(np.log(df["Close"].values))
# maximum-likelihood Gaussian fit to the returns
mu , std = norm.fit(log_returns)
fig , ax = plt.subplots(figsize=(12,8))
plt.rcParams.update({'font.size': 16})
ret = plt.hist(log_returns , bins = 100 , density = True , color="dodgerblue" , label = "Empirical Distribution")
xmin , xmax = plt.xlim()
x = np.linspace(xmin , xmax , 100)
# overlay the fitted normal density over the histogram
p = norm.pdf(x , mu , std)
plt.plot(x , p ,'k' , linewidth=2 , label = "Gaussian Fit")
plt.legend(loc=2)
plt.title("IBEX-35 Log Returns Distribution")
plt.xlabel("$R_{(t)}$")
# zoomed inset on the tail region where the empirical density exceeds the fit
axins1 = zoomed_inset_axes(ax, zoom = 5, loc=1)
axins1.hist(log_returns , bins=100 , density = True , color="dodgerblue" , label="Fat Tails")
axins1.plot(x,p , 'k')
axins1.legend(loc=1)
# inset view window (right tail)
x1, x2, y1, y2 = 0.035,0.060,0.05,2
axins1.set_xlim(x1, x2)
axins1.set_ylim(y1, y2)
plt.yticks(visible=False)
# connect the inset to its source region on the main axes
mark_inset(ax, axins1, loc1=4, loc2=3, fc="none", ec="0.5")
plt.savefig("gaussian_distributionibex.pdf")
| [
"matplotlib.pyplot.hist",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"mpl_toolkits.axes_grid1.inset_locator.zoomed_inset_axes",
"mpl_toolkits.axes_grid1.inset_locator.mark_inset",
"numpy.log",
"seaborn.set_style",
"matplotlib.pyplot.rcPara... | [((252, 277), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (265, 277), True, 'import seaborn as sns\n'), ((284, 310), 'pandas.read_csv', 'pd.read_csv', (['"""ibex_35.csv"""'], {}), "('ibex_35.csv')\n", (295, 310), True, 'import pandas as pd\n'), ((392, 413), 'scipy.stats.norm.fit', 'norm.fit', (['log_returns'], {}), '(log_returns)\n', (400, 413), False, 'from scipy.stats import norm\n'), ((426, 455), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (438, 455), True, 'import matplotlib.pyplot as plt\n'), ((455, 493), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 16}"], {}), "({'font.size': 16})\n", (474, 493), True, 'import matplotlib.pyplot as plt\n'), ((500, 602), 'matplotlib.pyplot.hist', 'plt.hist', (['log_returns'], {'bins': '(100)', 'density': '(True)', 'color': '"""dodgerblue"""', 'label': '"""Empirical Distribution"""'}), "(log_returns, bins=100, density=True, color='dodgerblue', label=\n 'Empirical Distribution')\n", (508, 602), True, 'import matplotlib.pyplot as plt\n'), ((622, 632), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (630, 632), True, 'import matplotlib.pyplot as plt\n'), ((638, 666), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(100)'], {}), '(xmin, xmax, 100)\n', (649, 666), True, 'import numpy as np\n'), ((673, 693), 'scipy.stats.norm.pdf', 'norm.pdf', (['x', 'mu', 'std'], {}), '(x, mu, std)\n', (681, 693), False, 'from scipy.stats import norm\n'), ((696, 750), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'p', '"""k"""'], {'linewidth': '(2)', 'label': '"""Gaussian Fit"""'}), "(x, p, 'k', linewidth=2, label='Gaussian Fit')\n", (704, 750), True, 'import matplotlib.pyplot as plt\n'), ((756, 773), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (766, 773), True, 'import matplotlib.pyplot as plt\n'), ((775, 820), 'matplotlib.pyplot.title', 'plt.title', 
(['"""IBEX-35 Log Returns Distribution"""'], {}), "('IBEX-35 Log Returns Distribution')\n", (784, 820), True, 'import matplotlib.pyplot as plt\n'), ((821, 844), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$R_{(t)}$"""'], {}), "('$R_{(t)}$')\n", (831, 844), True, 'import matplotlib.pyplot as plt\n'), ((856, 892), 'mpl_toolkits.axes_grid1.inset_locator.zoomed_inset_axes', 'zoomed_inset_axes', (['ax'], {'zoom': '(5)', 'loc': '(1)'}), '(ax, zoom=5, loc=1)\n', (873, 892), False, 'from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes\n'), ((1118, 1143), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'visible': '(False)'}), '(visible=False)\n', (1128, 1143), True, 'import matplotlib.pyplot as plt\n'), ((1144, 1203), 'mpl_toolkits.axes_grid1.inset_locator.mark_inset', 'mark_inset', (['ax', 'axins1'], {'loc1': '(4)', 'loc2': '(3)', 'fc': '"""none"""', 'ec': '"""0.5"""'}), "(ax, axins1, loc1=4, loc2=3, fc='none', ec='0.5')\n", (1154, 1203), False, 'from mpl_toolkits.axes_grid1.inset_locator import mark_inset\n'), ((1204, 1248), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""gaussian_distributionibex.pdf"""'], {}), "('gaussian_distributionibex.pdf')\n", (1215, 1248), True, 'import matplotlib.pyplot as plt\n'), ((352, 378), 'numpy.log', 'np.log', (["df['Close'].values"], {}), "(df['Close'].values)\n", (358, 378), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
# Benchmark function
def dejong5(X1, X2):
    """Evaluate De Jong's fifth function (Shekel's foxholes) element-wise.

    X1, X2: equal-length sequences with the two coordinates per point.
    Returns a float array with one function value per point.
    """
    x1 = np.asarray(X1, dtype=float)
    x2 = np.asarray(X2, dtype=float)
    # 5x5 grid of foxhole centers at all combinations of {-32,-16,0,16,32};
    # previously the A matrix was rebuilt inside the per-point loop
    a = np.array([-32, -16, 0, 16, 32])
    a1 = np.tile(a, 5)    # first coordinate cycles fast  (old A[0, :])
    a2 = np.repeat(a, 5)  # second coordinate cycles slow (old sorted A[1, :])
    # one (n_points, 25) term matrix replaces the former double loop;
    # the dead `y = ...` assignment of the original was dropped as well
    terms = 1.0 / (np.arange(1, 26)
                   + (x1[:, None] - a1) ** 6
                   + (x2[:, None] - a2) ** 6)
    return 1.0 / (0.002 + terms.sum(axis=1))
# As a constraint only
def dejong_constraint(X1, X2, threshold=445):
    """Validity mask: True where the transformed De Jong value is below threshold.

    1) Expected range of the inputs is [-50, 50].
    2) The optimum (0, 0) lies in the invalid region.
    """
    # scale the coordinates
    zoom = .39
    p1 = X1 * zoom
    p2 = X2 * zoom
    # rotate by -33 degrees
    angle = np.radians(-33)
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    rotation = np.array(((cos_a, -sin_a), (sin_a, cos_a)))
    p1, p2 = rotation @ np.array([p1, p2])
    # translate
    p1 += 9
    p2 += -3
    # evaluate the (scaled, rotated, translated) benchmark as a constraint
    return dejong5(p1, p2) < threshold
def create_dataset(random=False, res=100, n_samples=1000):
    """Sample points in [-50, 50]^2 and keep those passing the constraint.

    random=True draws n_samples uniform points; otherwise a res x res grid
    is evaluated. Returns an (n_valid, 2) array of valid coordinates.
    """
    abs_range = 50
    if random:
        xs = (2 * (np.random.rand(n_samples) - 0.5)) * abs_range
        ys = (2 * (np.random.rand(n_samples) - 0.5)) * abs_range
        mask = dejong_constraint(xs, ys)
        candidates = np.c_[xs, ys]
    else:
        axis = np.linspace(-abs_range, abs_range, res)
        grid_x, grid_y = np.meshgrid(axis, axis)  # grid of points
        mask = dejong_constraint(grid_x.flatten(), grid_y.flatten())
        candidates = np.c_[grid_x.flatten(), grid_y.flatten()]
    return candidates[mask, :]
def main():
    "Saves valid dataset as 'dejong_dataset.csv' "
    n_samples = 5000
    # keep drawing random batches (1000 candidates each, via the
    # create_dataset default) until at least n_samples valid points exist
    valid_pts = create_dataset(random=True)
    training_set = [valid_pts]
    while len(np.vstack(training_set)) < n_samples:
        valid_pts = create_dataset(random=True)
        training_set += [valid_pts]
    training_set = np.vstack(training_set)
    # truncate to exactly n_samples rows
    training_set = training_set[:n_samples]
    np.savetxt('dejong_dataset.csv', training_set)
    # Visualize
    fig, ax = plt.subplots(figsize=(4, 4))
    ax.scatter(training_set[:,0], training_set[:,1], s=1)
    # mark the (invalid) global optimum at the origin in red
    ax.scatter(0,0,c='r',s=80)
    ax.set(xlim=[-51,51], ylim=[-51,51])
    fig.savefig('dejong_valid_solutions.png')
    print(f'[*] Done: {len(training_set)} data points created')
    ## -- Train Classic VAE -- #
    print('\n[*] Training VAE')
    # Taken from vae_datagen
    from vae_basic import VecVAE, train_vae
    from sklearn import preprocessing
    raw_data = training_set
    scaler = preprocessing.StandardScaler().fit(raw_data) # zero mean unit standard deviation
    genomes = scaler.transform(raw_data)
    # 2-D genomes -> 2-D latent space, trained for 1000 epochs
    n_dim, n_latent, n_epochs = genomes.shape[1], 2, 1000
    vae = VecVAE(n_dim, n_latent)
    vae = train_vae(genomes, vae, n_epochs, view_mod=25)
    vae.save('dejong_vae.pt')
# Script entry point: build the dataset, then train and save the VAE.
if __name__ == "__main__":
    main()
| [
"numpy.radians",
"numpy.tile",
"numpy.random.rand",
"vae_basic.train_vae",
"numpy.sin",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.meshgrid",
"numpy.vstack",
"numpy.savetxt",
"vae_basic.VecVAE",
"numpy.cos",
"numpy.full",
"matplotlib.... | [((125, 148), 'numpy.full', 'np.full', (['n_vals', 'np.nan'], {}), '(n_vals, np.nan)\n', (132, 148), True, 'import numpy as np\n'), ((957, 972), 'numpy.radians', 'np.radians', (['(-33)'], {}), '(-33)\n', (967, 972), True, 'import numpy as np\n'), ((1021, 1048), 'numpy.array', 'np.array', (['((c, -s), (s, c))'], {}), '(((c, -s), (s, c)))\n', (1029, 1048), True, 'import numpy as np\n'), ((2183, 2206), 'numpy.vstack', 'np.vstack', (['training_set'], {}), '(training_set)\n', (2192, 2206), True, 'import numpy as np\n'), ((2255, 2301), 'numpy.savetxt', 'np.savetxt', (['"""dejong_dataset.csv"""', 'training_set'], {}), "('dejong_dataset.csv', training_set)\n", (2265, 2301), True, 'import numpy as np\n'), ((2333, 2361), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (2345, 2361), True, 'import matplotlib.pyplot as plt\n'), ((3015, 3038), 'vae_basic.VecVAE', 'VecVAE', (['n_dim', 'n_latent'], {}), '(n_dim, n_latent)\n', (3021, 3038), False, 'from vae_basic import VecVAE, train_vae\n'), ((3049, 3095), 'vae_basic.train_vae', 'train_vae', (['genomes', 'vae', 'n_epochs'], {'view_mod': '(25)'}), '(genomes, vae, n_epochs, view_mod=25)\n', (3058, 3095), False, 'from vae_basic import VecVAE, train_vae\n'), ((245, 262), 'numpy.zeros', 'np.zeros', (['(2, 25)'], {}), '((2, 25))\n', (253, 262), True, 'import numpy as np\n'), ((274, 305), 'numpy.array', 'np.array', (['[-32, -16, 0, 16, 32]'], {}), '([-32, -16, 0, 16, 32])\n', (282, 305), True, 'import numpy as np\n'), ((324, 342), 'numpy.tile', 'np.tile', (['a', '(1, 5)'], {}), '(a, (1, 5))\n', (331, 342), True, 'import numpy as np\n'), ((984, 997), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (990, 997), True, 'import numpy as np\n'), ((999, 1012), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1005, 1012), True, 'import numpy as np\n'), ((1066, 1086), 'numpy.array', 'np.array', (['[_x1, _x2]'], {}), '([_x1, _x2])\n', (1074, 1086), True, 'import numpy as 
np\n'), ((1566, 1605), 'numpy.linspace', 'np.linspace', (['(-abs_range)', 'abs_range', 'res'], {}), '(-abs_range, abs_range, res)\n', (1577, 1605), True, 'import numpy as np\n'), ((1618, 1657), 'numpy.linspace', 'np.linspace', (['(-abs_range)', 'abs_range', 'res'], {}), '(-abs_range, abs_range, res)\n', (1629, 1657), True, 'import numpy as np\n'), ((1673, 1690), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1684, 1690), True, 'import numpy as np\n'), ((366, 384), 'numpy.tile', 'np.tile', (['a', '(1, 5)'], {}), '(a, (1, 5))\n', (373, 384), True, 'import numpy as np\n'), ((2042, 2065), 'numpy.vstack', 'np.vstack', (['training_set'], {}), '(training_set)\n', (2051, 2065), True, 'import numpy as np\n'), ((2823, 2853), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (2851, 2853), False, 'from sklearn import preprocessing\n'), ((1380, 1405), 'numpy.random.rand', 'np.random.rand', (['n_samples'], {}), '(n_samples)\n', (1394, 1405), True, 'import numpy as np\n'), ((1440, 1465), 'numpy.random.rand', 'np.random.rand', (['n_samples'], {}), '(n_samples)\n', (1454, 1465), True, 'import numpy as np\n')] |
import numpy as np
from ccgowl.models.functions.function import Function
from ccgowl.models.functions.owl import OWL
def _get_off_diagonal_entries(x):
lt_indices = np.tril_indices_from(x, -1)
lt_indices = list(zip(*lt_indices))
return lt_indices, np.array([x[i][j] for i, j in lt_indices])
class GOWL(Function):
    """Graphical ordered weighted L1 (OWL) penalty on a symmetric matrix.

    The penalty is applied only to the off-diagonal entries of the input
    matrix, delegating the ordered-weighted computation to OWL.
    """

    def eval(self, x, weights):
        """Evaluate g(X) = sum_{i=1}^p rho_i * |x|_[i].

        :param x: (p x p) matrix
        :param weights: weights for owl penalty
        """
        _, values = _get_off_diagonal_entries(x)
        return OWL().eval(values, weights)

    def gradient(self, beta, weights):
        raise NotImplementedError("The OWL function is a non-smooth function. \n"
                                  "Please call the prox function.")

    def prox(self, x, weights):
        """Proximal operator of the penalty, applied off-diagonal.

        :param x: (p x p) matrix
        :param weights: weights for owl penalty
        """
        index_pairs, values = _get_off_diagonal_entries(x)
        shrunk = OWL().prox(values, weights)
        # Write the shrunk values back into the lower triangle of x.
        for pair, value in zip(index_pairs, shrunk):
            x[pair] = value
        # Symmetrize: strict lower triangle plus its transpose (the
        # transpose of np.tril(x) also carries the diagonal).
        return np.tril(x, -1) + np.tril(x).T

    def hessian(self):
        raise NotImplementedError("The OWL function is a non-smooth function. \n"
                                  "Please call the prox function.")
| [
"numpy.array",
"numpy.tril_indices_from",
"ccgowl.models.functions.owl.OWL",
"numpy.tril"
] | [((171, 198), 'numpy.tril_indices_from', 'np.tril_indices_from', (['x', '(-1)'], {}), '(x, -1)\n', (191, 198), True, 'import numpy as np\n'), ((262, 304), 'numpy.array', 'np.array', (['[x[i][j] for i, j in lt_indices]'], {}), '([x[i][j] for i, j in lt_indices])\n', (270, 304), True, 'import numpy as np\n'), ((527, 532), 'ccgowl.models.functions.owl.OWL', 'OWL', ([], {}), '()\n', (530, 532), False, 'from ccgowl.models.functions.owl import OWL\n'), ((999, 1004), 'ccgowl.models.functions.owl.OWL', 'OWL', ([], {}), '()\n', (1002, 1004), False, 'from ccgowl.models.functions.owl import OWL\n'), ((1233, 1247), 'numpy.tril', 'np.tril', (['x', '(-1)'], {}), '(x, -1)\n', (1240, 1247), True, 'import numpy as np\n'), ((1250, 1260), 'numpy.tril', 'np.tril', (['x'], {}), '(x)\n', (1257, 1260), True, 'import numpy as np\n')] |
###############################################################################
# Omid55
# Start date: 16 Oct 2018
# Modified date: 02 Apr 2019
# Author: <NAME>
# Email: <EMAIL>
# Dynamic networks and specifically structural balance theory utility module.
###############################################################################
from __future__ import division, print_function, absolute_import, unicode_literals
import itertools
import datetime
import math
import numpy as np
import scipy as sp
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import seaborn as sns
# import enforce
from typing import Dict
from typing import List
from typing import Union
from typing import Set
from typing import Tuple
import utils
###############################################################################
# Network related functions
###############################################################################
# @enforce.runtime_validation
def extract_graph(
        selected_edge_list: pd.DataFrame,
        sum_multiple_edge: bool = True) -> nx.DiGraph:
    """Builds one directed weighted graph from the given edge list.

    When several edges connect the same (source, target) pair, their
    weights are summed if sum_multiple_edge is True; otherwise the edge
    appearing last in the dataframe supplies the weight.

    Args:
        selected_edge_list: Dataframe of edges containing required columns.
        sum_multiple_edge: Whether to pick the latest or sum multiple edges.

    Returns:
        Directed graph created from selected edge list.

    Raises:
        ValueError: if it does not contain required columns.
    """
    utils.check_required_columns(
        selected_edge_list, ['source', 'target', 'weight'])
    if sum_multiple_edge:
        # Aggregate duplicate (source, target) pairs by summing weights.
        edges = selected_edge_list.groupby(
            ['source', 'target'], as_index=False)['weight'].sum()
    else:
        # Keep rows as-is; from_pandas_edgelist lets the last duplicate win.
        edges = selected_edge_list
    return nx.from_pandas_edgelist(
        edges,
        source='source',
        target='target',
        edge_attr='weight',
        create_using=nx.DiGraph())
# @enforce.runtime_validation
def extract_graphs(
        edge_list: pd.DataFrame,
        weeks: int = 4,
        accumulative: bool = False,
        sum_multiple_edge: bool = True) -> List[nx.DiGraph]:
    """Extracts one directed graph per fixed-length time period.

    The edge list is cut into consecutive periods of `weeks` weeks.
    Separated networks (accumulative=False) use only the edges of each
    period; accumulative networks use all edges from the beginning up to
    the end of each period.

    Args:
        edge_list: Dataframe of edges containing required columns.
        weeks: The number of weeks for the desired period length.
        accumulative: Whether we need separated networks or accumulative ones.
        sum_multiple_edge: Whether to pick the latest or sum multiple edges.

    Returns:
        List of directed graphs created based on accumulative or separated.

    Raises:
        ValueError: if it does not contain required columns.
    """
    utils.check_required_columns(
        edge_list, ['edge_date', 'source', 'target', 'weight'])
    first_date = min(edge_list['edge_date'])
    last_date = max(edge_list['edge_date'])
    period_length = datetime.timedelta(weeks * 7)
    # Number of whole periods that fit into the observed time span.
    periods_num = int(np.floor((last_date - first_date).days / (weeks * 7)))
    graphs = []
    for period_index in range(periods_num):
        period_end = first_date + (period_index + 1) * period_length
        if accumulative:
            mask = edge_list['edge_date'] < period_end
        else:
            period_start = period_end - period_length
            mask = ((edge_list['edge_date'] >= period_start)
                    & (edge_list['edge_date'] < period_end))
        graphs.append(extract_graph(
            selected_edge_list=edge_list[mask],
            sum_multiple_edge=sum_multiple_edge))
    return graphs
# @enforce.runtime_validation
def get_all_degrees(graph: nx.DiGraph) -> Dict[str, Dict[str, int]]:
    """Gets self-, in- and out-degree of every node of a given graph.

    Args:
        graph: Given directed graph.

    Returns:
        Dict. of every node mapped to a dictionary of self, in and out degree.

    Raises:
        None.
    """
    degrees = {}
    adjacency = nx.adj_matrix(graph).todense()
    for idx, node in enumerate(graph.nodes()):
        loop_weight = adjacency[idx, idx]
        # Row sums give outgoing weight, column sums incoming weight; both
        # include the self loop, which is reported separately.
        degrees[node] = {
            'self': loop_weight,
            'in': np.sum(adjacency[:, idx]) - loop_weight,
            'out': np.sum(adjacency[idx, :]) - loop_weight}
    return degrees
# @enforce.runtime_validation
def get_just_periods(
        edge_list: pd.DataFrame,
        weeks: int = 4,
        accumulative: bool = False) -> List:
    """Returns the [start, end] date-string pair of every period.

    The time span of the edge list is cut into consecutive periods of
    `weeks` weeks. Separated periods each cover one slice; accumulative
    periods always start at the first edge date.

    Args:
        edge_list: Dataframe of edges containing required columns.
        weeks: The number of weeks for the desired period length.
        accumulative: Whether we need separated periods or accumulative ones.

    Returns:
        List of [start, end] string pairs, one per period.

    Raises:
        ValueError: if it does not contain required columns.
    """
    utils.check_required_columns(
        edge_list, ['edge_date'])
    first_date = min(edge_list['edge_date'])
    last_date = max(edge_list['edge_date'])
    period_length = datetime.timedelta(weeks * 7)
    periods_num = int(np.floor((last_date - first_date).days / (weeks * 7)))
    periods = []
    for period_index in range(periods_num):
        period_end = first_date + (period_index + 1) * period_length
        if accumulative:
            period_start = first_date
        else:
            period_start = period_end - period_length
        # datetime.datetime stringifies with a time component; report only
        # the calendar date in that case.
        if isinstance(period_start, datetime.datetime):
            period_start = period_start.date()
            period_end = period_end.date()
        periods.append([str(period_start), str(period_end)])
    return periods
# # @enforce.runtime_validation
def get_metrics_for_network(
        dgraph: nx.DiGraph) -> Dict[str, Union[float, int]]:
    """Gets the different metrics of the given directed network.

    Computes size/degree/centrality averages on the directed graph, edge
    weight statistics, clustering and connectivity on the undirected
    projection and on its giant connected component (GCC), and the ratio
    of unbalanced 3-cycles (Van De Rijt edge balance).

    Args:
        dgraph: The input network.

    Returns:
        Dictionary of metrics mapped from name to numeric value (the
        'unbalanced cycles 3 ratio' entry is None when there are no
        3-cycles).

    Raises:
        None
    """
    metrics = {}
    # For directed graph.
    n = len(dgraph.nodes())
    e = len(dgraph.edges())
    metrics['#nodes'] = n
    metrics['#edges'] = e
    metrics['#edges/#nodes'] = e / n
    metrics['average in degree'] = np.mean(
        [deg for _, deg in list(dgraph.in_degree)])
    metrics['average out degree'] = np.mean(
        [deg for _, deg in list(dgraph.out_degree)])
    metrics['average w in degree'] = np.mean(
        [deg for _, deg in list(dgraph.in_degree(weight='weight'))])
    metrics['average w out degree'] = np.mean(
        [deg for _, deg in list(dgraph.out_degree(weight='weight'))])
    metrics['average degree'] = np.mean(
        [deg for _, deg in list(dgraph.degree)])
    metrics['average load'] = np.mean(list(
        nx.load_centrality(dgraph).values()))
    metrics['average eigenvector'] = np.mean(list(
        nx.eigenvector_centrality(dgraph, max_iter=10000).values()))
    metrics['average harmonic'] = np.mean(list(
        nx.harmonic_centrality(dgraph).values()))
    metrics['average closeness'] = np.mean(list(
        nx.closeness_centrality(dgraph).values()))
    metrics['average betweenness'] = np.mean(list(
        nx.betweenness_centrality(dgraph).values()))
    # Directed graphs' weights.
    weights = np.zeros(len(dgraph.edges()))
    for i, edge in enumerate(dgraph.edges()):
        weights[i] = dgraph.get_edge_data(edge[0], edge[1])['weight']
    metrics['weights min'] = min(weights)
    metrics['weights max'] = max(weights)
    metrics['weights average'] = np.mean(weights)
    metrics['weights std'] = np.std(weights)
    metrics['#pos edges'] = len(np.where(weights > 0)[0])
    metrics['#neg edges'] = len(np.where(weights < 0)[0])
    # For undirected version of the given directed graph.
    ugraph = nx.to_undirected(dgraph)
    metrics['average (und) clustering coefficient'] = np.mean(
        list(nx.clustering(ugraph, weight=None).values()))
    metrics['algebraic connectivity'] = nx.algebraic_connectivity(
        ugraph, weight='weight')
    # For Giant Connected Component.
    # Bug fix: the largest component must be selected by size (key=len).
    # Plain max() over sets compares them by the subset partial order and
    # returns an arbitrary component, not the giant one.
    GCC = ugraph.subgraph(max(nx.connected_components(ugraph), key=len))
    metrics['#gcc nodes'] = len(GCC.nodes())
    metrics['#gcc edges'] = len(GCC.edges())
    gcc_weights = np.zeros(len(GCC.edges()))
    for i, edge in enumerate(GCC.edges()):
        gcc_weights[i] = GCC.get_edge_data(edge[0], edge[1])['weight']
    metrics['#gcc pos edges'] = len(np.where(gcc_weights > 0)[0])
    metrics['#gcc neg edges'] = len(np.where(gcc_weights < 0)[0])
    metrics['gcc algebraic connectivity'] = nx.algebraic_connectivity(
        GCC, weight='weight')
    metrics['gcc diameter'] = nx.diameter(GCC)
    # My balance metrics: fraction of unbalanced 3-cycles over all 3-cycles.
    edge_balance = compute_vanderijt_edge_balance(dgraph)
    balanced_cycles = 0
    cycles = 0
    for value in edge_balance.values():
        balanced_cycles += value['#balanced_node3']
        cycles += value['#nodes3']
    balanced_ratio = None
    if cycles:
        balanced_ratio = balanced_cycles / cycles
    if balanced_ratio is None:
        metrics['unbalanced cycles 3 ratio'] = None
    else:
        metrics['unbalanced cycles 3 ratio'] = 1 - balanced_ratio
    return metrics
# @enforce.runtime_validation
def count_different_signed_edges(dgraph: nx.DiGraph) -> int:
    """Counts node pairs whose two reciprocal edges carry opposite signs.

    Only pairs connected in both directions are considered.

    Args:
        dgraph: Directed graph that is given for computing balance ratio.

    Returns:
        Number of edges with different signs in the given directed graph.

    Raises:
        None
    """
    mismatch_count = 0
    node_list = list(dgraph.nodes())
    for u, v in itertools.combinations(node_list, 2):
        if dgraph.has_edge(u, v) and dgraph.has_edge(v, u):
            forward = dgraph.get_edge_data(u, v)['weight']
            backward = dgraph.get_edge_data(v, u)['weight']
            if np.sign(forward) != np.sign(backward):
                mismatch_count += 1
    return mismatch_count
# @enforce.runtime_validation
def plot_evolving_graphs(
        dgraphs: List[nx.DiGraph],
        titles: List[str] = None,
        aggregated_dgraph: nx.DiGraph = None) -> None:
    """Plots list of given graphs on a shared node layout.

    If aggregated_dgraph is None, it computes it by combining all graphs
    together; the spring layout of the aggregate is reused for every
    subplot so nodes keep their positions across periods.

    Args:
        dgraphs: List of given evolving graphs.
        titles: List of title content if needed.
        aggregated_dgraph: One aggregated graph.

    Returns:
        None.

    Raises:
        None.
    """
    cols = 3
    # Bug fix: np.ceil returns a float and modern matplotlib rejects
    # non-integer subplot indices; math.ceil yields an int.
    rows = math.ceil(len(dgraphs) / cols)
    sns.set(rc={'figure.figsize': (6*cols, 6*rows)})
    if not aggregated_dgraph:
        aggregated_dgraph = nx.compose_all(dgraphs)
    all_positions = nx.layout.spring_layout(aggregated_dgraph)
    for index, dgraph in enumerate(dgraphs):
        plt.subplot(rows, cols, index + 1)
        nx.draw(dgraph, pos=all_positions, with_labels=True)
        if titles:
            title_name = titles[index]
        else:
            title_name = 'Period {}'.format(index + 1)
        plt.title('{}\n{} nodes, {} edges'.format(
            title_name, len(dgraph.nodes()), len(dgraph.edges())))
# @enforce.runtime_validation
def compute_fairness_goodness(
        dgraph: nx.DiGraph,
        weight_range: float = 20.0,
        max_iteration: int = 100,
        verbose: bool = True) -> Dict[str, Dict[int, float]]:
    """Computes fairness and goodness per node in a weighted signed graph.

    Alternating fixed-point iteration: goodness of a node is the
    fairness-weighted mean of its incoming edge weights; fairness of a
    node measures how close its outgoing ratings are to the receivers'
    goodness. Iteration stops when both total changes fall below 1e-6
    or after max_iteration rounds.

    Args:
        dgraph: Weighted signed graph with weights fall in [-l, l].
        weight_range: Range of weights, for above graph is 2*l.
        max_iteration: The maximum number of iterations if not converge.
        verbose: If we want to print information while computing.

    Returns:
        Dictionary of fairness and goodness as dictionary of values for nodes.

    Raises:
        None.
    """
    # Initializes fairness of every node to 1; goodness starts at the
    # unweighted mean of incoming weights (0 for nodes with no in-edges).
    fairness = {}
    goodness = {}
    nodes = dgraph.nodes()
    for node in nodes:
        fairness[node] = 1
        in_degree = dgraph.in_degree(node)
        if in_degree:
            # Weighted in-degree / count = mean incoming weight.
            goodness[node] = dgraph.in_degree(
                node, weight='weight') / in_degree
        else:
            goodness[node] = 0
    nodes = dgraph.nodes()
    for iteration in range(max_iteration):
        # Accumulated absolute change in this round, used for convergence.
        fairness_diff = 0
        goodness_diff = 0
        if verbose:
            print('-----------------')
            print("Iteration number", iteration)
            print('Updating goodness')
        for node in nodes:
            # Goodness: fairness-weighted average of incoming edge weights.
            inedges = dgraph.in_edges(node, data='weight')
            g = 0
            for edge in inedges:
                g += fairness[edge[0]]*edge[2]
            in_degree = len(inedges)
            if in_degree:
                goodness_diff += abs(g/in_degree - goodness[node])
                goodness[node] = g/in_degree
        if verbose:
            print('Updating fairness')
        for node in nodes:
            # Fairness: 1 minus the normalized distance between the given
            # rating and the receiver's goodness, averaged over out-edges.
            outedges = dgraph.out_edges(node, data='weight')
            f = 0
            for edge in outedges:
                f += 1.0 - abs(edge[2] - goodness[edge[1]])/weight_range
            out_degree = len(outedges)
            if out_degree:
                fairness_diff += abs(f/out_degree - fairness[node])
                fairness[node] = f/out_degree
        if verbose:
            print('Differences in fairness and goodness = {}, {}.'.format(
                fairness_diff, goodness_diff))
        # Converged: both quantities changed by less than 1e-6 in total.
        if (fairness_diff < math.pow(10, -6)
                and goodness_diff < math.pow(10, -6)):
            break
    return {'fairness': fairness, 'goodness': goodness}
###############################################################################
# Balance related functions
###############################################################################
# @enforce.runtime_validation
def cartwright_harary_balance_ratio(dgraph: nx.DiGraph) -> float:
    """Computes the Cartwright and Harary balance ratio over all cycles.

    A cycle is balanced when it contains an even number of negative
    edges (Cartwright and Harary, 1956). The function enumerates every
    simple cycle of the directed graph and returns the fraction that is
    balanced; a graph without cycles is reported as fully balanced.

    Args:
        dgraph: Directed graph that is given for computing balance ratio.

    Returns:
        Number of cycles with even number of negative divided by all cycles.

    Raises:
        None
    """
    total_cycles = 0
    balanced_cycles = 0
    for cycle in nx.simple_cycles(dgraph):
        total_cycles += 1
        # Close the cycle so consecutive pairs cover every cycle edge.
        closed = cycle + [cycle[0]]
        negative_edges = sum(
            1 for a, b in zip(closed, closed[1:])
            if dgraph.get_edge_data(a, b)['weight'] < 0)
        if negative_edges % 2 == 0:
            balanced_cycles += 1
    if total_cycles:
        return balanced_cycles / total_cycles
    return 1
# @enforce.runtime_validation
def fullyconnected_balance_ratio(
        dgraph: nx.DiGraph,
        balance_type: int = 1) -> Tuple[float, int, int]:
    """Computes the ratio of classical balance for all triads.

    Parameter balance_type could take 1, 2, or 3.
    1 is Cartwright & Harary, 2 is Clusering, and 3 is Transitivity.

    Args:
        dgraph: Given directed graph with 0 and +1.
        balance_type: The definition type for structural balance.

    Returns:
        The ratio of balance, number of balanced triads, number of unbalanced
        triads.

    Raises:
        ValueError: If balance_type was anything but 1, 2 or 3, or the graph
        has negative weights.
    """
    # Dispatch table: balance definition -> triad predicate.
    checkers = {
        1: is_fullyconnected_cartwright_harary_balance,
        2: is_fullyconnected_clustering_balanced,
        3: is_fullyconnected_transitivity_balanced}
    if balance_type not in checkers:
        raise ValueError(
            'Balance_type was incorrect.'
            ' It should be 1, 2, or 3. But it was: {}.'.format(balance_type))
    is_balanced = checkers[balance_type]
    # The adjancey matrix, binarized on the positive side.
    adj_matrix = utils.dgraph2adjacency(dgraph)
    adj_matrix[adj_matrix > 0] = 1
    # Makes sure that the dgraph is binary (contaings only 0 and 1).
    if np.any(adj_matrix < 0):
        raise ValueError(
            'Directed graph should only have 0 and 1'
            ' as edge weight in checking classical balance ratio.')
    # Generates all possible triads (3 nodes subgraphs).
    nodes_list = np.array(dgraph.nodes())
    triads = list(itertools.combinations(range(len(nodes_list)), 3))
    balanced_triads = 0
    unbalanced_triads = 0
    for triad in triads:
        triad_subgraph_matrix = utils.sub_adjacency_matrix(
            adj_matrix, list(triad))
        if is_balanced(triad_subgraph_matrix):
            balanced_triads += 1
        else:
            unbalanced_triads += 1
    return (
        (balanced_triads / len(triads)) if len(triads) > 0 else 0,
        balanced_triads,
        unbalanced_triads)
# @enforce.runtime_validation
def sprase_balance_ratio(
        dgraph: nx.DiGraph,
        balance_type: int = 1) -> Tuple[float, int, int]:
    """Computes the ratio of sparse balance for all triads.

    Parameter balance_type could take 1, 2, or 3. 1 is Cartwright & Harary,
    2 is Clustering, and 3 is Transitivity.

    Args:
        dgraph: Given directed graph with 0, +1 and -1.
        balance_type: The definition type for structural balance.

    Returns:
        The ratio of balance, number of balanced triads, number of unbalanced
        triads.

    Raises:
        ValueError: If balance_type was anything but 1, 2 or 3.
    """
    # Dispatch table: balance definition -> triad predicate.
    checkers = {
        1: is_sparsely_cartwright_harary_balanced,
        2: is_sparsely_clustering_balanced,
        3: is_sparsely_transitive_balanced}
    if balance_type not in checkers:
        raise ValueError(
            'Balance_type was incorrect.'
            ' It should be 1, 2, or 3. But it was: {}.'.format(balance_type))
    is_balanced = checkers[balance_type]
    # Makes the graph signed (unweighted): keep only edge signs.
    adj_matrix = utils.dgraph2adjacency(dgraph)
    adj_matrix[adj_matrix > 0] = 1
    adj_matrix[adj_matrix < 0] = -1
    # Generates all possible triads (3 nodes subgraphs).
    nodes_list = np.array(dgraph.nodes())
    triads = list(itertools.combinations(range(len(nodes_list)), 3))
    balanced_triads = 0
    unbalanced_triads = 0
    for triad in triads:
        triad_subgraph_matrix = utils.sub_adjacency_matrix(
            adj_matrix, list(triad))
        if is_balanced(triad_subgraph_matrix):
            balanced_triads += 1
        else:
            unbalanced_triads += 1
    return (
        (balanced_triads / len(triads)) if len(triads) > 0 else 0,
        balanced_triads,
        unbalanced_triads)
# @enforce.runtime_validation
def terzi_sprase_balance_ratio(
        dgraph: nx.DiGraph,
        undirected: bool = True) -> float:
    """Computes Terzi and Winkler (2011) generalized balance ratio.

    Approximates the fraction of balanced triads in a sparse signed
    network via the spectra of the signed adjacency matrix and of its
    absolute (connectivity) matrix (Terzi and Winkler, 2011 [A spectral
    algorithm for computing social balance]). With undirected=False the
    directed adjacency is used as-is and only the real part of the
    resulting ratio is returned.

    Args:
        dgraph: Directed weighted graph to apply edge balance.

    Returns:
        The approximated ratio of balance.

    Raises:
        None
    """
    graph = nx.to_undirected(dgraph) if undirected else dgraph
    # Makes the graph signed (unweighted): keep only edge signs.
    adj_matrix = utils.dgraph2adjacency(graph)
    adj_matrix[adj_matrix > 0] = 1
    adj_matrix[adj_matrix < 0] = -1
    connectivity_matrix = abs(adj_matrix)
    n, _ = adj_matrix.shape
    signed_eigs, _ = np.linalg.eig(adj_matrix)
    unsigned_eigs, _ = np.linalg.eig(connectivity_matrix)
    # Ratio of sums of cubed eigenvalues counts (signed) closed triads.
    balance_ratio = 0.5 * (
        1 + np.sum(np.power(signed_eigs, 3)) / np.sum(np.power(unsigned_eigs, 3)))
    return balance_ratio if undirected else np.real(balance_ratio)
# @enforce.runtime_validation
def kunegis_sprase_balance_ratio(
        dgraph: nx.DiGraph,
        undirected: bool = True) -> float:
    """Computes Kunegis et al. (2010) generalized balance ratio.

    Approximates balance via the smallest eigenvalue of the signed
    Laplacian matrix; see Kunegis et al. (2010) [Spectral Analysis of
    Signed Graphs for Clustering, Prediction and Visualization]. With
    undirected=False the directed adjacency is used as-is and only the
    real part of the result is returned.

    Args:
        dgraph: Directed weighted graph to apply edge balance.

    Returns:
        The approximated ratio of balance.

    Raises:
        None
    """
    graph = nx.to_undirected(dgraph) if undirected else dgraph
    # Makes the graph signed (unweighted): keep only edge signs.
    adj_matrix = utils.dgraph2adjacency(graph)
    adj_matrix[adj_matrix > 0] = 1
    adj_matrix[adj_matrix < 0] = -1
    n, _ = adj_matrix.shape
    # Signed Laplacian: absolute-degree diagonal minus signed adjacency.
    signed_degree_matrix = np.diag(np.sum(abs(adj_matrix), axis=1))
    laplacian_matrix = signed_degree_matrix - adj_matrix
    eigenvalues, _ = np.linalg.eig(laplacian_matrix)
    result = 1 - min(eigenvalues)
    return result if undirected else np.real(result)
# @enforce.runtime_validation
def compute_vanderijt_edge_balance(
        dgraph: nx.DiGraph) -> Dict[tuple, Dict[str, int]]:
    """Computes edge balance based on Van De Rijt (2011).

    For every edge (i, j), counts the third nodes k with a directed path
    i -> k -> j, and how many of those triads have a positive sign
    product x_ij * x_ik * x_kj (i.e. are balanced).

    Args:
        dgraph: Directed weighted graph to apply edge balance.

    Returns:
        Dictionary of edges mapped to the number of balanced
        and total number of triads the edge is involved in.

    Raises:
        None
    """
    edge_sign = {}
    for source, target in dgraph.edges():
        triad_total = 0
        triad_balanced = 0
        xij = dgraph.get_edge_data(source, target)['weight']
        for third in set(dgraph.nodes()) - {source, target}:
            if (dgraph.has_edge(source, third)
                    and dgraph.has_edge(third, target)):
                xik = dgraph.get_edge_data(source, third)['weight']
                xkj = dgraph.get_edge_data(third, target)['weight']
                triad_total += 1
                if np.sign(xij * xik * xkj) > 0:
                    triad_balanced += 1
        # Edges that close no triad are omitted from the result.
        if triad_total:
            edge_sign[(source, target)] = {
                '#balanced_node3': triad_balanced,
                '#nodes3': triad_total}
    return edge_sign
# @enforce.runtime_validation
def is_sparsely_cartwright_harary_balanced(
        triad: np.ndarray,
        everyone_aware_of_others: bool=True) -> bool:
    """Checks Cartwright & Harary balance on a possibly sparse triad.

    A triad is balanced when, for every ordered permutation (i, j, k)
    with both path edges (i, k) and (k, j) present, the direct tie
    x_ij equals the product x_ik * x_kj. When
    everyone_aware_of_others is False the check is restricted to
    permutations where the direct edge (i, j) also exists.

    Args:
        triad: Input triad matrix.
        everyone_aware_of_others: Is everyone aware of others or not.

    Returns:
        Boolean result whether triad is balanced or not.

    Raises:
        ValueError: If there is a self loop in the given triad or triad was not
        3 * 3.
    """
    rows, cols = triad.shape
    if rows != 3 or cols != 3:
        raise ValueError('Triad has unexpected shape.')
    for d in range(rows):
        if triad[d, d]:
            raise ValueError('There is a self loop in given triad: {}.'.format(
                triad))
    for (i, j, k) in itertools.permutations([0, 1, 2]):
        if not (everyone_aware_of_others or abs(triad[i, j])):
            continue
        if abs(triad[i, k]) and abs(triad[k, j]) and (
                triad[i, j] != triad[i, k] * triad[k, j]):
            return False
    return True
# @enforce.runtime_validation
def is_sparsely_clustering_balanced(
        triad: np.ndarray,
        everyone_aware_of_others: bool=True) -> bool:
    """Checks clustering balance (Davis 1967) on a possibly sparse triad.

    Like Cartwright & Harary balance, but the constraint
    x_ij = x_ik * x_kj is only enforced when at least one of the two
    path edges (i, k), (k, j) is positive — so a triad of three mutual
    enemies counts as balanced.

    Args:
        triad: Input triad matrix.
        everyone_aware_of_others: Is everyone aware of others or not.

    Returns:
        Boolean result whether triad is clustering balanced or not.

    Raises:
        ValueError: If there is a self loop in the given triad or triad was not
        3 * 3.
    """
    rows, cols = triad.shape
    if rows != 3 or cols != 3:
        raise ValueError('Triad has unexpected shape.')
    for d in range(rows):
        if triad[d, d]:
            raise ValueError('There is a self loop in given triad: {}.'.format(
                triad))
    for (i, j, k) in itertools.permutations([0, 1, 2]):
        if not (everyone_aware_of_others or abs(triad[i, j])):
            continue
        if (abs(triad[i, k]) and abs(triad[k, j])
                and (triad[i, k] > 0 or triad[k, j] > 0)
                and (triad[i, j] != triad[i, k] * triad[k, j])):
            return False
    return True
# @enforce.runtime_validation
def is_sparsely_transitive_balanced(
        triad: np.ndarray,
        everyone_aware_of_others: bool=True) -> bool:
    """Checks transitive balance ("friend of friend is friend") on a triad.

    The constraint x_ij = x_ik * x_kj is enforced only when BOTH path
    edges (i, k) and (k, j) are positive, i.e. only positive two-step
    paths create an expectation about the direct tie. When
    everyone_aware_of_others is False the check is restricted to
    permutations where the direct edge (i, j) exists.

    Args:
        triad: Input triad matrix.
        everyone_aware_of_others: Is everyone aware of others or not.

    Returns:
        Boolean result whether triad is transitively balanced or not.

    Raises:
        ValueError: If there is a self loop in the given triad or triad was not
        3 * 3.
    """
    rows, cols = triad.shape
    if rows != 3 or cols != 3:
        raise ValueError('Triad has unexpected shape.')
    for d in range(3):
        if triad[d, d]:
            raise ValueError('There is a self loop in given triad: {}.'.format(
                triad))
    for (i, j, k) in itertools.permutations([0, 1, 2]):
        if not (everyone_aware_of_others or abs(triad[i, j])):
            continue
        if (abs(triad[i, k]) and abs(triad[k, j])
                and (triad[i, k] > 0 and triad[k, j] > 0)
                and (triad[i, j] != triad[i, k] * triad[k, j])):
            return False
    return True
# # @enforce.runtime_validation
# def is_sparsely_ranked_clustering_balanced(triad: np.ndarray) -> bool:
# """Checks whether input triad matrix is ranked clustering balance.
# x_{ij} sim x_{ik}x_{kj}, \text{for} k \neq i, j\\
# \text{and} x_{ik} > 0
# Args:
# triad: Input triad matrix.
# Returns:
# Boolean result whether triad is clustering balanced or not.
# Raises:
# ValueError: If there is a self loop in the given triad or triad was
# not
# 3 * 3.
# """
# n, m = triad.shape
# if n != 3 or m != 3:
# raise ValueError('Triad has unexpected shape.')
# for i in range(n):
# if triad[i, i]:
# raise ValueError('There is a self loop in given triad: {}.'
# .format(
# triad))
# for (i, j, k) in list(itertools.permutations([0, 1, 2])):
# if abs(triad[i, j]) or abs(triad[j, i]):
# # If they exist.
# if ((abs(triad[i, k]) and abs(triad[k, j]))
# and (triad[i, k] > 0)
# and (triad[i, j] != triad[i, k]*triad[k, j])):
# return False
# return True
# @enforce.runtime_validation
def is_fullyconnected_cartwright_harary_balance(triad: np.ndarray) -> bool:
    """Checks Cartwright & Harary balance treating absent edges as negative.

    The 0/1 triad is first mapped to a signed triad (0 -> -1); it is
    balanced when every ordered sign product x_ij * x_ik * x_kj is +1.

    Args:
        triad: Input triad matrix with only 0 and 1.

    Returns:
        Boolean result whether triad is balanced or not.

    Raises:
        ValueError: If there is a self loop in the given triad, triad was not
        3 * 3, or it contains negative entries.
    """
    rows, cols = triad.shape
    if rows != 3 or cols != 3:
        raise ValueError('Triad has unexpected shape.')
    for d in range(rows):
        if triad[d, d]:
            raise ValueError('There is a self loop in given triad: {}.'.format(
                triad))
    if np.any(triad < 0):
        raise ValueError('Triad should only consist of 0 and 1:\n {}.'.format(
            triad))
    signed = triad.copy()
    signed[signed == 0] = -1
    return all(
        signed[i, j] * signed[i, k] * signed[k, j] == 1
        for (i, j, k) in itertools.permutations([0, 1, 2]))
# @enforce.runtime_validation
def is_fullyconnected_clustering_balanced(triad: np.ndarray) -> bool:
    """Checks whether the fully connected input triad is clustering balanced.

    Absent edges (0) are mapped to -1; for every ordered triple (i, j, k)
    where at least one of x_ik, x_kj is positive, the product
    x_ij * x_ik * x_kj must equal 1.

    Args:
        triad: Input 3 x 3 triad adjacency matrix with only 0 and 1 entries.
    Returns:
        Boolean result whether triad is clustering balanced or not.
    Raises:
        ValueError: If there is a self loop in the given triad, the triad
            was not 3 * 3, or it contains negative entries.
    """
    n, m = triad.shape
    if n != 3 or m != 3:
        raise ValueError('Triad has unexpected shape.')
    for i in range(n):
        if triad[i, i]:
            raise ValueError('There is a self loop in given triad: {}.'.format(
                triad))
    if np.any(triad < 0):
        raise ValueError('Triad should only consist of 0 and 1:\n {}.'.format(
            triad))
    # Map missing edges (0) to -1 so products encode balance directly.
    signed_triad = triad.copy()
    signed_triad[signed_triad == 0] = -1
    return all(
        signed_triad[i, j] * signed_triad[i, k] * signed_triad[k, j] == 1
        for i, j, k in itertools.permutations([0, 1, 2])
        if signed_triad[i, k] > 0 or signed_triad[k, j] > 0)
# @enforce.runtime_validation
def is_fullyconnected_transitivity_balanced(triad: np.ndarray) -> bool:
    """Checks whether the fully connected input triad is transitivity balanced.

    Absent edges (0) are mapped to -1; for every ordered triple (i, j, k)
    where both x_ik and x_kj are positive, the product
    x_ij * x_ik * x_kj must equal 1.

    Args:
        triad: Input 3 x 3 triad adjacency matrix with only 0 and 1 entries.
    Returns:
        Boolean result whether triad is transitively balanced or not.
    Raises:
        ValueError: If there is a self loop in the given triad, the triad
            was not 3 * 3, or it contains negative entries.
    """
    n, m = triad.shape
    if n != 3 or m != 3:
        raise ValueError('Triad has unexpected shape.')
    for i in range(n):
        if triad[i, i]:
            raise ValueError('There is a self loop in given triad: {}.'.format(
                triad))
    if np.any(triad < 0):
        raise ValueError('Triad should only consist of 0 and 1:\n {}.'.format(
            triad))
    # Map missing edges (0) to -1 so products encode balance directly.
    signed_triad = triad.copy()
    signed_triad[signed_triad == 0] = -1
    return all(
        signed_triad[i, j] * signed_triad[i, k] * signed_triad[k, j] == 1
        for i, j, k in itertools.permutations([0, 1, 2])
        if signed_triad[i, k] > 0 and signed_triad[k, j] > 0)
# @enforce.runtime_validation
def _get_all_triad_permutations(triad_matrix: np.ndarray) -> Set[str]:
    """Returns string forms of all node relabelings of a triad matrix.

    Every permutation of the three node labels is produced by composing
    pairwise row/column swaps; the resulting matrices are collected as
    strings.

    Args:
        triad_matrix: The triad adjacency matrix.
    Returns:
        Set of string permutations of the triad adjacency matrices.
    Raises:
        None.
    """
    swap = utils.swap_nodes_in_matrix
    swapped01 = swap(triad_matrix, 0, 1)
    swapped02 = swap(triad_matrix, 0, 2)
    swapped12 = swap(triad_matrix, 1, 2)
    variants = [
        triad_matrix,
        swapped01,
        swapped02,
        swapped12,
        # Composing two swaps yields the remaining label permutations.
        swap(swapped01, 0, 2),
        swap(swapped01, 1, 2),
        swap(swapped02, 0, 1),
        swap(swapped02, 1, 2),
        swap(swapped12, 0, 1),
        swap(swapped12, 0, 2)]
    return {str(variant) for variant in variants}
# @enforce.runtime_validation
def generate_all_possible_sparse_triads(
) -> Tuple[Dict[str, int], List[np.ndarray]]:
    """Generates all possible triads in sparse balance theory.

    Enumerates every assignment of {0, 1, -1} to the six off-diagonal
    entries of a 3 x 3 adjacency matrix, then deduplicates matrices that
    are node relabelings of each other.

    Args:
        None.
    Returns:
        Tuple of (triad_map, triad_list): triad_map maps the string form of
        every node permutation of a canonical triad to its index in
        triad_list; triad_list holds one canonical matrix per triad type.
    Raises:
        None.
    """
    possible_edges = [0, 1, -1]
    # itertools.product replaces six nested loops; the iteration order is
    # identical (rightmost position varies fastest).
    adj_matrices = [
        np.array([[0, i1, i2], [i3, 0, i4], [i5, i6, 0]])
        for i1, i2, i3, i4, i5, i6 in itertools.product(
            possible_edges, repeat=6)]
    triad_map = {}
    triad_list = []
    for adj_matrix in adj_matrices:
        # Skip matrices already covered by a previous triad's permutations.
        if str(adj_matrix) not in triad_map:
            triad_list.append(adj_matrix)
            triad_index = len(triad_list) - 1
            for permutation in _get_all_triad_permutations(adj_matrix):
                triad_map[str(permutation)] = triad_index
    return triad_map, triad_list
def generate_all_possible_triads() -> Tuple[Dict[str, int], List[np.ndarray]]:
    """Generates all possible triads in classical balance theory.

    Args:
        None.
    Returns:
        Tuple of (triad_map, triad_list): triad_map maps the string form of
        every node permutation of each triad to its index in triad_list.
    Raises:
        None.
    """
    # Canonical adjacency matrix per classical triad census label.
    triad_list = [
        np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]),   # 300
        np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]]),   # 102
        np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]),   # 003
        np.array([[0, 0, 1], [1, 0, 1], [1, 0, 0]]),   # 120D
        np.array([[0, 1, 1], [0, 0, 0], [1, 1, 0]]),   # 120U
        np.array([[0, 1, 1], [0, 0, 1], [0, 0, 0]]),   # 030T
        np.array([[0, 0, 0], [1, 0, 1], [0, 0, 0]]),   # 021D
        np.array([[0, 1, 0], [0, 0, 0], [0, 1, 0]]),   # 021U
        np.array([[0, 1, 0], [0, 0, 0], [0, 0, 0]]),   # 012
        np.array([[0, 1, 0], [0, 0, 1], [0, 0, 0]]),   # 021C
        np.array([[0, 1, 0], [1, 0, 1], [0, 0, 0]]),   # 111U
        np.array([[0, 1, 0], [1, 0, 0], [0, 1, 0]]),   # 111D
        np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]),   # 030C
        np.array([[0, 1, 1], [1, 0, 0], [1, 0, 0]]),   # 201
        np.array([[0, 1, 0], [1, 0, 1], [1, 0, 0]]),   # 120C
        np.array([[0, 1, 0], [1, 0, 1], [1, 1, 0]])]   # 210
    triad_map = {}
    for triad_index, triad in enumerate(triad_list):
        if str(triad) in triad_map:
            continue
        for permutation in _get_all_triad_permutations(triad):
            triad_map[str(permutation)] = triad_index
    return triad_map, triad_list
# @enforce.runtime_validation
def _detect_triad_type_for_all_subgraph3(
        dgraph: nx.DiGraph,
        triad_map: Dict[str, int] = None,
        sparse_triads: bool = True,
        verbose: bool = False) -> Dict[str, int]:
    """Detects triad type for all possible subgraphs of size 3 in given graph.

    Args:
        dgraph: The directed graph.
        triad_map: Initialized sparse triad map (string to triad type index).
        sparse_triads: Whether want to generate sparse triads if not triad_map.
        verbose: Whether we want it to print '.' as the finished indicator.
    Returns:
        Dictionary of string name of subgraph to its triad type index.
    Raises:
        None.
    """
    if not triad_map:
        if sparse_triads:
            triad_map, _ = generate_all_possible_sparse_triads()
        else:
            triad_map, _ = generate_all_possible_triads()
    nodes_list = np.array(dgraph.nodes())
    # Reduce edge weights to their signs so keys match the triad map.
    adj_matrix = utils.dgraph2adjacency(dgraph)
    adj_matrix[adj_matrix > 0] = 1
    adj_matrix[adj_matrix < 0] = -1
    subgraph2triad_type = {}
    for triad in itertools.combinations(range(len(nodes_list)), 3):
        triad_subgraph_matrix = utils.sub_adjacency_matrix(
            adj_matrix, list(triad))
        triad_subgraph_key = str(np.array(triad_subgraph_matrix, dtype=int))
        if triad_subgraph_key not in triad_map:
            print(triad, 'is not found.')
            print('Their names are:', nodes_list[np.array(triad)])
            print('Simplified subgraph was:', triad_subgraph_matrix)
        else:
            # Sorting the node names makes the dictionary key unique per
            # node set, regardless of combination order.
            subgraph_name = str(tuple(sorted(nodes_list[np.array(triad)])))
            subgraph2triad_type[subgraph_name] = triad_map[triad_subgraph_key]
    if verbose:
        print('.', end='')
    return subgraph2triad_type
# @enforce.runtime_validation
def compute_transition_matrix(
        dgraphs: List[nx.DiGraph],
        unique_triad_num: int,
        triad_map: Dict[str, int] = None,
        sparse_triads: bool = True,
        verbose: bool = False) -> Dict[
            str, Union[List[np.ndarray], List[Dict]]]:
    """Computes transition matrix and triads count for every consequetive graph.

    Args:
        dgraphs: List of graphs in timely order.
        unique_triad_num: Number of unique sparse triads.
        triad_map: Initialized sparse triad map (string to triad type index).
        sparse_triads: Whether want to generate sparse triads if not triad_map.
        verbose: If we want to print a . as progress while computing.
    Returns:
        Dictionary of list of transition matrices and list of all subgraphs3
        with their corresponding triad index.
    Raises:
        ValueError: If the size of dgraphs is not at least 2.
    """
    if len(dgraphs) < 2:
        raise ValueError(
            'We need at least 2 directed graphs for computing transition.')
    if not triad_map:
        if sparse_triads:
            triad_map, triad_list = generate_all_possible_sparse_triads()
        else:
            triad_map, triad_list = generate_all_possible_triads()
        unique_triad_num = len(triad_list)
    # Triad type of every size-3 subgraph, computed per snapshot.
    triads_types = [
        _detect_triad_type_for_all_subgraph3(
            dgraph=dgraph,
            triad_map=triad_map,
            sparse_triads=sparse_triads,
            verbose=verbose)
        for dgraph in dgraphs]
    transition_matrices = []
    for earlier, later in zip(triads_types[:-1], triads_types[1:]):
        counts = np.zeros((unique_triad_num, unique_triad_num))
        # Only subgraphs present in both snapshots contribute a transition.
        for key in set(earlier.keys()) & set(later.keys()):
            counts[earlier[key], later[key]] += 1
        transition_matrices.append(
            utils.make_matrix_row_stochastic(counts))
    return {'transition_matrices': transition_matrices,
            'triads_types': triads_types}
# @enforce.runtime_validation
def _get_eigen_decomposition_of_markov_transition(
transition_matrix: np.ndarray,
aperiodic_irreducible_eps: float = 0.0001) -> Tuple:
"""Gets the eigen value and vectors from transition matrix.
A Markov chain is irreducible if we can go from any state to any state.
This entails all transition probabilities > 0.
A Markov chain is aperiodic if all states are accessible from all other
states. This entails all transition probabilities > 0.
Args:
transition_matrix: Square Markov transition matrix.
aperiodic_irreducible_eps: To make the matrix aperiodic/irreducible.
Returns:
Dictionary of eigen val/vec of irreducible and aperiodic markov chain.
Raises:
ValueError: If the matrix was not squared.
"""
if transition_matrix.shape[0] != transition_matrix.shape[1]:
raise ValueError('Transition matrix is not squared.')
matrix = transition_matrix.copy()
matrix = np.nan_to_num(matrix)
matrix += aperiodic_irreducible_eps
aperiodic_irreducible_transition_matrix = (
matrix.T / np.sum(matrix, axis=1)).T
eigen_values, eigen_vectors = np.linalg.eig(
aperiodic_irreducible_transition_matrix.T)
return eigen_values, eigen_vectors
# @enforce.runtime_validation
def get_stationary_distribution(
        transition_matrix: np.ndarray,
        aperiodic_irreducible_eps: float = 0.0001) -> np.ndarray:
    """Gets the stationary distribution of given transition matrix.

    A Markov chain is irreducible if we can go from any state to any state,
    and aperiodic if all states are accessible from all other states. Both
    are guaranteed by the epsilon added inside the eigen decomposition
    helper, so a unique stationary distribution exists.

    Args:
        transition_matrix: Square Markov transition matrix.
        aperiodic_irreducible_eps: To make the matrix aperiodic/irreducible.
    Returns:
        Stationary distribution array (sums to 1), one entry per state.
    Raises:
        ValueError: If the matrix was not squared.
    """
    eigen_values, eigen_vectors = (
        _get_eigen_decomposition_of_markov_transition(
            transition_matrix=transition_matrix,
            aperiodic_irreducible_eps=aperiodic_irreducible_eps))
    # The Perron (largest) eigenvalue of a stochastic matrix is exactly 1.
    # Taking argmax over real parts is robust: the previous
    # `np.where(eigen_values > 0.99)[0][0]` relied on ordering comparisons
    # of possibly-complex eigenvalues and could raise IndexError if
    # numerical noise pushed the eigenvalue below the hard-coded 0.99.
    index = int(np.argmax(np.real(eigen_values)))
    stationary_distribution = np.real(eigen_vectors[:, index])
    # Eigenvectors come back with arbitrary scale/sign; normalize to sum 1.
    stationary_distribution /= np.sum(stationary_distribution)
    return stationary_distribution
# @enforce.runtime_validation
def get_mixing_time_range(
        transition_matrix: np.ndarray,
        aperiodic_irreducible_eps: float = 0.0001,
        distance_from_stationary_eps: float = 0.01,
        verbose: bool = False) -> np.float64:
    """Gets the mixing time with respect to given distance eps.

    For more information one can look at:
    https://www.math.dartmouth.edu/~pw/M100W11/nathan.pdf
    and
    https://math.dartmouth.edu/~pw/math100w13/kale.pdf
    Argument distance_from_stationary_eps is eps in the document of the
    first link.

    Args:
        transition_matrix: Square Markov transition matrix.
        aperiodic_irreducible_eps: To make the matrix aperiodic/irreducible.
        distance_from_stationary_eps: Distance from stationary distribution.
        verbose: Whether we need it to print about lambda2 and pie_star.
    Returns:
        Number of steps in float type.
    Raises:
        ValueError: If the matrix was not squared.
    """
    eigen_values, eigen_vectors = (
        _get_eigen_decomposition_of_markov_transition(
            transition_matrix=transition_matrix,
            aperiodic_irreducible_eps=aperiodic_irreducible_eps))
    # Perron eigenvalue of a stochastic matrix is exactly 1; argmax over
    # real parts avoids ordering comparisons on possibly-complex
    # eigenvalues which the previous `eigen_values > 0.99` lookup used.
    index = int(np.argmax(np.real(eigen_values)))
    stationary_distribution = np.real(eigen_vectors[:, index])
    # Consistent with get_stationary_distribution: normalize to sum 1.
    stationary_distribution /= np.sum(stationary_distribution)
    # Second-largest eigenvalue by real part. Sorting the real parts keeps
    # lambda2 (and hence tau) real, instead of sorting complex numbers.
    lambda2 = sorted(np.real(eigen_values), reverse=True)[1]
    pie_star = np.min(stationary_distribution)
    if verbose:
        print('\nlambda2: {}\npie_star: {}'.format(
            np.real(lambda2), pie_star))
    # Spectral-gap bound: tau = 1/(1-lambda2) * ln(1/(pi* . eps)).
    tau = (1/(1-lambda2)) * np.log(1/(pie_star*distance_from_stationary_eps))
    return np.real(tau)
# @enforce.runtime_validation
def _randomize_network(
        dgraph: nx.DiGraph,
        switching_count_coef: int = 300) -> nx.DiGraph:
    """Generates a degree-preserving randomized version of a directed graph.

    Repeatedly swaps the endpoints of randomly chosen edge pairs so that
    every node keeps its in- and out-degree. Reciprocated (double) edges
    and one-way (single) edges are switched separately, so reciprocity is
    preserved as well. The loop runs until switching_count_coef * |E|
    successful switches have happened.

    Args:
        dgraph: The input directed graph.
        switching_count_coef: Multiplier on the edge count that sets the
            total number of successful switches to perform.
    Returns:
        A new directed graph (built from the shuffled adjacency matrix)
        with the same in- and out-degree sequence as the input.
    Raises:
        Exception: If no successful switch happens across MAX_TIMES
            consecutive iterations (the algorithm is considered stuck).
    """
    # If after MAX_TIMES times, it couldn't switch, we call it consensus
    # (convergence) and terminate the algorithm.
    MAX_TIMES = 1000
    edge_count = len(dgraph.edges())
    adj = utils.dgraph2adjacency(dgraph=dgraph)
    desired_switching_count = switching_count_coef * edge_count
    switching_count = 0
    prev_switching_count = 0
    counter = 0
    while switching_count < desired_switching_count:
        # Randomly choose 2 edges.
        counter += 1
        # Binarize |adj| so edge weights/signs do not affect selection.
        binarized_adj = abs(adj.copy())
        binarized_adj[binarized_adj > 0] = 1
        # Entry (i, j) is 1 only when both (i, j) and (j, i) exist.
        both_double_edges = np.floor((binarized_adj + binarized_adj.T)/2)
        double_edges = np.where(both_double_edges > 0)
        # Double edges.
        # NOTE(review): np.random.choice(size=2, replace=False) raises if
        # fewer than 2 reciprocated edges exist — confirm callers only pass
        # graphs with enough double (and single, below) edges.
        i, j = np.random.choice(
            range(len(double_edges[0])), size=2, replace=False)
        s1 = double_edges[0][i]
        t1 = double_edges[1][i]
        s2 = double_edges[0][j]
        t2 = double_edges[1][j]
        # Switch only when no parallel edge or self loop would be created.
        if not (adj[s1, t2] or adj[s2, t1] or adj[t2, s1] or adj[t1, s2]
                or s1 == t2 or s1 == s2 or s2 == t1 or t1 == t2):
            # Move both directions of each reciprocated pair to keep the
            # edges reciprocated after the switch.
            utils.swap_two_elements_in_matrix(adj, s1, t1, s1, t2)
            utils.swap_two_elements_in_matrix(adj, t1, s1, t2, s1)
            utils.swap_two_elements_in_matrix(adj, s2, t2, s2, t1)
            utils.swap_two_elements_in_matrix(adj, t2, s2, t1, s2)
            switching_count += 1
        # Single edges.
        # Need to compute it again because adj might have been changed.
        binarized_adj = abs(adj.copy())
        binarized_adj[binarized_adj > 0] = 1
        both_double_edges = np.floor((binarized_adj + binarized_adj.T)/2)
        # Edges that exist in exactly one direction.
        single_edges = np.where(binarized_adj - both_double_edges > 0)
        i, j = np.random.choice(
            range(len(single_edges[0])), size=2, replace=False)
        s1 = single_edges[0][i]
        t1 = single_edges[1][i]
        s2 = single_edges[0][j]
        t2 = single_edges[1][j]
        if not(adj[s1, t2] or adj[s2, t1]
                or s1 == t2 or s1 == s2 or s2 == t1 or t1 == t2):
            utils.swap_two_elements_in_matrix(adj, s1, t1, s1, t2)
            utils.swap_two_elements_in_matrix(adj, s2, t2, s2, t1)
            switching_count += 1
        if not counter % MAX_TIMES:
            # Every MAX_TIMES iterations, verify at least one switch landed
            # since the last check; otherwise abort as non-convergent.
            if prev_switching_count == switching_count:
                raise Exception('Not converged.')
            else:
                prev_switching_count = switching_count
    return utils.adjacency2digraph(adj_matrix=adj, similar_this_dgraph=dgraph)
# @enforce.runtime_validation
def compute_randomized_transition_matrix(
        dgraph1: nx.DiGraph,
        dgraph2: nx.DiGraph,
        unique_triad_num: int,
        triad_map: Dict[str, int] = None,
        switching_count_coef: int = 300,
        randomized_num: int = 100,
        sparse_triads: bool = True) -> List[np.ndarray]:
    """Computes transitions between many randomized versions of two networks.

    Builds a null model: in every round both input graphs are replaced by
    degree-preserving randomizations, and the triad transition matrix
    between the randomized pair is recorded.

    Args:
        dgraph1: First directed graph.
        dgraph2: Second directed graph.
        unique_triad_num: Number of unique sparse triads.
        triad_map: Initialized sparse triad map (string to triad type index).
        switching_count_coef: The coef for number of edge switchings.
        randomized_num: Number of transition matrices to generate.
        sparse_triads: Whether want to generate sparse triads if not triad_map.
    Returns:
        List of transition matrices from subsequent randomized networks.
    Raises:
        None.
    """
    if not triad_map:
        if sparse_triads:
            triad_map, triad_list = generate_all_possible_sparse_triads()
        else:
            triad_map, triad_list = generate_all_possible_triads()
        unique_triad_num = len(triad_list)
    rand_transition_matrices = []
    for _ in range(randomized_num):
        shuffled_pair = [
            _randomize_network(
                dgraph=graph, switching_count_coef=switching_count_coef)
            for graph in (dgraph1, dgraph2)]
        transition = compute_transition_matrix(
            dgraphs=shuffled_pair,
            unique_triad_num=unique_triad_num,
            triad_map=triad_map)['transition_matrices'][0]
        rand_transition_matrices.append(transition)
    return rand_transition_matrices
# @enforce.runtime_validation
def get_robustness_of_transitions(
        transition_matrices: List[np.ndarray],
        lnorm: int = 2) -> pd.DataFrame:
    """Gets stationary dist of each transition and dist/corr with average one.

    We compute the average transition matrix from all given transition
    matrices and then the stationary distribution of that average. The
    objective is to find out how different/robust every transition is with
    respect to the average: we report the lnorm distance and Pearson
    correlation of each transition matrix, and of its stationary
    distribution, against the average ones.

    Args:
        transition_matrices: List of squared transition matrices.
        lnorm: The norm integer (l1 or l2 usually).
    Returns:
        Dataframe of distance and Pearsons correlation from average transition.
    Raises:
        None.
    """
    n, _ = transition_matrices[0].shape
    avg_transition_matrix = np.zeros((n, n))
    for transition_matrix in transition_matrices:
        avg_transition_matrix += transition_matrix
    # Bug fix: the average must divide by the NUMBER of matrices, not by
    # the matrix dimension n (the old `/= n` is only correct when
    # len(transition_matrices) happens to equal n).
    avg_transition_matrix /= len(transition_matrices)
    result = []
    avg_stationary_distribution = get_stationary_distribution(
        avg_transition_matrix)
    for index, transition_matrix in enumerate(transition_matrices):
        # NOTE: np.linalg.norm on a 2-D argument with ord=lnorm is a matrix
        # norm (spectral norm for lnorm=2), not an elementwise norm.
        matrix_dist_distance = np.linalg.norm(
            avg_transition_matrix - transition_matrix, lnorm)
        matrix_dist_rval, matrix_dist_pval = sp.stats.pearsonr(
            avg_transition_matrix.flatten(), transition_matrix.flatten())
        stationary_dist = get_stationary_distribution(transition_matrix)
        stationary_dist_distance = np.linalg.norm(
            avg_stationary_distribution - stationary_dist, lnorm)
        stationary_dist_rval, stationary_dist_pval = sp.stats.pearsonr(
            avg_stationary_distribution, stationary_dist)
        result.append(
            ['Period {} to Period {}'.format(index+1, index+2),
             matrix_dist_distance,
             matrix_dist_rval,
             matrix_dist_pval,
             stationary_dist_distance,
             stationary_dist_rval,
             stationary_dist_pval])
    return pd.DataFrame(
        result, columns=[
            'Transitions',
            'Matrix L{}-Norm Dist. from Average'.format(lnorm),
            'Matrix Pearson r-value',
            'Matrix Pearson p-value',
            'Stationary Dist. L{}-Norm Dist. from Average'.format(lnorm),
            'Stationary Dist. Pearson r-value',
            'Stationary Dist. Pearson p-value'])
# @enforce.runtime_validation
def generate_converted_graphs(
        dgraph: nx.DiGraph,
        convert_from: float = 0.0,
        convert_to: float = 1.0,
        percentage: float = 5.0,
        how_many_to_generate: int = 10) -> List[nx.DiGraph]:
    """Generates digraphs with a random fraction of edge weights converted.

    From one input graph, produces several variants in which a given
    percentage of edges whose weight equals convert_from are flipped to
    convert_to. This supports robustness checks: if some edge weights were
    inferred wrongly (they should have been convert_to but were recorded as
    convert_from), downstream analyses can be re-run on these perturbed
    graphs to see whether the conclusions are stable.

    Args:
        dgraph: Given directed graph.
        convert_from: Converts from this value.
        convert_to: Converts to this value.
        percentage: The percentage of edges to do randomly sign conversion.
        how_many_to_generate: How many new directed graphs to be generated.
    Returns:
        A list of generated directed graphs.
    Raises:
        ValueError: If percentage was wrong or dgraph does not contain
            convert_from value.
    """
    # The process is easier to be applied on adjacency matrix.
    if not 0 <= percentage <= 100:
        raise ValueError(
            'Inputted percentage was wrong: {}.'.format(percentage))
    base_matrix = utils.dgraph2adjacency(dgraph=dgraph)
    # Mark the diagonal with a value that cannot equal either target so
    # self loops are never selected for conversion.
    untargeted_value = max(convert_from, convert_to) + 5
    np.fill_diagonal(base_matrix, untargeted_value)
    candidate_edges = np.where(base_matrix == convert_from)
    candidate_count = len(candidate_edges[0])
    if not candidate_count:
        raise ValueError(
            'Inputted directed graph does not contain the edge weight'
            ' equals {}.'.format(convert_from))
    pick_count = int(percentage * candidate_count / 100)
    generated_dgraphs = []
    for _ in range(how_many_to_generate):
        converted = base_matrix.copy()
        chosen = np.random.choice(candidate_count, pick_count, replace=False)
        for edge_index in chosen:
            converted[candidate_edges[0][edge_index],
                      candidate_edges[1][edge_index]] = convert_to
        # This is a social network and should not contain any self-loop.
        np.fill_diagonal(converted, 0)
        generated_dgraphs.append(utils.adjacency2digraph(
            adj_matrix=converted,
            similar_this_dgraph=dgraph))
    return generated_dgraphs
| [
"numpy.log",
"utils.adjacency2digraph",
"numpy.array",
"networkx.closeness_centrality",
"networkx.betweenness_centrality",
"utils.swap_nodes_in_matrix",
"numpy.linalg.norm",
"scipy.stats.pearsonr",
"datetime.timedelta",
"utils.check_required_columns",
"numpy.mean",
"seaborn.set",
"numpy.wher... | [((1748, 1833), 'utils.check_required_columns', 'utils.check_required_columns', (['selected_edge_list', "['source', 'target', 'weight']"], {}), "(selected_edge_list, ['source', 'target', 'weight']\n )\n", (1776, 1833), False, 'import utils\n'), ((3343, 3431), 'utils.check_required_columns', 'utils.check_required_columns', (['edge_list', "['edge_date', 'source', 'target', 'weight']"], {}), "(edge_list, ['edge_date', 'source', 'target',\n 'weight'])\n", (3371, 3431), False, 'import utils\n'), ((6226, 6280), 'utils.check_required_columns', 'utils.check_required_columns', (['edge_list', "['edge_date']"], {}), "(edge_list, ['edge_date'])\n", (6254, 6280), False, 'import utils\n'), ((9150, 9166), 'numpy.mean', 'np.mean', (['weights'], {}), '(weights)\n', (9157, 9166), True, 'import numpy as np\n'), ((9196, 9211), 'numpy.std', 'np.std', (['weights'], {}), '(weights)\n', (9202, 9211), True, 'import numpy as np\n'), ((9400, 9424), 'networkx.to_undirected', 'nx.to_undirected', (['dgraph'], {}), '(dgraph)\n', (9416, 9424), True, 'import networkx as nx\n'), ((9587, 9637), 'networkx.algebraic_connectivity', 'nx.algebraic_connectivity', (['ugraph'], {'weight': '"""weight"""'}), "(ugraph, weight='weight')\n", (9612, 9637), True, 'import networkx as nx\n'), ((10185, 10232), 'networkx.algebraic_connectivity', 'nx.algebraic_connectivity', (['GCC'], {'weight': '"""weight"""'}), "(GCC, weight='weight')\n", (10210, 10232), True, 'import networkx as nx\n'), ((10272, 10288), 'networkx.diameter', 'nx.diameter', (['GCC'], {}), '(GCC)\n', (10283, 10288), True, 'import networkx as nx\n'), ((12354, 12400), 'seaborn.set', 'sns.set', ([], {'rc': "{'figure.figsize': (6 * n, 6 * m)}"}), "(rc={'figure.figsize': (6 * n, 6 * m)})\n", (12361, 12400), True, 'import seaborn as sns\n'), ((12499, 12541), 'networkx.layout.spring_layout', 'nx.layout.spring_layout', (['aggregated_dgraph'], {}), '(aggregated_dgraph)\n', (12522, 12541), True, 'import networkx as nx\n'), ((16814, 16838), 
'networkx.simple_cycles', 'nx.simple_cycles', (['dgraph'], {}), '(dgraph)\n', (16830, 16838), True, 'import networkx as nx\n'), ((18275, 18305), 'utils.dgraph2adjacency', 'utils.dgraph2adjacency', (['dgraph'], {}), '(dgraph)\n', (18297, 18305), False, 'import utils\n'), ((18418, 18440), 'numpy.any', 'np.any', (['(adj_matrix < 0)'], {}), '(adj_matrix < 0)\n', (18424, 18440), True, 'import numpy as np\n'), ((20601, 20631), 'utils.dgraph2adjacency', 'utils.dgraph2adjacency', (['dgraph'], {}), '(dgraph)\n', (20623, 20631), False, 'import utils\n'), ((23067, 23096), 'utils.dgraph2adjacency', 'utils.dgraph2adjacency', (['graph'], {}), '(graph)\n', (23089, 23096), False, 'import utils\n'), ((23255, 23280), 'numpy.linalg.eig', 'np.linalg.eig', (['adj_matrix'], {}), '(adj_matrix)\n', (23268, 23280), True, 'import numpy as np\n'), ((23294, 23328), 'numpy.linalg.eig', 'np.linalg.eig', (['connectivity_matrix'], {}), '(connectivity_matrix)\n', (23307, 23328), True, 'import numpy as np\n'), ((25568, 25597), 'utils.dgraph2adjacency', 'utils.dgraph2adjacency', (['graph'], {}), '(graph)\n', (25590, 25597), False, 'import utils\n'), ((25914, 25945), 'numpy.linalg.eig', 'np.linalg.eig', (['laplacian_matrix'], {}), '(laplacian_matrix)\n', (25927, 25945), True, 'import numpy as np\n'), ((35745, 35762), 'numpy.any', 'np.any', (['(triad < 0)'], {}), '(triad < 0)\n', (35751, 35762), True, 'import numpy as np\n'), ((36896, 36913), 'numpy.any', 'np.any', (['(triad < 0)'], {}), '(triad < 0)\n', (36902, 36913), True, 'import numpy as np\n'), ((38141, 38158), 'numpy.any', 'np.any', (['(triad < 0)'], {}), '(triad < 0)\n', (38147, 38158), True, 'import numpy as np\n'), ((39078, 39124), 'utils.swap_nodes_in_matrix', 'utils.swap_nodes_in_matrix', (['triad_matrix', '(0)', '(1)'], {}), '(triad_matrix, 0, 1)\n', (39104, 39124), False, 'import utils\n'), ((39137, 39183), 'utils.swap_nodes_in_matrix', 'utils.swap_nodes_in_matrix', (['triad_matrix', '(0)', '(2)'], {}), '(triad_matrix, 0, 2)\n', (39163, 
39183), False, 'import utils\n'), ((39196, 39242), 'utils.swap_nodes_in_matrix', 'utils.swap_nodes_in_matrix', (['triad_matrix', '(1)', '(2)'], {}), '(triad_matrix, 1, 2)\n', (39222, 39242), False, 'import utils\n'), ((43650, 43680), 'utils.dgraph2adjacency', 'utils.dgraph2adjacency', (['dgraph'], {}), '(dgraph)\n', (43672, 43680), False, 'import utils\n'), ((48006, 48027), 'numpy.nan_to_num', 'np.nan_to_num', (['matrix'], {}), '(matrix)\n', (48019, 48027), True, 'import numpy as np\n'), ((48195, 48251), 'numpy.linalg.eig', 'np.linalg.eig', (['aperiodic_irreducible_transition_matrix.T'], {}), '(aperiodic_irreducible_transition_matrix.T)\n', (48208, 48251), True, 'import numpy as np\n'), ((49446, 49477), 'numpy.sum', 'np.sum', (['stationary_distribution'], {}), '(stationary_distribution)\n', (49452, 49477), True, 'import numpy as np\n'), ((50961, 50992), 'numpy.min', 'np.min', (['stationary_distribution'], {}), '(stationary_distribution)\n', (50967, 50992), True, 'import numpy as np\n'), ((51191, 51203), 'numpy.real', 'np.real', (['tau'], {}), '(tau)\n', (51198, 51203), True, 'import numpy as np\n'), ((52072, 52109), 'utils.dgraph2adjacency', 'utils.dgraph2adjacency', ([], {'dgraph': 'dgraph'}), '(dgraph=dgraph)\n', (52094, 52109), False, 'import utils\n'), ((54313, 54380), 'utils.adjacency2digraph', 'utils.adjacency2digraph', ([], {'adj_matrix': 'adj', 'similar_this_dgraph': 'dgraph'}), '(adj_matrix=adj, similar_this_dgraph=dgraph)\n', (54336, 54380), False, 'import utils\n'), ((57215, 57231), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (57223, 57231), True, 'import numpy as np\n'), ((60463, 60500), 'utils.dgraph2adjacency', 'utils.dgraph2adjacency', ([], {'dgraph': 'dgraph'}), '(dgraph=dgraph)\n', (60485, 60500), False, 'import utils\n'), ((60584, 60630), 'numpy.fill_diagonal', 'np.fill_diagonal', (['original_adj_matrix', 'unq_val'], {}), '(original_adj_matrix, unq_val)\n', (60600, 60630), True, 'import numpy as np\n'), ((60648, 60693), 
'numpy.where', 'np.where', (['(original_adj_matrix == convert_from)'], {}), '(original_adj_matrix == convert_from)\n', (60656, 60693), True, 'import numpy as np\n'), ((3564, 3616), 'numpy.floor', 'np.floor', (['((end_date - start_date).days / (weeks * 7))'], {}), '((end_date - start_date).days / (weeks * 7))\n', (3572, 3616), True, 'import numpy as np\n'), ((6417, 6469), 'numpy.floor', 'np.floor', (['((end_date - start_date).days / (weeks * 7))'], {}), '((end_date - start_date).days / (weeks * 7))\n', (6425, 6469), True, 'import numpy as np\n'), ((12455, 12478), 'networkx.compose_all', 'nx.compose_all', (['dgraphs'], {}), '(dgraphs)\n', (12469, 12478), True, 'import networkx as nx\n'), ((12595, 12623), 'matplotlib.pyplot.subplot', 'plt.subplot', (['m', 'n', '(index + 1)'], {}), '(m, n, index + 1)\n', (12606, 12623), True, 'import matplotlib.pyplot as plt\n'), ((12632, 12684), 'networkx.draw', 'nx.draw', (['dgraph'], {'pos': 'all_positions', 'with_labels': '(True)'}), '(dgraph, pos=all_positions, with_labels=True)\n', (12639, 12684), True, 'import networkx as nx\n'), ((22949, 22973), 'networkx.to_undirected', 'nx.to_undirected', (['dgraph'], {}), '(dgraph)\n', (22965, 22973), True, 'import networkx as nx\n'), ((23475, 23497), 'numpy.real', 'np.real', (['balance_ratio'], {}), '(balance_ratio)\n', (23482, 23497), True, 'import numpy as np\n'), ((25450, 25474), 'networkx.to_undirected', 'nx.to_undirected', (['dgraph'], {}), '(dgraph)\n', (25466, 25474), True, 'import networkx as nx\n'), ((26016, 26031), 'numpy.real', 'np.real', (['result'], {}), '(result)\n', (26023, 26031), True, 'import numpy as np\n'), ((30379, 30412), 'itertools.permutations', 'itertools.permutations', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (30401, 30412), False, 'import itertools\n'), ((31571, 31604), 'itertools.permutations', 'itertools.permutations', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (31593, 31604), False, 'import itertools\n'), ((33421, 33454), 'itertools.permutations', 
'itertools.permutations', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (33443, 33454), False, 'import itertools\n'), ((35962, 35995), 'itertools.permutations', 'itertools.permutations', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (35984, 35995), False, 'import itertools\n'), ((37113, 37146), 'itertools.permutations', 'itertools.permutations', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (37135, 37146), False, 'import itertools\n'), ((38358, 38391), 'itertools.permutations', 'itertools.permutations', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (38380, 38391), False, 'import itertools\n'), ((41240, 41283), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 1], [1, 0, 1], [1, 1, 0]])\n', (41248, 41283), True, 'import numpy as np\n'), ((41312, 41355), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 0, 0]])\n', (41320, 41355), True, 'import numpy as np\n'), ((41384, 41427), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (41392, 41427), True, 'import numpy as np\n'), ((41456, 41499), 'numpy.array', 'np.array', (['[[0, 0, 1], [1, 0, 1], [1, 0, 0]]'], {}), '([[0, 0, 1], [1, 0, 1], [1, 0, 0]])\n', (41464, 41499), True, 'import numpy as np\n'), ((41529, 41572), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 0], [1, 1, 0]]'], {}), '([[0, 1, 1], [0, 0, 0], [1, 1, 0]])\n', (41537, 41572), True, 'import numpy as np\n'), ((41602, 41645), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 1], [0, 0, 1], [0, 0, 0]])\n', (41610, 41645), True, 'import numpy as np\n'), ((41675, 41718), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 0, 0], [1, 0, 1], [0, 0, 0]])\n', (41683, 41718), True, 'import numpy as np\n'), ((41748, 41791), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 1, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 1, 0]])\n', (41756, 41791), True, 'import numpy as np\n'), ((41821, 
41864), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 0, 0]])\n', (41829, 41864), True, 'import numpy as np\n'), ((41893, 41936), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 1], [0, 0, 0]])\n', (41901, 41936), True, 'import numpy as np\n'), ((41966, 42009), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [0, 0, 0]])\n', (41974, 42009), True, 'import numpy as np\n'), ((42039, 42082), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 1, 0]])\n', (42047, 42082), True, 'import numpy as np\n'), ((42112, 42155), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 1], [1, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, 1], [1, 0, 0]])\n', (42120, 42155), True, 'import numpy as np\n'), ((42185, 42228), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 0, 0], [1, 0, 0]]'], {}), '([[0, 1, 1], [1, 0, 0], [1, 0, 0]])\n', (42193, 42228), True, 'import numpy as np\n'), ((42257, 42300), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 0, 0]])\n', (42265, 42300), True, 'import numpy as np\n'), ((42330, 42373), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [1, 1, 0]])\n', (42338, 42373), True, 'import numpy as np\n'), ((46613, 46659), 'numpy.zeros', 'np.zeros', (['(unique_triad_num, unique_triad_num)'], {}), '((unique_triad_num, unique_triad_num))\n', (46621, 46659), True, 'import numpy as np\n'), ((46799, 46850), 'utils.make_matrix_row_stochastic', 'utils.make_matrix_row_stochastic', (['transition_matrix'], {}), '(transition_matrix)\n', (46831, 46850), False, 'import utils\n'), ((51130, 51183), 'numpy.log', 'np.log', (['(1 / (pie_star * distance_from_stationary_eps))'], {}), '(1 / (pie_star * distance_from_stationary_eps))\n', (51136, 51183), True, 'import numpy as np\n'), ((52467, 52514), 
'numpy.floor', 'np.floor', (['((binarized_adj + binarized_adj.T) / 2)'], {}), '((binarized_adj + binarized_adj.T) / 2)\n', (52475, 52514), True, 'import numpy as np\n'), ((52536, 52567), 'numpy.where', 'np.where', (['(both_double_edges > 0)'], {}), '(both_double_edges > 0)\n', (52544, 52567), True, 'import numpy as np\n'), ((53468, 53515), 'numpy.floor', 'np.floor', (['((binarized_adj + binarized_adj.T) / 2)'], {}), '((binarized_adj + binarized_adj.T) / 2)\n', (53476, 53515), True, 'import numpy as np\n'), ((53537, 53584), 'numpy.where', 'np.where', (['(binarized_adj - both_double_edges > 0)'], {}), '(binarized_adj - both_double_edges > 0)\n', (53545, 53584), True, 'import numpy as np\n'), ((57576, 57640), 'numpy.linalg.norm', 'np.linalg.norm', (['(avg_transition_matrix - transition_matrix)', 'lnorm'], {}), '(avg_transition_matrix - transition_matrix, lnorm)\n', (57590, 57640), True, 'import numpy as np\n'), ((57902, 57970), 'numpy.linalg.norm', 'np.linalg.norm', (['(avg_stationary_distribution - stationary_dist)', 'lnorm'], {}), '(avg_stationary_distribution - stationary_dist, lnorm)\n', (57916, 57970), True, 'import numpy as np\n'), ((58037, 58100), 'scipy.stats.pearsonr', 'sp.stats.pearsonr', (['avg_stationary_distribution', 'stationary_dist'], {}), '(avg_stationary_distribution, stationary_dist)\n', (58054, 58100), True, 'import scipy as sp\n'), ((61375, 61406), 'numpy.fill_diagonal', 'np.fill_diagonal', (['adj_matrix', '(0)'], {}), '(adj_matrix, 0)\n', (61391, 61406), True, 'import numpy as np\n'), ((61434, 61508), 'utils.adjacency2digraph', 'utils.adjacency2digraph', ([], {'adj_matrix': 'adj_matrix', 'similar_this_dgraph': 'dgraph'}), '(adj_matrix=adj_matrix, similar_this_dgraph=dgraph)\n', (61457, 61508), False, 'import utils\n'), ((2318, 2330), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (2328, 2330), True, 'import networkx as nx\n'), ((3865, 3894), 'datetime.timedelta', 'datetime.timedelta', (['(weeks * 7)'], {}), '(weeks * 7)\n', (3883, 3894), 
False, 'import datetime\n'), ((4839, 4859), 'networkx.adj_matrix', 'nx.adj_matrix', (['graph'], {}), '(graph)\n', (4852, 4859), True, 'import networkx as nx\n'), ((5066, 5083), 'numpy.sum', 'np.sum', (['out_edges'], {}), '(out_edges)\n', (5072, 5083), True, 'import numpy as np\n'), ((5118, 5134), 'numpy.sum', 'np.sum', (['in_edges'], {}), '(in_edges)\n', (5124, 5134), True, 'import numpy as np\n'), ((6647, 6676), 'datetime.timedelta', 'datetime.timedelta', (['(weeks * 7)'], {}), '(weeks * 7)\n', (6665, 6676), False, 'import datetime\n'), ((9244, 9265), 'numpy.where', 'np.where', (['(weights > 0)'], {}), '(weights > 0)\n', (9252, 9265), True, 'import numpy as np\n'), ((9302, 9323), 'numpy.where', 'np.where', (['(weights < 0)'], {}), '(weights < 0)\n', (9310, 9323), True, 'import numpy as np\n'), ((10045, 10070), 'numpy.where', 'np.where', (['(gcc_weights > 0)'], {}), '(gcc_weights > 0)\n', (10053, 10070), True, 'import numpy as np\n'), ((10111, 10136), 'numpy.where', 'np.where', (['(gcc_weights < 0)'], {}), '(gcc_weights < 0)\n', (10119, 10136), True, 'import numpy as np\n'), ((39324, 39363), 'utils.swap_nodes_in_matrix', 'utils.swap_nodes_in_matrix', (['mat01', '(0)', '(2)'], {}), '(mat01, 0, 2)\n', (39350, 39363), False, 'import utils\n'), ((39374, 39413), 'utils.swap_nodes_in_matrix', 'utils.swap_nodes_in_matrix', (['mat01', '(1)', '(2)'], {}), '(mat01, 1, 2)\n', (39400, 39413), False, 'import utils\n'), ((39424, 39463), 'utils.swap_nodes_in_matrix', 'utils.swap_nodes_in_matrix', (['mat02', '(0)', '(1)'], {}), '(mat02, 0, 1)\n', (39450, 39463), False, 'import utils\n'), ((39474, 39513), 'utils.swap_nodes_in_matrix', 'utils.swap_nodes_in_matrix', (['mat02', '(1)', '(2)'], {}), '(mat02, 1, 2)\n', (39500, 39513), False, 'import utils\n'), ((39524, 39563), 'utils.swap_nodes_in_matrix', 'utils.swap_nodes_in_matrix', (['mat12', '(0)', '(1)'], {}), '(mat12, 0, 1)\n', (39550, 39563), False, 'import utils\n'), ((39574, 39613), 'utils.swap_nodes_in_matrix', 
'utils.swap_nodes_in_matrix', (['mat12', '(0)', '(2)'], {}), '(mat12, 0, 2)\n', (39600, 39613), False, 'import utils\n'), ((43976, 44018), 'numpy.array', 'np.array', (['triad_subgraph_matrix'], {'dtype': 'int'}), '(triad_subgraph_matrix, dtype=int)\n', (43984, 44018), True, 'import numpy as np\n'), ((48135, 48157), 'numpy.sum', 'np.sum', (['matrix'], {'axis': '(1)'}), '(matrix, axis=1)\n', (48141, 48157), True, 'import numpy as np\n'), ((49301, 49330), 'numpy.where', 'np.where', (['(eigen_values > 0.99)'], {}), '(eigen_values > 0.99)\n', (49309, 49330), True, 'import numpy as np\n'), ((50720, 50749), 'numpy.where', 'np.where', (['(eigen_values > 0.99)'], {}), '(eigen_values > 0.99)\n', (50728, 50749), True, 'import numpy as np\n'), ((52969, 53023), 'utils.swap_two_elements_in_matrix', 'utils.swap_two_elements_in_matrix', (['adj', 's1', 't1', 's1', 't2'], {}), '(adj, s1, t1, s1, t2)\n', (53002, 53023), False, 'import utils\n'), ((53036, 53090), 'utils.swap_two_elements_in_matrix', 'utils.swap_two_elements_in_matrix', (['adj', 't1', 's1', 't2', 's1'], {}), '(adj, t1, s1, t2, s1)\n', (53069, 53090), False, 'import utils\n'), ((53103, 53157), 'utils.swap_two_elements_in_matrix', 'utils.swap_two_elements_in_matrix', (['adj', 's2', 't2', 's2', 't1'], {}), '(adj, s2, t2, s2, t1)\n', (53136, 53157), False, 'import utils\n'), ((53170, 53224), 'utils.swap_two_elements_in_matrix', 'utils.swap_two_elements_in_matrix', (['adj', 't2', 's2', 't1', 's2'], {}), '(adj, t2, s2, t1, s2)\n', (53203, 53224), False, 'import utils\n'), ((53931, 53985), 'utils.swap_two_elements_in_matrix', 'utils.swap_two_elements_in_matrix', (['adj', 's1', 't1', 's1', 't2'], {}), '(adj, s1, t1, s1, t2)\n', (53964, 53985), False, 'import utils\n'), ((53998, 54052), 'utils.swap_two_elements_in_matrix', 'utils.swap_two_elements_in_matrix', (['adj', 's2', 't2', 's2', 't1'], {}), '(adj, s2, t2, s2, t1)\n', (54031, 54052), False, 'import utils\n'), ((3798, 3827), 'datetime.timedelta', 'datetime.timedelta', 
(['(weeks * 7)'], {}), '(weeks * 7)\n', (3816, 3827), False, 'import datetime\n'), ((6580, 6609), 'datetime.timedelta', 'datetime.timedelta', (['(weeks * 7)'], {}), '(weeks * 7)\n', (6598, 6609), False, 'import datetime\n'), ((15287, 15303), 'math.pow', 'math.pow', (['(10)', '(-6)'], {}), '(10, -6)\n', (15295, 15303), False, 'import math\n'), ((15340, 15356), 'math.pow', 'math.pow', (['(10)', '(-6)'], {}), '(10, -6)\n', (15348, 15356), False, 'import math\n'), ((51073, 51089), 'numpy.real', 'np.real', (['lambda2'], {}), '(lambda2)\n', (51080, 51089), True, 'import numpy as np\n'), ((8380, 8406), 'networkx.load_centrality', 'nx.load_centrality', (['dgraph'], {}), '(dgraph)\n', (8398, 8406), True, 'import networkx as nx\n'), ((8477, 8526), 'networkx.eigenvector_centrality', 'nx.eigenvector_centrality', (['dgraph'], {'max_iter': '(10000)'}), '(dgraph, max_iter=10000)\n', (8502, 8526), True, 'import networkx as nx\n'), ((8594, 8624), 'networkx.harmonic_centrality', 'nx.harmonic_centrality', (['dgraph'], {}), '(dgraph)\n', (8616, 8624), True, 'import networkx as nx\n'), ((8693, 8724), 'networkx.closeness_centrality', 'nx.closeness_centrality', (['dgraph'], {}), '(dgraph)\n', (8716, 8724), True, 'import networkx as nx\n'), ((8795, 8828), 'networkx.betweenness_centrality', 'nx.betweenness_centrality', (['dgraph'], {}), '(dgraph)\n', (8820, 8828), True, 'import networkx as nx\n'), ((9501, 9535), 'networkx.clustering', 'nx.clustering', (['ugraph'], {'weight': 'None'}), '(ugraph, weight=None)\n', (9514, 9535), True, 'import networkx as nx\n'), ((9726, 9757), 'networkx.connected_components', 'nx.connected_components', (['ugraph'], {}), '(ugraph)\n', (9749, 9757), True, 'import networkx as nx\n'), ((11677, 11689), 'numpy.sign', 'np.sign', (['wij'], {}), '(wij)\n', (11684, 11689), True, 'import numpy as np\n'), ((11693, 11705), 'numpy.sign', 'np.sign', (['wji'], {}), '(wji)\n', (11700, 11705), True, 'import numpy as np\n'), ((23377, 23397), 'numpy.power', 'np.power', 
(['lambdas', '(3)'], {}), '(lambdas, 3)\n', (23385, 23397), True, 'import numpy as np\n'), ((23408, 23424), 'numpy.power', 'np.power', (['mus', '(3)'], {}), '(mus, 3)\n', (23416, 23424), True, 'import numpy as np\n'), ((27012, 27036), 'numpy.sign', 'np.sign', (['(xij * xik * xkj)'], {}), '(xij * xik * xkj)\n', (27019, 27036), True, 'import numpy as np\n'), ((44159, 44174), 'numpy.array', 'np.array', (['triad'], {}), '(triad)\n', (44167, 44174), True, 'import numpy as np\n'), ((40391, 40440), 'numpy.array', 'np.array', (['[[0, i1, i2], [i3, 0, i4], [i5, i6, 0]]'], {}), '([[0, i1, i2], [i3, 0, i4], [i5, i6, 0]])\n', (40399, 40440), True, 'import numpy as np\n'), ((44516, 44531), 'numpy.array', 'np.array', (['triad'], {}), '(triad)\n', (44524, 44531), True, 'import numpy as np\n')] |
# Real-time garbage-segmentation demo: runs a TFLite segmentation model on
# every video frame, overlays the prediction as a heatmap, draws a rotated
# bounding box around the largest detected region, and -- after several
# consecutive detections -- uploads the annotated frame in a background thread.
import tensorflow as tf
import numpy as np
import cv2
from time import time
from firebase import *
from waste_index_calculator import *
from threading import Thread
import matplotlib.pyplot as plt
# Model input resolution; frames are resized to this before inference.
IMG_HEIGHT = 320
IMG_WIDTH = 320
tflite_path = 'new_model_0_75/my_model_fp32.tflite'
# tflite_path = 'new_model_0_75_-1to1/my_model_fp32_320x320.tflite'
# Load the TFLite model once and cache its input/output tensor metadata.
tflite_interpreter = tf.lite.Interpreter(model_path=tflite_path)
tflite_interpreter.allocate_tensors()
tflite_input_details = tflite_interpreter.get_input_details()
tflite_output_details = tflite_interpreter.get_output_details()
cam = cv2.VideoCapture('../garbage_detection/vid3.mp4')
fps = 40#round(cam.get(cv2.CAP_PROP_FPS))
# writer = cv2.VideoWriter('segout_bb2.mp4',cv2.VideoWriter_fourcc(*'mp4v'), fps,(IMG_WIDTH,IMG_HEIGHT))
cv2.namedWindow("output", cv2.WINDOW_NORMAL)
cv2.resizeWindow("output", 600,600)
# DET_COUNT counts frames with a large-enough detection; an upload is
# attempted once it reaches 7 (see the DET_COUNT>=7 branch below).
DET_COUNT = 0
start = time()
counter = 0
last_upload_t = 0
while True:
    ret, img = cam.read()
    if not ret:
        print("video finished.")
        break
    # img = cv2.rotate(img,cv2.ROTATE_90_CLOCKWISE)
    # img = np.ascontiguousarray(img)
    # Keep a uint8 BGR copy at model resolution for display/overlay.
    oimg = cv2.resize(img, (IMG_HEIGHT, IMG_WIDTH)).astype(np.uint8)
    # cv2.imshow("output",oimg)
    # Model input: RGB float32 scaled to [-1, 1].
    img = cv2.cvtColor(oimg, cv2.COLOR_BGR2RGB).astype(np.float32)
    img = img/127.5 - 1
    tflite_interpreter.set_tensor(tflite_input_details[0]['index'], np.expand_dims(img, axis=0))
    tflite_interpreter.invoke()
    # pred appears to be a per-pixel score map in [0, 1] -- TODO confirm the
    # model's output range.
    pred = tflite_interpreter.get_tensor(tflite_output_details[0]['index']).squeeze()
    # Zero out low-confidence pixels, then scale to 0-255 for visualization.
    pred[pred<0.7] = 0
    # pred[pred>=0.5] = 1
    pred = (pred*255).astype(np.uint8)
    mask = cv2.resize(pred, (IMG_WIDTH, IMG_HEIGHT))
    # mult = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)#*img
    # mult[:,:,0] = 0
    # mult[:,:,2] = 0
    # heatmap_img = mult
    # Blend the prediction heatmap over the frame and stamp the current FPS.
    heatmap_img = cv2.applyColorMap(mask, cv2.COLORMAP_INFERNO)
    heated = cv2.addWeighted(heatmap_img, 0.6, oimg, 1, 0)
    cv2.putText(heated, str(fps)[:5]+" FPS", (0, 15),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 1)
    contours,_ = cv2.findContours(mask,cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        # Only the largest contour is treated as a detection candidate.
        c = max(contours,key=cv2.contourArea)
        cArea = cv2.contourArea(c)
        if cArea > 3000:
            DET_COUNT+= 1
            #Getting the bounding rectangle
            # x,y,w,h = cv2.boundingRect(c)
            #Drawing the bounding rectangle
            # cv2.rectangle(heated,(x,y),(x+w,y+h),(0,255,0),2)
            # min rect
            # Draw the minimum-area (rotated) rectangle around the contour.
            rect = cv2.minAreaRect(c)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            cv2.drawContours(heated,[box],0,(0,255,0),2)
            #Getting the moments
            # m = cv2.moments(c)
            #moving mouse to the centroid
    # # cv2.drawContours(heated,contours,-1,(255,0,0),2)
    # cv2.imshow("output",heated)
    cv2.imshow("output", heated)
    # cv2.imshow("output", np.concatenate((heated, heatmap_img), axis=1))
    # Recompute the displayed FPS roughly once per second.
    counter+=1
    if (time() - start) > 1:
        fps = counter / (time() - start)
        # print("FPS:", fps)
        counter = 0
        start = time()
    # writer.write(heated)
    if DET_COUNT>=7:
        DET_COUNT=0
        # GenerateRandomCoordinates / add_data / add_indexes come from the
        # firebase / waste_index_calculator star-imports above.
        latitude,longitude = GenerateRandomCoordinates()
        himg = cv2.cvtColor(heated, cv2.COLOR_BGR2RGB)
        show_time = time()
        plt.imshow(himg)
        plt.show()
        # Exclude time spent blocked in plt.show() from the upload clock.
        last_upload_t += time() - show_time
        # NOTE(review): the intent looks like rate-limiting uploads to one
        # per ~2 seconds -- confirm this bookkeeping does what is intended.
        if (time() - last_upload_t) > 2:
            # Upload in a daemon thread so the video loop is not blocked.
            t1=Thread(target=add_data, args=(heated,latitude,longitude,cArea,))
            t1.setDaemon(True)
            t1.start()
            last_upload_t = time()
            print("[*] Uploading Image.")
    key = cv2.waitKey(1) & 0xff
    if key == ord('q'):
        break
# Wait for a pending upload (if any) and finalize the indexes; any error
# (e.g. t1 was never created) is deliberately ignored.
try:
    t1.join()
    add_indexes()
except:
    pass
cv2.destroyAllWindows()
# cam.release()
# writer.release()
"cv2.imshow",
"cv2.destroyAllWindows",
"tensorflow.lite.Interpreter",
"matplotlib.pyplot.imshow",
"cv2.resizeWindow",
"cv2.contourArea",
"cv2.minAreaRect",
"cv2.addWeighted",
"cv2.waitKey",
"cv2.drawContours",
"cv2.boxPoints",
"numpy.int0",
"cv2.cvtColor",
"cv2.resize",
"time.time",
"c... | [((373, 416), 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_path': 'tflite_path'}), '(model_path=tflite_path)\n', (392, 416), True, 'import tensorflow as tf\n'), ((588, 637), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""../garbage_detection/vid3.mp4"""'], {}), "('../garbage_detection/vid3.mp4')\n", (604, 637), False, 'import cv2\n'), ((786, 830), 'cv2.namedWindow', 'cv2.namedWindow', (['"""output"""', 'cv2.WINDOW_NORMAL'], {}), "('output', cv2.WINDOW_NORMAL)\n", (801, 830), False, 'import cv2\n'), ((831, 867), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""output"""', '(600)', '(600)'], {}), "('output', 600, 600)\n", (847, 867), False, 'import cv2\n'), ((890, 896), 'time.time', 'time', ([], {}), '()\n', (894, 896), False, 'from time import time\n'), ((3392, 3415), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3413, 3415), False, 'import cv2\n'), ((1570, 1611), 'cv2.resize', 'cv2.resize', (['pred', '(IMG_WIDTH, IMG_HEIGHT)'], {}), '(pred, (IMG_WIDTH, IMG_HEIGHT))\n', (1580, 1611), False, 'import cv2\n'), ((1741, 1786), 'cv2.applyColorMap', 'cv2.applyColorMap', (['mask', 'cv2.COLORMAP_INFERNO'], {}), '(mask, cv2.COLORMAP_INFERNO)\n', (1758, 1786), False, 'import cv2\n'), ((1797, 1842), 'cv2.addWeighted', 'cv2.addWeighted', (['heatmap_img', '(0.6)', 'oimg', '(1)', '(0)'], {}), '(heatmap_img, 0.6, oimg, 1, 0)\n', (1812, 1842), False, 'import cv2\n'), ((1954, 2016), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (1970, 2016), False, 'import cv2\n'), ((2613, 2641), 'cv2.imshow', 'cv2.imshow', (['"""output"""', 'heated'], {}), "('output', heated)\n", (2623, 2641), False, 'import cv2\n'), ((1342, 1369), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (1356, 1369), True, 'import numpy as np\n'), ((2089, 2107), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (2104, 2107), 
False, 'import cv2\n'), ((2833, 2839), 'time.time', 'time', ([], {}), '()\n', (2837, 2839), False, 'from time import time\n'), ((2956, 2995), 'cv2.cvtColor', 'cv2.cvtColor', (['heated', 'cv2.COLOR_BGR2RGB'], {}), '(heated, cv2.COLOR_BGR2RGB)\n', (2968, 2995), False, 'import cv2\n'), ((3010, 3016), 'time.time', 'time', ([], {}), '()\n', (3014, 3016), False, 'from time import time\n'), ((3019, 3035), 'matplotlib.pyplot.imshow', 'plt.imshow', (['himg'], {}), '(himg)\n', (3029, 3035), True, 'import matplotlib.pyplot as plt\n'), ((3038, 3048), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3046, 3048), True, 'import matplotlib.pyplot as plt\n'), ((3295, 3309), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3306, 3309), False, 'import cv2\n'), ((1102, 1142), 'cv2.resize', 'cv2.resize', (['img', '(IMG_HEIGHT, IMG_WIDTH)'], {}), '(img, (IMG_HEIGHT, IMG_WIDTH))\n', (1112, 1142), False, 'import cv2\n'), ((1199, 1236), 'cv2.cvtColor', 'cv2.cvtColor', (['oimg', 'cv2.COLOR_BGR2RGB'], {}), '(oimg, cv2.COLOR_BGR2RGB)\n', (1211, 1236), False, 'import cv2\n'), ((2328, 2346), 'cv2.minAreaRect', 'cv2.minAreaRect', (['c'], {}), '(c)\n', (2343, 2346), False, 'import cv2\n'), ((2356, 2375), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (2369, 2375), False, 'import cv2\n'), ((2385, 2397), 'numpy.int0', 'np.int0', (['box'], {}), '(box)\n', (2392, 2397), True, 'import numpy as np\n'), ((2401, 2451), 'cv2.drawContours', 'cv2.drawContours', (['heated', '[box]', '(0)', '(0, 255, 0)', '(2)'], {}), '(heated, [box], 0, (0, 255, 0), 2)\n', (2417, 2451), False, 'import cv2\n'), ((2730, 2736), 'time.time', 'time', ([], {}), '()\n', (2734, 2736), False, 'from time import time\n'), ((3068, 3074), 'time.time', 'time', ([], {}), '()\n', (3072, 3074), False, 'from time import time\n'), ((3128, 3194), 'threading.Thread', 'Thread', ([], {'target': 'add_data', 'args': '(heated, latitude, longitude, cArea)'}), '(target=add_data, args=(heated, latitude, longitude, 
cArea))\n', (3134, 3194), False, 'from threading import Thread\n'), ((3248, 3254), 'time.time', 'time', ([], {}), '()\n', (3252, 3254), False, 'from time import time\n'), ((2770, 2776), 'time.time', 'time', ([], {}), '()\n', (2774, 2776), False, 'from time import time\n'), ((3093, 3099), 'time.time', 'time', ([], {}), '()\n', (3097, 3099), False, 'from time import time\n')] |
import glob
import os.path
import random
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
# --- Inception-v3 transfer-learning configuration --------------------------
BOTTLENECK_TENSOR_SIZE = 2048  # width of the pool_3 bottleneck vector
BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'  # bottleneck tensor in the frozen graph
JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'  # tensor fed with raw JPEG bytes
MODEL_DIR = './inception_dec_2015'  # directory holding the frozen Inception graph
MODEL_FILE = 'tensorflow_inception_graph.pb'
CACKE_DIR = './my_bottleneck'  # cache directory for computed bottleneck vectors
INPUT_DATA = 'F:/dataset'  # dataset root: one sub-directory per class label
VALIDATION_PERCENTAGE = 10  # percent of images held out for validation
TEST_PERCENTAGE = 10  # percent of images held out for testing
LEARNING_RATE = 0.01  # SGD learning rate for the final layer
STEPS = 2000  # number of training iterations
BATCH = 100  # images per training batch
def create_image_lists(testing_percentage, validation_percentage):
    """Scan INPUT_DATA and randomly partition each class's images.

    Every sub-directory of INPUT_DATA is treated as one class; its jpg/png
    files are assigned at random to the 'training', 'testing' or
    'validation' split according to the given percentages.

    Returns a dict mapping lower-cased class name to
    {'dir', 'training', 'testing', 'validation'}.
    """
    result = {}
    walk_dirs = [entry[0] for entry in os.walk(INPUT_DATA)]
    # The first entry yielded by os.walk is INPUT_DATA itself; only its
    # sub-directories correspond to class labels.
    for sub_dir in walk_dirs[1:]:
        dir_name = os.path.basename(sub_dir)
        file_list = []
        for extension in ['jpg', 'png']:
            pattern = os.path.join(INPUT_DATA, dir_name, '*.' + extension)
            file_list.extend(glob.glob(pattern))
        if not file_list:
            continue
        training_images = []
        testing_images = []
        validation_images = []
        for file_name in file_list:
            base_name = os.path.basename(file_name)
            # Random split: roll 0-99 once per image.
            chance = np.random.randint(100)
            if chance < validation_percentage:
                validation_images.append(base_name)
            elif chance < validation_percentage + testing_percentage:
                testing_images.append(base_name)
            else:
                training_images.append(base_name)
        result[dir_name.lower()] = {
            'dir': dir_name,
            'training': training_images,
            'testing': testing_images,
            'validation': validation_images,
        }
    return result
def get_image_path(image_lists, image_dir, label_name, index, category):
    """Return the full path of one image.

    `index` is taken modulo the number of files in the requested split, so
    any non-negative index is valid.
    """
    label_entry = image_lists[label_name]
    file_names = label_entry[category]
    base_name = file_names[index % len(file_names)]
    return os.path.join(image_dir, label_entry['dir'], base_name)
def get_bottleneck_path(image_lists, label_name, index, category):
    """Return the cache-file path for one image's bottleneck vector (same
    relative layout as the image, but rooted at CACKE_DIR)."""
    return get_image_path(image_lists, CACKE_DIR, label_name, index, category)
def run_bottleneck_on_image(sess, image_data, image_data_tensor, bottleneck_tensor):
    """Feed one encoded image through the network and return its bottleneck
    activations as a flat numpy vector (singleton dimensions removed)."""
    raw_values = sess.run(bottleneck_tensor, {image_data_tensor: image_data})
    return np.squeeze(raw_values)
def get_or_create_bottleneck(sess, image_lists, label_name, index,
                             category, jpeg_data_tensor, bottleneck_tensor):
    """Return the bottleneck vector for one image, computing and caching it
    on first use.

    The vector is stored as a comma-separated text file under CACKE_DIR,
    mirroring the image's location; subsequent calls read the cache.
    Returns a list of floats.
    """
    label_lists = image_lists[label_name]
    sub_dir = label_lists['dir']
    sub_dir_path = os.path.join(CACKE_DIR, sub_dir)
    # Make sure the per-class cache directory exists.
    if not os.path.exists(sub_dir_path):
        os.makedirs(sub_dir_path)
    bottleneck_path = get_bottleneck_path(image_lists, label_name, index, category)
    if not os.path.exists(bottleneck_path):
        # Cache miss: run the image through the network and persist the result.
        image_path = get_image_path(image_lists, INPUT_DATA, label_name, index, category)
        image_data = gfile.FastGFile(image_path, 'rb').read()
        # print(np.shape(image_data))
        bottleneck_values = run_bottleneck_on_image(sess, image_data, jpeg_data_tensor,bottleneck_tensor)
        bottleneck_string = ','.join(str(x) for x in bottleneck_values)
        with open(bottleneck_path, 'w') as bottleneck_file:
            bottleneck_file.write(bottleneck_string)
    else:
        # Cache hit: parse the comma-separated floats back into a list.
        with open(bottleneck_path, 'r') as bottleneck_file:
            bottleneck_string = bottleneck_file.read()
        bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
    return bottleneck_values
def get_random_cached_bottlenecks(sess, n_classes, image_lists, how_many,
                                  category, jpeg_data_tensor, bottleneck_tensor):
    """Sample `how_many` random (bottleneck, one-hot label) pairs from the
    given split ('training' / 'validation' / ...).

    Returns (bottlenecks, ground_truths) as two parallel lists.
    """
    bottlenecks = []
    ground_truths = []
    class_names = list(image_lists.keys())
    for _ in range(how_many):
        label_index = random.randrange(n_classes)
        label_name = class_names[label_index]
        # Random image id; get_image_path reduces it modulo the split size.
        image_index = random.randrange(65536)
        bottleneck = get_or_create_bottleneck(
            sess, image_lists, label_name, image_index,
            category, jpeg_data_tensor, bottleneck_tensor)
        one_hot = np.zeros(n_classes, dtype=np.float32)
        one_hot[label_index] = 1.0
        bottlenecks.append(bottleneck)  # data
        ground_truths.append(one_hot)  # label
    return bottlenecks, ground_truths
def get_te_bottlenecks(sess, image_lists, n_classes, jpeg_data_tensor, bottleneck_tensor):
    """Build the complete test set: one (bottleneck, one-hot label) pair for
    every image in every class's 'testing' split.

    Returns (bottlenecks, ground_truths) as two parallel lists.
    """
    bottlenecks = []
    ground_truths = []
    for label_index, label_name in enumerate(list(image_lists.keys())):
        category = 'testing'
        for image_index, _unused in enumerate(image_lists[label_name][category]):
            bottleneck = get_or_create_bottleneck(
                sess, image_lists, label_name, image_index,
                category, jpeg_data_tensor, bottleneck_tensor)
            one_hot = np.zeros(n_classes, dtype=np.float32)
            one_hot[label_index] = 1.0
            bottlenecks.append(bottleneck)
            ground_truths.append(one_hot)  # label
    return bottlenecks, ground_truths
def main(argv=None):
    """Transfer-learning driver: load the frozen Inception graph, train a
    softmax layer on cached bottleneck vectors, report validation accuracy
    during training, save a checkpoint, and print final test accuracy."""
    image_lists = create_image_lists(TEST_PERCENTAGE, VALIDATION_PERCENTAGE)
    n_classes = len(image_lists.keys())
    print('classes',n_classes)
    # Import the frozen Inception graph and grab the two tensors we need:
    # the bottleneck output and the raw-JPEG input.
    with gfile.FastGFile(os.path.join(MODEL_DIR, MODEL_FILE), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    bottleneck_tensor, jpeg_data_tensor = tf.import_graph_def(graph_def,
                                                              return_elements=[BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME])
    bottleneck_input = tf.placeholder(
        tf.float32, [None, BOTTLENECK_TENSOR_SIZE], name='BottleneckInputPlaceholder')
    ground_truth_input = tf.placeholder(tf.float32, [None, n_classes], name='GroundTruthInput')
    # New trainable head: a single fully-connected softmax layer.
    with tf.name_scope('final_training_ops'):
        weights = tf.Variable(tf.truncated_normal(
            [BOTTLENECK_TENSOR_SIZE, n_classes], stddev=0.1))
        biases = tf.Variable(tf.zeros([n_classes]))
        logits = tf.matmul(bottleneck_input, weights)+biases
        final_tensor = tf.nn.softmax(logits)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=ground_truth_input)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(cross_entropy_mean)
    # Accuracy op: fraction of argmax matches between prediction and label.
    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(final_tensor, 1), tf.argmax(ground_truth_input, 1))
        evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    saver = tf.train.Saver()
    # Cap GPU memory use at 50% but allow growth beyond the initial grab.
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.5
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        print('w before:', weights)
        print(sess.run(weights))
        print('b before:', biases)
        print(sess.run(biases))
        for i in range(STEPS):
            # One SGD step on a random batch of cached training bottlenecks.
            train_bottlenecks, train_ground_truth = \
                get_random_cached_bottlenecks(sess, n_classes,
                                              image_lists, BATCH,
                                              'training',
                                              jpeg_data_tensor,
                                              bottleneck_tensor)
            sess.run(train_step, feed_dict={bottleneck_input: train_bottlenecks,
                                            ground_truth_input: train_ground_truth})
            # Report validation accuracy every 100 steps and on the last step.
            if i % 100 == 0 or i + 1 == STEPS:
                validation_bottlenecks, validation_ground_truth =\
                    get_random_cached_bottlenecks(sess, n_classes,
                                                  image_lists, BATCH,
                                                  'validation',
                                                  jpeg_data_tensor,
                                                  bottleneck_tensor)
                validation_acc = sess.run(evaluation_step, feed_dict={
                    bottleneck_input: validation_bottlenecks, ground_truth_input: validation_ground_truth})
                print('Step %d: Valdation acc on random sampled %d examples = %.1f%%'%
                      (i, BATCH, validation_acc*100))
        print('w after', weights)
        print(sess.run(weights))
        print('b after', biases)
        print(sess.run(biases))
        # Persist the trained head, then evaluate once on the full test split.
        saver.save(sess, './checkpoint_dir/Mal_Detection_Inception_AE_training_3')
        test_bottlenecks, test_ground_truth = \
            get_te_bottlenecks(sess, image_lists, n_classes, jpeg_data_tensor, bottleneck_tensor)
        test_acc = sess.run(evaluation_step, feed_dict={
            bottleneck_input: test_bottlenecks, ground_truth_input: test_ground_truth})
        print("Final test acc = %.1f%%" % (test_acc*100))
if __name__ == '__main__':
    # tf.app.run() parses TF flags and then invokes main(argv).
    tf.app.run()
| [
"tensorflow.nn.softmax",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tensorflow.app.run",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.GraphDef",
"tensorflow.matmul",
"tensorflow.ConfigProto",
"tensorflow.zeros",
"glob.glob",
"random.randrange",
"numpy.squeeze",
"tensorflo... | [((2643, 2672), 'numpy.squeeze', 'np.squeeze', (['bottleneck_values'], {}), '(bottleneck_values)\n', (2653, 2672), True, 'import numpy as np\n'), ((5976, 6075), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'return_elements': '[BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME]'}), '(graph_def, return_elements=[BOTTLENECK_TENSOR_NAME,\n JPEG_DATA_TENSOR_NAME])\n', (5995, 6075), True, 'import tensorflow as tf\n'), ((6141, 6239), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, BOTTLENECK_TENSOR_SIZE]'], {'name': '"""BottleneckInputPlaceholder"""'}), "(tf.float32, [None, BOTTLENECK_TENSOR_SIZE], name=\n 'BottleneckInputPlaceholder')\n", (6155, 6239), True, 'import tensorflow as tf\n'), ((6273, 6343), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, n_classes]'], {'name': '"""GroundTruthInput"""'}), "(tf.float32, [None, n_classes], name='GroundTruthInput')\n", (6287, 6343), True, 'import tensorflow as tf\n'), ((6698, 6787), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'logits', 'labels': 'ground_truth_input'}), '(logits=logits, labels=\n ground_truth_input)\n', (6740, 6787), True, 'import tensorflow as tf\n'), ((6811, 6840), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {}), '(cross_entropy)\n', (6825, 6840), True, 'import tensorflow as tf\n'), ((7181, 7197), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (7195, 7197), True, 'import tensorflow as tf\n'), ((7212, 7228), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (7226, 7228), True, 'import tensorflow as tf\n'), ((9568, 9580), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (9578, 9580), True, 'import tensorflow as tf\n'), ((4204, 4231), 'random.randrange', 'random.randrange', (['n_classes'], {}), '(n_classes)\n', (4220, 4231), False, 'import random\n'), ((4317, 4340), 'random.randrange', 'random.randrange', 
(['(65536)'], {}), '(65536)\n', (4333, 4340), False, 'import random\n'), ((4555, 4592), 'numpy.zeros', 'np.zeros', (['n_classes'], {'dtype': 'np.float32'}), '(n_classes, dtype=np.float32)\n', (4563, 4592), True, 'import numpy as np\n'), ((5872, 5885), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (5883, 5885), True, 'import tensorflow as tf\n'), ((6356, 6391), 'tensorflow.name_scope', 'tf.name_scope', (['"""final_training_ops"""'], {}), "('final_training_ops')\n", (6369, 6391), True, 'import tensorflow as tf\n'), ((6653, 6674), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (6666, 6674), True, 'import tensorflow as tf\n'), ((6951, 6978), 'tensorflow.name_scope', 'tf.name_scope', (['"""evaluation"""'], {}), "('evaluation')\n", (6964, 6978), True, 'import tensorflow as tf\n'), ((7345, 7370), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (7355, 7370), True, 'import tensorflow as tf\n'), ((7396, 7429), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7427, 7429), True, 'import tensorflow as tf\n'), ((1373, 1395), 'numpy.random.randint', 'np.random.randint', (['(100)'], {}), '(100)\n', (1390, 1395), True, 'import numpy as np\n'), ((5370, 5407), 'numpy.zeros', 'np.zeros', (['n_classes'], {'dtype': 'np.float32'}), '(n_classes, dtype=np.float32)\n', (5378, 5407), True, 'import numpy as np\n'), ((6426, 6494), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[BOTTLENECK_TENSOR_SIZE, n_classes]'], {'stddev': '(0.1)'}), '([BOTTLENECK_TENSOR_SIZE, n_classes], stddev=0.1)\n', (6445, 6494), True, 'import tensorflow as tf\n'), ((6540, 6561), 'tensorflow.zeros', 'tf.zeros', (['[n_classes]'], {}), '([n_classes])\n', (6548, 6561), True, 'import tensorflow as tf\n'), ((6583, 6619), 'tensorflow.matmul', 'tf.matmul', (['bottleneck_input', 'weights'], {}), '(bottleneck_input, weights)\n', (6592, 6619), True, 'import tensorflow as tf\n'), ((6861, 6909), 
'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['LEARNING_RATE'], {}), '(LEARNING_RATE)\n', (6894, 6909), True, 'import tensorflow as tf\n'), ((7021, 7047), 'tensorflow.argmax', 'tf.argmax', (['final_tensor', '(1)'], {}), '(final_tensor, 1)\n', (7030, 7047), True, 'import tensorflow as tf\n'), ((7049, 7081), 'tensorflow.argmax', 'tf.argmax', (['ground_truth_input', '(1)'], {}), '(ground_truth_input, 1)\n', (7058, 7081), True, 'import tensorflow as tf\n'), ((7127, 7166), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (7134, 7166), True, 'import tensorflow as tf\n'), ((1065, 1085), 'glob.glob', 'glob.glob', (['file_glob'], {}), '(file_glob)\n', (1074, 1085), False, 'import glob\n'), ((3317, 3350), 'tensorflow.python.platform.gfile.FastGFile', 'gfile.FastGFile', (['image_path', '"""rb"""'], {}), "(image_path, 'rb')\n", (3332, 3350), False, 'from tensorflow.python.platform import gfile\n')] |
import sys
# Make sibling/parent package directories importable regardless of cwd.
sys.path.extend(["../../", "../", "./"])
import warnings
# Silence library warnings globally for this script.
warnings.filterwarnings("ignore")
from sklearn.preprocessing import MinMaxScaler
import gensim
from sklearn.metrics import classification_report
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from sklearn.model_selection import cross_val_predict
import time
import argparse
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
from gensim.models import word2vec
from sklearn.externals import joblib
# Split the fasta files into words; by default sequences are split into k-mers.
def save_wordfile(fastafile, splite, kmer):
    """Split every sequence in the given FASTA files into k-mer "words".

    Parameters
    ----------
    fastafile : iterable of str
        Paths of FASTA files to read.
    splite : int
        0 -> overlapping k-mers (stride 1); anything else ->
        non-overlapping k-mers (stride ``kmer``).
    kmer : int
        Length of each word.

    Returns
    -------
    list of list of str
        One list of k-mer words per sequence, in file order.

    Bug fix vs. the original: sequence lines are now stripped as they are
    accumulated, so multi-line FASTA sequences no longer produce k-mers
    containing embedded newline characters.
    """
    def _to_words(seq):
        # Only windows that are exactly `kmer` characters long are kept.
        step = 1 if splite == 0 else kmer
        return [seq[j:j + kmer] for j in range(0, len(seq), step)
                if j <= len(seq) - kmer]

    train_words = []
    for path in fastafile:
        # Context manager guarantees the file is closed even on error.
        with open(path) as f:
            seq = ""
            seen_header = False
            for line in f:
                if line.startswith(">"):
                    if seen_header:
                        # A new header ends the previous sequence.
                        train_words.append(_to_words(seq))
                        seq = ""
                    seen_header = True
                else:
                    # Strip the newline/whitespace so multi-line sequences
                    # are concatenated cleanly.
                    seq += line.strip()
            # Emit the final sequence of the file.
            train_words.append(_to_words(seq))
    return train_words
def splite_word(trainfasta_file, kmer, splite):
    """Convert the training FASTA file(s) into per-sequence k-mer word lists."""
    # Thin wrapper: save_wordfile does the actual parsing/splitting.
    return save_wordfile(trainfasta_file, splite, kmer)
# Train the word vectors and convert the word lists into a csv feature matrix.
def save_csv(words, model, b):
    """Turn each word list into one feature row by averaging its embeddings.

    Words missing from the model's vocabulary are replaced by a constant
    100-dimensional vector filled with `b`.

    Returns a 2-D numpy array with one row per word list.
    """
    vocab = model.wv.index2word
    rows = []
    for word_list in words:
        vectors = []
        for token in word_list:
            token = token.strip()
            if token in vocab:
                vectors.append(model[token])
            else:
                # Out-of-vocabulary token: use the constant fill vector.
                vectors.append([b] * 100)
        rows.append(np.mean(np.array(vectors), axis=0))
    return np.array(rows)
def tocsv(train_word, sg, hs, window, size, model_name, b, iter1, spmodel):
    """Obtain a word2vec embedding (load a pre-trained one, or train a new
    one and save it), then convert the word lists to a feature matrix."""
    if spmodel:
        # A pre-trained embedding was supplied: load it instead of training.
        print("loading model ......")
        model = gensim.models.KeyedVectors.load_word2vec_format(spmodel, binary=False)
    else:
        model = word2vec.Word2Vec(train_word, iter=iter1, sg=sg, hs=hs, min_count=1,
                                  window=window, size=size)
        model.wv.save_word2vec_format(model_name, binary=False)
    return save_csv(train_word, model, b)
# svm
def svm(traincsv, train_y, cv, n_job, mms, ss, grad, model):
    """Train/evaluate an SVM classifier on the embedding feature matrix.

    traincsv : 2-D feature matrix; train_y : labels.
    cv       : number of cross-validation folds; if falsy, the classifier is
               fit on all data and saved to `model` instead.
    n_job    : parallel jobs for grid search / cross-validation.
    mms / ss : apply MinMaxScaler / StandardScaler to the features.
    grad     : if truthy, grid-search C/gamma/kernel first, otherwise use
               fixed hyperparameters (C=0.5, gamma=0.05, RBF).
    model    : output path for the fitted classifier (joblib dump).
    """
    cv = cv
    cpu_num = n_job
    svc = SVC(probability=True)
    X = traincsv
    y = train_y
    if mms:
        print("MinMaxScaler")
        minMax = MinMaxScaler()
        minMax.fit(X)
        X = minMax.transform(X)
    if ss:
        print("StandardScaler")
        scaler = StandardScaler()
        scaler.fit(X)
        X = scaler.transform(X)
    # Grid search over C/gamma (RBF) and C (linear).
    def get_bestparameter(X, y):
        # Candidate hyperparameters: powers of two.
        a = [2 ** x for x in range(-2, 5)]
        b = [2 ** x for x in range(-5, 2)]
        parameters = [
            {
                'C': a,
                'gamma': b,
                'kernel': ['rbf']
            },
            {
                'C': a,
                'kernel': ['linear']
            }
        ]
        clf = GridSearchCV(svc, parameters, cv=cv, scoring='accuracy', n_jobs=cpu_num)
        clf.fit(X, y)
        print("Best parameters set found on development set:")
        print(clf.best_params_)
        print(clf.best_score_)
        return clf
    if grad:
        # Rebuild the classifier with the best parameters found.
        clf = get_bestparameter(X, y)
        p = clf.best_params_
        if clf.best_params_["kernel"] == "rbf":
            clf = SVC(C=p["C"], kernel=p["kernel"], gamma=p["gamma"], probability=True)
        else:
            clf = SVC(C=p["C"], kernel=p["kernel"], probability=True)
    else:
        clf = SVC(C=0.5, gamma=0.05, probability=True)
    if cv:
        # Cross-validated evaluation: print ACC/MCC, a classification
        # report, and the confusion matrix.
        print("------------------------cv--------------------------")
        predicted = cross_val_predict(clf, X, y, cv=cv, n_jobs=cpu_num)
        # y_predict_prob = cross_val_predict(clf, X, y, cv=cv, n_jobs=cpu_num, method='predict_proba')
        # ROC_AUC_area = metrics.roc_auc_score(y, y_predict_prob[:, 1])
        # print("AUC:{}".format(ROC_AUC_area))
        print("ACC:{}".format(metrics.accuracy_score(y, predicted)))
        print("MCC:{}\n".format(metrics.matthews_corrcoef(y, predicted)))
        print(classification_report(y, predicted))
        print("confusion matrix\n")
        print(pd.crosstab(pd.Series(y, name='Actual'), pd.Series(predicted, name='Predicted')))
    else:
        # No CV requested: fit on everything and persist the model.
        clf.fit(X, y)
        joblib.dump(clf, model)
# main
def main():
    """CLI entry point: read FASTA file(s), embed k-mers with word2vec, train/evaluate an SVM."""
    parser = argparse.ArgumentParser()
    # train set
    parser.add_argument('-i', required=True, nargs="+", help="trainfasta file name")
    parser.add_argument('-o', required=True, help="model")
    # word2vec
    # fix: type=int so a user-supplied value is not a string (it is used numerically downstream)
    parser.add_argument('-b', type=int, default=0, help="Fill in the vector")
    parser.add_argument('-sg', type=int, default=1, help="")
    parser.add_argument('-iter', type=int, default=5, help="")
    parser.add_argument('-hs', type=int, default=0, help="")
    parser.add_argument('-premodel', help="pretrainmodel")
    parser.add_argument('-window_size', type=int, default=20, help="window size")
    parser.add_argument('-model_name', default="model.model", help="embedding model")
    parser.add_argument('-hidden_size', type=int, default=100, help="The dimension of word")
    # svm
    # NOTE(review): type=bool is an argparse pitfall -- bool("False") is True, so ANY
    # explicit value enables these options; action='store_true' would be the proper
    # fix but changes the CLI shape, so it is only flagged here.
    parser.add_argument('-mms', type=bool, default=False, help="minmaxscaler")
    parser.add_argument('-ss', type=bool, default=False, help="StandardScaler")
    parser.add_argument('-cv', type=int, help="cross validation")
    # fix: type=int so the value can be passed to sklearn's n_jobs
    parser.add_argument('-n_job', '-n', type=int, default=-1, help="num of thread")
    parser.add_argument('-grad', type=bool, default=False, help="grad")
    # splite
    parser.add_argument('-kmer', '-k', type=int, default=3, help="k-mer: k size")
    parser.add_argument('-splite', '-s', type=int, default=0, help="kmer splite(0) or normal splite(1)")
    args = parser.parse_args()
    print(args)
    if args.splite == 0:
        print("kmer splite !")
    else:
        print("normal splite !")
    # per-class sample counts: each FASTA record spans two lines (header + sequence)
    y = []
    for i in args.i:
        f = open(i).readlines()
        y.append(int(len(f) / 2))
    print(y)
    # one label per input file, repeated once per sequence in that file
    num_y = len(args.i)
    train_y = []
    for i in range(num_y):
        train_y += [i] * y[i]
    start_time = time.time()
    train_word = splite_word(args.i, args.kmer, args.splite)
    csv = tocsv(train_word, args.sg, args.hs, args.window_size, args.hidden_size, args.model_name,
                args.b, args.iter, args.premodel)
    svm(csv, train_y, args.cv, args.n_job, args.mms, args.ss, args.grad, args.o)
    end_time = time.time()
    print("end ............................")
    print("Time consuming:{}s\n".format(end_time - start_time))
if __name__ == '__main__':
    # run the full pipeline only when executed as a script (not on import)
    main()
| [
"sklearn.metrics.accuracy_score",
"sklearn.model_selection.GridSearchCV",
"numpy.mean",
"gensim.models.word2vec.Word2Vec",
"pandas.Series",
"argparse.ArgumentParser",
"sklearn.metrics.classification_report",
"sklearn.metrics.matthews_corrcoef",
"gensim.models.KeyedVectors.load_word2vec_format",
"n... | [((14, 54), 'sys.path.extend', 'sys.path.extend', (["['../../', '../', './']"], {}), "(['../../', '../', './'])\n", (29, 54), False, 'import sys\n'), ((73, 106), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (96, 106), False, 'import warnings\n'), ((2402, 2419), 'numpy.array', 'np.array', (['feature'], {}), '(feature)\n', (2410, 2419), True, 'import numpy as np\n'), ((3015, 3036), 'sklearn.svm.SVC', 'SVC', ([], {'probability': '(True)'}), '(probability=True)\n', (3018, 3036), False, 'from sklearn.svm import SVC\n'), ((5191, 5216), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5214, 5216), False, 'import argparse\n'), ((6965, 6976), 'time.time', 'time.time', ([], {}), '()\n', (6974, 6976), False, 'import time\n'), ((7296, 7307), 'time.time', 'time.time', ([], {}), '()\n', (7305, 7307), False, 'import time\n'), ((2327, 2338), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (2335, 2338), True, 'import numpy as np\n'), ((2576, 2646), 'gensim.models.KeyedVectors.load_word2vec_format', 'gensim.models.KeyedVectors.load_word2vec_format', (['spmodel'], {'binary': '(False)'}), '(spmodel, binary=False)\n', (2623, 2646), False, 'import gensim\n'), ((2675, 2774), 'gensim.models.word2vec.Word2Vec', 'word2vec.Word2Vec', (['train_word'], {'iter': 'iter1', 'sg': 'sg', 'hs': 'hs', 'min_count': '(1)', 'window': 'window', 'size': 'size'}), '(train_word, iter=iter1, sg=sg, hs=hs, min_count=1, window\n =window, size=size)\n', (2692, 2774), False, 'from gensim.models import word2vec\n'), ((3138, 3152), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (3150, 3152), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((3274, 3290), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3288, 3290), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3748, 3820), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['svc', 'parameters'], 
{'cv': 'cv', 'scoring': '"""accuracy"""', 'n_jobs': 'cpu_num'}), "(svc, parameters, cv=cv, scoring='accuracy', n_jobs=cpu_num)\n", (3760, 3820), False, 'from sklearn.model_selection import GridSearchCV\n'), ((4332, 4372), 'sklearn.svm.SVC', 'SVC', ([], {'C': '(0.5)', 'gamma': '(0.05)', 'probability': '(True)'}), '(C=0.5, gamma=0.05, probability=True)\n', (4335, 4372), False, 'from sklearn.svm import SVC\n'), ((4477, 4528), 'sklearn.model_selection.cross_val_predict', 'cross_val_predict', (['clf', 'X', 'y'], {'cv': 'cv', 'n_jobs': 'cpu_num'}), '(clf, X, y, cv=cv, n_jobs=cpu_num)\n', (4494, 4528), False, 'from sklearn.model_selection import cross_val_predict\n'), ((5128, 5151), 'sklearn.externals.joblib.dump', 'joblib.dump', (['clf', 'model'], {}), '(clf, model)\n', (5139, 5151), False, 'from sklearn.externals import joblib\n'), ((2363, 2388), 'numpy.mean', 'np.mean', (['word_vec'], {'axis': '(0)'}), '(word_vec, axis=0)\n', (2370, 2388), True, 'import numpy as np\n'), ((4150, 4219), 'sklearn.svm.SVC', 'SVC', ([], {'C': "p['C']", 'kernel': "p['kernel']", 'gamma': "p['gamma']", 'probability': '(True)'}), "(C=p['C'], kernel=p['kernel'], gamma=p['gamma'], probability=True)\n", (4153, 4219), False, 'from sklearn.svm import SVC\n'), ((4254, 4305), 'sklearn.svm.SVC', 'SVC', ([], {'C': "p['C']", 'kernel': "p['kernel']", 'probability': '(True)'}), "(C=p['C'], kernel=p['kernel'], probability=True)\n", (4257, 4305), False, 'from sklearn.svm import SVC\n'), ((4914, 4949), 'sklearn.metrics.classification_report', 'classification_report', (['y', 'predicted'], {}), '(y, predicted)\n', (4935, 4949), False, 'from sklearn.metrics import classification_report\n'), ((4785, 4821), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y', 'predicted'], {}), '(y, predicted)\n', (4807, 4821), False, 'from sklearn import metrics\n'), ((4857, 4896), 'sklearn.metrics.matthews_corrcoef', 'metrics.matthews_corrcoef', (['y', 'predicted'], {}), '(y, predicted)\n', (4882, 4896), False, 
'from sklearn import metrics\n'), ((5015, 5042), 'pandas.Series', 'pd.Series', (['y'], {'name': '"""Actual"""'}), "(y, name='Actual')\n", (5024, 5042), True, 'import pandas as pd\n'), ((5044, 5082), 'pandas.Series', 'pd.Series', (['predicted'], {'name': '"""Predicted"""'}), "(predicted, name='Predicted')\n", (5053, 5082), True, 'import pandas as pd\n')] |
#! /usr/bin/python
# -*-coding: utf-8-*-
from keras.datasets import mnist
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD
import matplotlib.pyplot as plt
# keras' built-in load_data() kept failing to download, so MNIST is read
# from a locally downloaded .npz archive instead.
def load_data(path="MNIST_data/mnist.npz"):
    """Load the MNIST train/test arrays from a local .npz file."""
    with np.load(path) as archive:
        return (
            (archive['x_train'], archive['y_train']),
            (archive['x_test'], archive['y_test']),
        )
# Build the classifier (sequential model)
def train():
    """Build and compile the MLP: 784 -> 500 -> 300 -> 10 (softmax)."""
    layers = [
        Dense(500, input_shape=(784,), activation="relu"),  # input: flattened 28*28 image
        Dropout(0.3),  # 30% dropout
        Dense(300, activation="relu"),
        Dropout(0.3),  # 30% dropout
        Dense(10),
        Activation('softmax'),  # 10 digit classes
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    # stochastic gradient descent with Nesterov momentum and a small decay
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model
def run():
    """Load MNIST, flatten images, one-hot encode labels, train and evaluate."""
    (x_train, y_train), (x_test, y_test) = load_data()

    def flatten(imgs):
        # (n, 28, 28) -> (n, 784)
        return imgs.reshape(imgs.shape[0], imgs.shape[1] * imgs.shape[2])

    def one_hot(labels):
        # integer labels -> (n, 10) 0/1 indicator matrix
        return (np.arange(10) == labels[:, None]).astype(int)

    model = train()
    model.fit(flatten(x_train), one_hot(y_train), batch_size=200, epochs=30,
              shuffle=True, verbose=1, validation_split=0.3)
    print("Start Test.....\n")
    scores = model.evaluate(flatten(x_test), one_hot(y_test), batch_size=200, verbose=1)
    print("The Test Loss: %f" % scores[0])
if __name__ == "__main__":
    # train and evaluate only when executed as a script (not on import)
    run()
# load_data()
# mnist.load_data() | [
"keras.models.Sequential",
"keras.optimizers.SGD",
"keras.layers.Activation",
"keras.layers.Dense",
"numpy.load",
"keras.layers.Dropout",
"numpy.arange"
] | [((344, 357), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (351, 357), True, 'import numpy as np\n'), ((564, 576), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (574, 576), False, 'from keras.models import Sequential\n'), ((888, 942), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.01)', 'decay': '(1e-06)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, decay=1e-06, momentum=0.9, nesterov=True)\n', (891, 942), False, 'from keras.optimizers import SGD\n'), ((591, 640), 'keras.layers.Dense', 'Dense', (['(500)'], {'input_shape': '(784,)', 'activation': '"""relu"""'}), "(500, input_shape=(784,), activation='relu')\n", (596, 640), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((673, 685), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (680, 685), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((717, 746), 'keras.layers.Dense', 'Dense', (['(300)'], {'activation': '"""relu"""'}), "(300, activation='relu')\n", (722, 746), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((774, 786), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (781, 786), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((818, 827), 'keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (823, 827), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((843, 864), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (853, 864), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((1334, 1347), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1343, 1347), True, 'import numpy as np\n'), ((1395, 1408), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1404, 1408), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 11 23:23:26 2018
@author: user

NumPy tutorial script: array basics, element-wise and matrix operations,
indexing/slicing, shape manipulation, stacking, and copy-vs-view semantics.
"""
# importing
import numpy as np
#%% numpy basics
array = np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]) # 1x15 vector
print(array.shape)
a = array.reshape(3,5)
#%%
print("shape: ",a.shape)
print("dimension: ", a.ndim)
print("data type: ",a.dtype.name)
print("size: ",a.size) # total number of elements in the array
print("type: ",type(a))
#%%
array1 = np.array([[1,2,3,4],[5,6,7,8],[9,8,7,5]])
zeros = np.zeros((3,4))
zeros[0,0] = 5
print(zeros)
np.ones((3,4))
np.empty((2,3))
a = np.arange(10,50,5)
print(a)
a = np.linspace(10,50,20)
print(a)
# %% numpy basic operations
a = np.array([1,2,3])
b = np.array([4,5,6])
print(a+b)
print(a-b)
print(a**2)
print(np.sin(a))
print(a<2)
#%%
a = np.array([[1,2,3],[4,5,6]])
b = np.array([[1,2,3],[4,5,6]])
# element-wise product
print(a*b)
# matrix product
a.dot(b.T)
print(np.exp(a))
#%%
a = np.random.random((5,5))
print(a.sum())
print(a.max())
print(a.min())
print(a.sum(axis=0)) # sum of columns
print(a.sum(axis=1)) # sum of rows
print(np.sqrt(a))
print(np.square(a)) # same as a**2
print(np.add(a,a)) # same as a+a
# %% indexing and slicing
import numpy as np
array = np.array([1,2,3,4,5,6,7]) # 1-D vector
print(array[0])
print(array[0:4])
reverse_array = array[::-1]
print(reverse_array)
array1 = np.array([[1,2,3,4,5],[6,7,8,9,10]])
print(array1[1,1])
print(array1[:,1])
print(array1[1,1:4])
print(array1[-1,:])
print(array1[:,-1])
# %%
# shape manipulation
array = np.array([[1,2,3],[4,5,6]])
# flatten
a = array.ravel() # flatten into a 1-D vector
array2 = a.reshape(3,2) # returns a reshaped array; `a` itself is unchanged
array2 = a.reshape(3,-1) # -1 is inferred from the total size, so it becomes 2 here
array2.resize(3,2) # resizes in place (modifies array2 itself)
arrayT = array2.T
print(arrayT.shape)
array5 = np.array([[1,2],[3,4],[4,5]])
#array5 = np.column_stack((array1,array1))
# %% stacking arrays
array1 = np.array([[1,2],[3,4]])
array2 = np.array([[-1,-2],[-3,-4]])
# vertical: stack rows on top of each other ->
#array([[ 1,  2],
#       [ 3,  4],
#       [-1, -2],
#       [-3, -4]])
array3 = np.vstack((array1,array2))
# horizontal: stack columns side by side ->
#array([[ 1,  2, -1, -2],
#       [ 3,  4, -3, -4]])
array4 = np.hstack((array1,array2))
#%% convert and copy
liste = [1,2,3,4] # Python list
array = np.array(liste) # list -> numpy array
liste2 = list(array)
a = np.array([1,2,3]) # [1,2,3] is allocated in memory and `a` points to it
b = a # b points to the same memory, not a copy
b[0] = 5
c = a # c points to that same memory too
d = np.array([1,2,3])
e = d.copy() # independent copy, so modifying e won't change d
f = d.copy()
# Python passes mutable objects by reference, so function calls make no copy.
# `a is b` returns True if they are the same object; id(a) returns a's id.
| [
"numpy.sqrt",
"numpy.ones",
"numpy.add",
"numpy.hstack",
"numpy.random.random",
"numpy.square",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.empty",
"numpy.vstack",
"numpy.sin",
"numpy.arange"
] | [((142, 203), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])\n', (150, 203), True, 'import numpy as np\n'), ((427, 479), 'numpy.array', 'np.array', (['[[1, 2, 3, 4], [5, 6, 7, 8], [9, 8, 7, 5]]'], {}), '([[1, 2, 3, 4], [5, 6, 7, 8], [9, 8, 7, 5]])\n', (435, 479), True, 'import numpy as np\n'), ((478, 494), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {}), '((3, 4))\n', (486, 494), True, 'import numpy as np\n'), ((524, 539), 'numpy.ones', 'np.ones', (['(3, 4)'], {}), '((3, 4))\n', (531, 539), True, 'import numpy as np\n'), ((540, 556), 'numpy.empty', 'np.empty', (['(2, 3)'], {}), '((2, 3))\n', (548, 556), True, 'import numpy as np\n'), ((561, 581), 'numpy.arange', 'np.arange', (['(10)', '(50)', '(5)'], {}), '(10, 50, 5)\n', (570, 581), True, 'import numpy as np\n'), ((594, 617), 'numpy.linspace', 'np.linspace', (['(10)', '(50)', '(20)'], {}), '(10, 50, 20)\n', (605, 617), True, 'import numpy as np\n'), ((661, 680), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (669, 680), True, 'import numpy as np\n'), ((683, 702), 'numpy.array', 'np.array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (691, 702), True, 'import numpy as np\n'), ((775, 807), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (783, 807), True, 'import numpy as np\n'), ((807, 839), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (815, 839), True, 'import numpy as np\n'), ((925, 949), 'numpy.random.random', 'np.random.random', (['(5, 5)'], {}), '((5, 5))\n', (941, 949), True, 'import numpy as np\n'), ((1202, 1233), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7]'], {}), '([1, 2, 3, 4, 5, 6, 7])\n', (1210, 1233), True, 'import numpy as np\n'), ((1351, 1396), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]'], {}), '([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])\n', (1359, 1396), True, 'import numpy as 
np\n'), ((1528, 1560), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (1536, 1560), True, 'import numpy as np\n'), ((1849, 1883), 'numpy.array', 'np.array', (['[[1, 2], [3, 4], [4, 5]]'], {}), '([[1, 2], [3, 4], [4, 5]])\n', (1857, 1883), True, 'import numpy as np\n'), ((1957, 1983), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (1965, 1983), True, 'import numpy as np\n'), ((1990, 2020), 'numpy.array', 'np.array', (['[[-1, -2], [-3, -4]]'], {}), '([[-1, -2], [-3, -4]])\n', (1998, 2020), True, 'import numpy as np\n'), ((2110, 2137), 'numpy.vstack', 'np.vstack', (['(array1, array2)'], {}), '((array1, array2))\n', (2119, 2137), True, 'import numpy as np\n'), ((2212, 2239), 'numpy.hstack', 'np.hstack', (['(array1, array2)'], {}), '((array1, array2))\n', (2221, 2239), True, 'import numpy as np\n'), ((2298, 2313), 'numpy.array', 'np.array', (['liste'], {}), '(liste)\n', (2306, 2313), True, 'import numpy as np\n'), ((2351, 2370), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2359, 2370), True, 'import numpy as np\n'), ((2527, 2546), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2535, 2546), True, 'import numpy as np\n'), ((743, 752), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (749, 752), True, 'import numpy as np\n'), ((906, 915), 'numpy.exp', 'np.exp', (['a'], {}), '(a)\n', (912, 915), True, 'import numpy as np\n'), ((1073, 1083), 'numpy.sqrt', 'np.sqrt', (['a'], {}), '(a)\n', (1080, 1083), True, 'import numpy as np\n'), ((1091, 1103), 'numpy.square', 'np.square', (['a'], {}), '(a)\n', (1100, 1103), True, 'import numpy as np\n'), ((1120, 1132), 'numpy.add', 'np.add', (['a', 'a'], {}), '(a, a)\n', (1126, 1132), True, 'import numpy as np\n')] |
import torch
import numpy as np
def dgms_tensor_list(ReducedCC, maxHomdim):
    """
    Convert persistence diagrams of a reduced chain complex into tensors.

    One tensor pair is produced per homology dimension 0..maxHomdim:
    - a float tensor of (birth, death) value pairs with requires_grad=True
      (zero-length bars are kept so gradients can flow through them), and
    - a long tensor of (birth, death) indices, where an everlasting bar's
      death index (bats.NO_IND, i.e. 2**64 - 1) is remapped to -1.

    -Inputs:
        ReducedCC - reduced chain complex (bats)
        maxHomdim - maximum homology dimension
    -Outputs:
        diagrams    - list of persistence-diagram tensors
        index_pairs - list of birth/death index tensors (used to find gradients)
    """
    diagrams, index_pairs = [], []
    for dim in range(maxHomdim + 1):
        pairs, inds = ReducedCC.persistence_pairs_vec(dim)
        # pairs/inds are flat lists [b0, d0, b1, d1, ...], zero-length bars included
        pairs = np.array(pairs)
        inds = np.array(inds)
        inds[inds == 0xFFFFFFFFFFFFFFFF] = -1  # take care of bats.NO_IND
        diagrams.append(torch.tensor(pairs.reshape(-1, 2), requires_grad=True))
        index_pairs.append(
            torch.tensor(inds.reshape(-1, 2), requires_grad=False, dtype=torch.long))
    return diagrams, index_pairs
| [
"numpy.array"
] | [((1125, 1142), 'numpy.array', 'np.array', (['bd_inds'], {}), '(bd_inds)\n', (1133, 1142), True, 'import numpy as np\n'), ((1161, 1178), 'numpy.array', 'np.array', (['bd_pair'], {}), '(bd_pair)\n', (1169, 1178), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import tensorflow as tf
import pandas as pd
def resize(image, size):
    """Resize the image file at `image` in place to size x size (bilinear)."""
    raw = tf.io.read_file(image)
    decoded = tf.image.decode_jpeg(raw, channels=3)
    resized = tf.keras.preprocessing.image.smart_resize(decoded, [size, size],
                                                        interpolation='bilinear')
    # overwrite the original file with the resized image
    tf.keras.preprocessing.image.save_img(image, resized)
def match_images(name):
    """Locate the chest template in the image file `name` by Canny-edge
    template matching and overwrite the file with the matched crop
    (the 2 px rectangle border drawn during matching is excluded)."""
    template = cv2.imread('Template_Matching/templates/v6_228_T_v2.jpg')
    scan = cv2.imread(name)
    gray = cv2.cvtColor(np.array(scan.copy()), cv2.COLOR_BGR2GRAY)

    # match edge maps rather than raw intensities
    template_edges = cv2.Canny(cv2.blur(template, (5, 5)), 125, 225)
    scan_edges = cv2.Canny(cv2.blur(gray, (5, 5)), 50, 100)

    w, h = template_edges.shape[::-1]
    scores = cv2.matchTemplate(scan_edges, template_edges, cv2.TM_CCOEFF)
    _, _, _, best_loc = cv2.minMaxLoc(scores)

    x1, y1 = best_loc
    x2, y2 = x1 + w, y1 + h
    cv2.rectangle(scan, (x1, y1), (x2, y2), 255, 2)
    # crop just inside the drawn rectangle so the blue frame is removed
    cropped = scan[y1 + 2:y2 - 2, x1 + 2:x2 - 2]
    cv2.imwrite(name, cropped)
#return image
def preprocess_images():
    """Resize (and template-crop frontal) every image listed in Train.csv and Valid.csv.

    Improvement: the identical Train/Valid loops are factored into one helper.
    """
    def _process(csv_path):
        # Frontal images are resized larger and then cropped via template
        # matching; lateral images are only resized to the network input size.
        df = pd.read_csv(csv_path)
        for index, row in df.iterrows():
            p = row['Path']
            if row['Frontal/Lateral'] == 'Frontal':
                resize(p, 250)
                match_images(p)
            elif row['Frontal/Lateral'] == 'Lateral':
                resize(p, 224)

    _process('Train.csv')
    _process('Valid.csv')
| [
"cv2.rectangle",
"cv2.imwrite",
"pandas.read_csv",
"tensorflow.io.read_file",
"cv2.minMaxLoc",
"numpy.array",
"tensorflow.keras.preprocessing.image.save_img",
"cv2.matchTemplate",
"cv2.Canny",
"cv2.imread",
"cv2.blur",
"tensorflow.image.decode_jpeg"
] | [((110, 132), 'tensorflow.io.read_file', 'tf.io.read_file', (['image'], {}), '(image)\n', (125, 132), True, 'import tensorflow as tf\n'), ((313, 360), 'tensorflow.keras.preprocessing.image.save_img', 'tf.keras.preprocessing.image.save_img', (['image', 'x'], {}), '(image, x)\n', (350, 360), True, 'import tensorflow as tf\n'), ((402, 459), 'cv2.imread', 'cv2.imread', (['"""Template_Matching/templates/v6_228_T_v2.jpg"""'], {}), "('Template_Matching/templates/v6_228_T_v2.jpg')\n", (412, 459), False, 'import cv2\n'), ((537, 553), 'cv2.imread', 'cv2.imread', (['name'], {}), '(name)\n', (547, 553), False, 'import cv2\n'), ((690, 716), 'cv2.blur', 'cv2.blur', (['template', '(5, 5)'], {}), '(template, (5, 5))\n', (698, 716), False, 'import cv2\n'), ((726, 757), 'cv2.blur', 'cv2.blur', (['original_copy', '(5, 5)'], {}), '(original_copy, (5, 5))\n', (734, 757), False, 'import cv2\n'), ((771, 798), 'cv2.Canny', 'cv2.Canny', (['blur_t', '(125)', '(225)'], {}), '(blur_t, 125, 225)\n', (780, 798), False, 'import cv2\n'), ((809, 833), 'cv2.Canny', 'cv2.Canny', (['blur', '(50)', '(100)'], {}), '(blur, 50, 100)\n', (818, 833), False, 'import cv2\n'), ((966, 1013), 'cv2.matchTemplate', 'cv2.matchTemplate', (['img', 'template', 'cv2.TM_CCOEFF'], {}), '(img, template, cv2.TM_CCOEFF)\n', (983, 1013), False, 'import cv2\n'), ((1055, 1078), 'cv2.minMaxLoc', 'cv2.minMaxLoc', (['detected'], {}), '(detected)\n', (1068, 1078), False, 'import cv2\n'), ((1252, 1313), 'cv2.rectangle', 'cv2.rectangle', (['original_image', 'top_left', 'bottom_right', '(255)', '(2)'], {}), '(original_image, top_left, bottom_right, 255, 2)\n', (1265, 1313), False, 'import cv2\n'), ((1551, 1575), 'cv2.imwrite', 'cv2.imwrite', (['name', 'image'], {}), '(name, image)\n', (1562, 1575), False, 'import cv2\n'), ((1629, 1653), 'pandas.read_csv', 'pd.read_csv', (['"""Train.csv"""'], {}), "('Train.csv')\n", (1640, 1653), True, 'import pandas as pd\n'), ((1910, 1934), 'pandas.read_csv', 'pd.read_csv', 
(['"""Valid.csv"""'], {}), "('Valid.csv')\n", (1921, 1934), True, 'import pandas as pd\n'), ((183, 220), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['img'], {'channels': '(3)'}), '(img, channels=3)\n', (203, 220), True, 'import tensorflow as tf\n'), ((631, 654), 'numpy.array', 'np.array', (['original_copy'], {}), '(original_copy)\n', (639, 654), True, 'import numpy as np\n')] |
# Extract mean resting-state time series per HCP-MMP1 atlas region (fsaverage5 surface).
import os,glob,sys
import nibabel
import pandas,numpy

basedir_input='/scratch/01329/poldrack/GOBS/GOBS_bids/derivatives/fmriprep'

# Subject code from the command line, with a default for interactive runs.
# Fix: was a bare `except:` -- only a missing argument should trigger the fallback.
try:
    subcode=sys.argv[1]
except IndexError:
    subcode='sub-EM2204'

# Load the left/right hemisphere atlas label surfaces.
atlasdir='/work/01329/poldrack/stampede2/code/GOBS/extract/HCP-MMP1'
atlas={'L':'lh.HCP-MMP1.fsaverage5.gii','R':'rh.HCP-MMP1.fsaverage5.gii'}
atlasdata={}
atlaslabels={}
for a in atlas:
    # Fix: load each GIfTI once instead of twice per hemisphere.
    atlasimg=nibabel.load(os.path.join(atlasdir,atlas[a]))
    atlaslabeltable=atlasimg.labeltable.labels
    atlaslabels[a]=[i.label for i in atlaslabeltable[1:]]  # skip label 0
    atlasdata[a]=atlasimg.darrays[0].data
allatlaslabels=atlaslabels['L']+atlaslabels['R']

# Locate the run-1 resting-state functional surface file for each hemisphere.
funcfiles={}
for hemis in ['L','R']:
    funcfiles[hemis]=glob.glob(os.path.join(basedir_input,subcode,'func/*task-rest_run-1_bold_space-fsaverage5.%s.func.gii'%hemis))[0]

roidata=numpy.zeros((360,150)) # 360 ROIs (180 per hemisphere) by 150 timepoints
offset={'L':0,'R':180}
for hemis in funcfiles:
    print(hemis,funcfiles[hemis])
    funcdata=nibabel.load(funcfiles[hemis])
    for region in range(180):
        regionverts=atlasdata[hemis]==region+1  # vertices belonging to this region
        for tp in range(150):
            roidata[region+offset[hemis],tp]=numpy.mean(funcdata.darrays[tp].data[regionverts])

roidata_df=pandas.DataFrame(roidata.T,columns=allatlaslabels)
outfile=os.path.join(basedir_input,subcode,'func/HCP-MMP1.roidata.txt')
roidata_df.to_csv(outfile,index=False)
| [
"numpy.mean",
"nibabel.load",
"os.path.join",
"numpy.zeros",
"pandas.DataFrame"
] | [((885, 908), 'numpy.zeros', 'numpy.zeros', (['(360, 150)'], {}), '((360, 150))\n', (896, 908), False, 'import pandas, numpy\n'), ((1273, 1324), 'pandas.DataFrame', 'pandas.DataFrame', (['roidata.T'], {'columns': 'allatlaslabels'}), '(roidata.T, columns=allatlaslabels)\n', (1289, 1324), False, 'import pandas, numpy\n'), ((1332, 1397), 'os.path.join', 'os.path.join', (['basedir_input', 'subcode', '"""func/HCP-MMP1.roidata.txt"""'], {}), "(basedir_input, subcode, 'func/HCP-MMP1.roidata.txt')\n", (1344, 1397), False, 'import os, glob, sys\n'), ((1031, 1061), 'nibabel.load', 'nibabel.load', (['funcfiles[hemis]'], {}), '(funcfiles[hemis])\n', (1043, 1061), False, 'import nibabel\n'), ((772, 880), 'os.path.join', 'os.path.join', (['basedir_input', 'subcode', "('func/*task-rest_run-1_bold_space-fsaverage5.%s.func.gii' % hemis)"], {}), "(basedir_input, subcode, \n 'func/*task-rest_run-1_bold_space-fsaverage5.%s.func.gii' % hemis)\n", (784, 880), False, 'import os, glob, sys\n'), ((1210, 1260), 'numpy.mean', 'numpy.mean', (['funcdata.darrays[tp].data[regionverts]'], {}), '(funcdata.darrays[tp].data[regionverts])\n', (1220, 1260), False, 'import pandas, numpy\n'), ((468, 500), 'os.path.join', 'os.path.join', (['atlasdir', 'atlas[a]'], {}), '(atlasdir, atlas[a])\n', (480, 500), False, 'import os, glob, sys\n'), ((605, 637), 'os.path.join', 'os.path.join', (['atlasdir', 'atlas[a]'], {}), '(atlasdir, atlas[a])\n', (617, 637), False, 'import os, glob, sys\n')] |
import numpy as np
def target_transform(y: np.array, increment: float=0.01) -> np.array:
    """
    Map a non-negative array onto the real line with a shifted log.
    :param y: np.array of non-negative targets
    :param increment: float shift added before the log (avoids log(0))
    :return: np.array
    """
    shifted = y + increment
    return np.log(shifted)
def target_inverse_transform(y_trn: np.array, increment: float=0.01) -> np.array:
    """
    Inverse of target_transform: map log-space values back to the original
    non-negative scale via exp(y_trn) - increment.
    :param y_trn: np.array of log-transformed targets
    :param increment: float, the same shift used in the forward transform
    :return: np.array
    """
return np.exp(y_trn) - increment | [
"numpy.exp",
"numpy.log"
] | [((233, 254), 'numpy.log', 'np.log', (['(y + increment)'], {}), '(y + increment)\n', (239, 254), True, 'import numpy as np\n'), ((486, 499), 'numpy.exp', 'np.exp', (['y_trn'], {}), '(y_trn)\n', (492, 499), True, 'import numpy as np\n')] |
from collections import defaultdict
from math import ceil
from pathlib import Path
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
import albumentations as albu
from albumentations.pytorch.transforms import img_to_tensor
from am.segment.image_utils import read_image
def default_transform(p=1):
    """Augmentation pipeline that only normalizes the image."""
    steps = [albu.Normalize(p=1)]
    return albu.Compose(steps, p=p)
def train_transform(p=1):
    """Training-time augmentation: geometric flips/rotations, additive noise,
    exactly one contrast adjustment, then normalize and resize to 512x512."""
    geometric = [
        albu.VerticalFlip(p=0.5),
        albu.HorizontalFlip(p=0.5),
        albu.Transpose(p=0.5),
        albu.RandomRotate90(p=0.5),
        albu.ShiftScaleRotate(p=1),
    ]
    intensity = [
        albu.IAAAdditiveGaussianNoise(p=0.5, scale=(0, 0.02 * 255)),
        # exactly one of these contrast/gamma adjustments is applied
        albu.OneOf([
            albu.CLAHE(p=1, clip_limit=3),
            albu.RandomBrightnessContrast(p=1, brightness_limit=0.2, contrast_limit=0.2),
            albu.RandomGamma(p=1, gamma_limit=(80, 120)),
        ], p=1),
    ]
    final = [albu.Normalize(), albu.Resize(512, 512)]
    return albu.Compose(geometric + intensity + final, p=p)
def valid_transform(p=1):
    """Validation-time pipeline: normalize and resize only (no augmentation)."""
    return albu.Compose([albu.Normalize(), albu.Resize(512, 512)], p=p)
class AMDataset(Dataset):
    """Paired image/mask dataset backed by two dataframes with a `path` column."""

    def __init__(self, image_df, mask_df, transform=None):
        self.image_df = image_df
        self.mask_df = mask_df
        self.transform = transform

    def __len__(self):
        # dataset length is driven by the mask table
        return len(self.mask_df)

    @staticmethod
    def _read_image(path, one_channel):
        # masks are read as 1 channel, source images as 3 channels
        img = read_image(path, ch_n=1 if one_channel else 3)
        return img[:, :, None] if one_channel else img

    def __getitem__(self, idx):
        source = self._read_image(self.image_df.iloc[idx].path, one_channel=False)

        mask_path = self.mask_df.iloc[idx].path
        if mask_path.exists():
            mask = self._read_image(mask_path, one_channel=True)
        else:
            # missing mask file -> all-background single-channel mask
            mask = np.zeros_like(source)[:, :, :1]

        if self.transform:
            transformed = self.transform(image=source, mask=mask)
            source, mask = transformed['image'], transformed['mask']

        return img_to_tensor(source), img_to_tensor(mask)

    def __add__(self, other):
        # concatenate two datasets; keeps this dataset's transform
        return AMDataset(
            pd.concat([self.image_df, other.image_df]),
            pd.concat([self.mask_df, other.mask_df]),
            self.transform,
        )
def create_image_mask_dfs(data_path):
    """Collect (experiment, group, path) rows for every source image and its mask.

    Layout: data_path/<group>/source/*.png with masks expected at
    data_path/<group>/mask/<same name>. Mask paths are listed even when the
    file does not exist. The experiment name is data_path's parent directory.
    """
    experiment = data_path.parent.name
    source_rows, mask_rows = [], []
    for group_dir in data_path.iterdir():
        group = group_dir.name
        for img_path in sorted((group_dir / 'source').glob('*.png')):
            source_rows.append((experiment, group, img_path))
            mask_rows.append((experiment, group, group_dir / 'mask' / img_path.name))
    columns = ['experiment', 'group', 'path']
    return pd.DataFrame(source_rows, columns=columns), pd.DataFrame(mask_rows, columns=columns)
def create_ds(data_path, transform=None, groups=None, size=None):
    """Build an AMDataset from `data_path`, optionally filtered to `groups`
    and repeated/subsampled to exactly `size` rows."""
    image_df, mask_df = create_image_mask_dfs(Path(data_path))
    image_n, mask_n = len(image_df), len(mask_df)
    assert image_n > 0, f'No image files found at {data_path}'
    assert image_n == mask_n, f'Different number of source and mask files: {image_n} != {mask_n}'

    if groups:
        image_df = image_df[image_df.group.isin(groups)]
        mask_df = mask_df[mask_df.group.isin(groups)]

    if size:
        n = image_df.shape[0]
        if n < size:
            # too few rows: repeat the tables, then truncate to `size`
            repeats = ceil(size / n)
            image_df = pd.concat([image_df] * repeats).head(size)
            mask_df = pd.concat([mask_df] * repeats).head(size)
        else:
            # too many rows: sample `size` of them without replacement
            chosen = np.random.choice(image_df.shape[0], size, replace=False)
            image_df, mask_df = image_df.iloc[chosen], mask_df.iloc[chosen]

    return AMDataset(image_df, mask_df, transform=transform)
def create_dl(
    paths, transform=None, path_image_n=None, shuffle=True, batch_size=4, num_workers=4
):
    """Build a DataLoader over the concatenated datasets found at `paths`.

    :param paths: non-empty list of dataset directories
    :param transform: augmentation pipeline passed to each dataset
    :param path_image_n: target number of images per path (see create_ds)

    Fixes vs. the original:
    - iterate over `paths` instead of `paths.pop(0)`, so the caller's
      list is no longer destructively emptied as a side effect;
    - test `ds is None` explicitly instead of `not ds`, so an empty
      (falsy) dataset cannot be silently replaced via truthiness.
    """
    assert paths
    print(f'Loading data from {paths} paths')
    ds = None
    for path in paths:
        path_ds = create_ds(Path(path), transform=transform, size=path_image_n)
        if ds is None:
            ds = path_ds
        else:
            ds += path_ds  # AMDataset.__add__ concatenates the frames
    return DataLoader(
        dataset=ds,
        shuffle=shuffle,
        num_workers=num_workers,
        batch_size=batch_size,
        pin_memory=torch.cuda.is_available()
    )
| [
"albumentations.pytorch.transforms.img_to_tensor",
"albumentations.RandomBrightnessContrast",
"albumentations.RandomGamma",
"torch.cuda.is_available",
"pathlib.Path",
"am.segment.image_utils.read_image",
"numpy.zeros_like",
"pandas.concat",
"pandas.DataFrame",
"albumentations.Transpose",
"albume... | [((2535, 2552), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2546, 2552), False, 'from collections import defaultdict\n'), ((2913, 2989), 'pandas.DataFrame', 'pd.DataFrame', (["image_paths['source']"], {'columns': "['experiment', 'group', 'path']"}), "(image_paths['source'], columns=['experiment', 'group', 'path'])\n", (2925, 2989), True, 'import pandas as pd\n'), ((3004, 3078), 'pandas.DataFrame', 'pd.DataFrame', (["image_paths['mask']"], {'columns': "['experiment', 'group', 'path']"}), "(image_paths['mask'], columns=['experiment', 'group', 'path'])\n", (3016, 3078), True, 'import pandas as pd\n'), ((1453, 1499), 'am.segment.image_utils.read_image', 'read_image', (['path'], {'ch_n': '(1 if one_channel else 3)'}), '(path, ch_n=1 if one_channel else 3)\n', (1463, 1499), False, 'from am.segment.image_utils import read_image\n'), ((2123, 2143), 'albumentations.pytorch.transforms.img_to_tensor', 'img_to_tensor', (['image'], {}), '(image)\n', (2136, 2143), False, 'from albumentations.pytorch.transforms import img_to_tensor\n'), ((2159, 2178), 'albumentations.pytorch.transforms.img_to_tensor', 'img_to_tensor', (['mask'], {}), '(mask)\n', (2172, 2178), False, 'from albumentations.pytorch.transforms import img_to_tensor\n'), ((2261, 2303), 'pandas.concat', 'pd.concat', (['[self.image_df, other.image_df]'], {}), '([self.image_df, other.image_df])\n', (2270, 2303), True, 'import pandas as pd\n'), ((2327, 2367), 'pandas.concat', 'pd.concat', (['[self.mask_df, other.mask_df]'], {}), '([self.mask_df, other.mask_df])\n', (2336, 2367), True, 'import pandas as pd\n'), ((3222, 3237), 'pathlib.Path', 'Path', (['data_path'], {}), '(data_path)\n', (3226, 3237), False, 'from pathlib import Path\n'), ((386, 405), 'albumentations.Normalize', 'albu.Normalize', ([], {'p': '(1)'}), '(p=1)\n', (400, 405), True, 'import albumentations as albu\n'), ((480, 504), 'albumentations.VerticalFlip', 'albu.VerticalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (497, 
504), True, 'import albumentations as albu\n'), ((514, 540), 'albumentations.HorizontalFlip', 'albu.HorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (533, 540), True, 'import albumentations as albu\n'), ((550, 571), 'albumentations.Transpose', 'albu.Transpose', ([], {'p': '(0.5)'}), '(p=0.5)\n', (564, 571), True, 'import albumentations as albu\n'), ((581, 607), 'albumentations.RandomRotate90', 'albu.RandomRotate90', ([], {'p': '(0.5)'}), '(p=0.5)\n', (600, 607), True, 'import albumentations as albu\n'), ((617, 643), 'albumentations.ShiftScaleRotate', 'albu.ShiftScaleRotate', ([], {'p': '(1)'}), '(p=1)\n', (638, 643), True, 'import albumentations as albu\n'), ((653, 712), 'albumentations.IAAAdditiveGaussianNoise', 'albu.IAAAdditiveGaussianNoise', ([], {'p': '(0.5)', 'scale': '(0, 0.02 * 255)'}), '(p=0.5, scale=(0, 0.02 * 255))\n', (682, 712), True, 'import albumentations as albu\n'), ((951, 967), 'albumentations.Normalize', 'albu.Normalize', ([], {}), '()\n', (965, 967), True, 'import albumentations as albu\n'), ((977, 998), 'albumentations.Resize', 'albu.Resize', (['(512)', '(512)'], {}), '(512, 512)\n', (988, 998), True, 'import albumentations as albu\n'), ((1074, 1090), 'albumentations.Normalize', 'albu.Normalize', ([], {}), '()\n', (1088, 1090), True, 'import albumentations as albu\n'), ((1100, 1121), 'albumentations.Resize', 'albu.Resize', (['(512)', '(512)'], {}), '(512, 512)\n', (1111, 1121), True, 'import albumentations as albu\n'), ((3661, 3675), 'math.ceil', 'ceil', (['(size / n)'], {}), '(size / n)\n', (3665, 3675), False, 'from math import ceil\n'), ((3833, 3889), 'numpy.random.choice', 'np.random.choice', (['image_df.shape[0]', 'size'], {'replace': '(False)'}), '(image_df.shape[0], size, replace=False)\n', (3849, 3889), True, 'import numpy as np\n'), ((4309, 4319), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (4313, 4319), False, 'from pathlib import Path\n'), ((4604, 4629), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), 
'()\n', (4627, 4629), False, 'import torch\n'), ((1908, 1928), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (1921, 1928), True, 'import numpy as np\n'), ((747, 776), 'albumentations.CLAHE', 'albu.CLAHE', ([], {'p': '(1)', 'clip_limit': '(3)'}), '(p=1, clip_limit=3)\n', (757, 776), True, 'import albumentations as albu\n'), ((790, 866), 'albumentations.RandomBrightnessContrast', 'albu.RandomBrightnessContrast', ([], {'p': '(1)', 'brightness_limit': '(0.2)', 'contrast_limit': '(0.2)'}), '(p=1, brightness_limit=0.2, contrast_limit=0.2)\n', (819, 866), True, 'import albumentations as albu\n'), ((880, 924), 'albumentations.RandomGamma', 'albu.RandomGamma', ([], {'p': '(1)', 'gamma_limit': '(80, 120)'}), '(p=1, gamma_limit=(80, 120))\n', (896, 924), True, 'import albumentations as albu\n'), ((3699, 3727), 'pandas.concat', 'pd.concat', (['([image_df] * mult)'], {}), '([image_df] * mult)\n', (3708, 3727), True, 'import pandas as pd\n'), ((3761, 3788), 'pandas.concat', 'pd.concat', (['([mask_df] * mult)'], {}), '([mask_df] * mult)\n', (3770, 3788), True, 'import pandas as pd\n')] |
from ..edge.non_recursive.jit import (
euler_tour_edge,
)
# TODO cut below
import typing
import numpy as np
import numba as nb
@nb.njit
def euler_tour_node(
    g: np.ndarray,
    edge_idx: np.ndarray,
    root: int,
) -> typing.Tuple[(np.ndarray, ) * 4]:
    """Derive a node-level Euler tour from the edge-level tour.

    Returns (tour, first_idx, parent, depth): `tour` holds node ids for each
    tour position, `first_idx[v]` the position where node v appears
    non-negated.  NOTE(review): assumes euler_tour_edge marks exit events with
    the bitwise complement (~node) -- confirm against that module.
    """
    edge_tour, parent, depth = euler_tour_edge(g, edge_idx, root)
    n = len(edge_tour) >> 1
    tour = edge_tour[:-1]
    first_idx = np.full(n, -1, np.int64)
    for pos in range(2 * n - 1):
        v = tour[pos]
        if v >= 0:
            first_idx[v] = pos
        else:
            # Exit event: replace the complemented id by the parent node.
            tour[pos] = parent[~v]
    return tour, first_idx, parent, depth
| [
"numpy.full"
] | [((372, 396), 'numpy.full', 'np.full', (['n', '(-1)', 'np.int64'], {}), '(n, -1, np.int64)\n', (379, 396), True, 'import numpy as np\n')] |
"""
This script performs a simplified version of the procedures described in
<NAME>. Theory of second order stationary random processes applied to GPS coordinate time-series. GPS Solut 22, 86 (2018). https://doi.org/10.1007/s10291-018-0748-4
This script requires the name of the GPS station to be studied as a parameter when launched (e.g. >>>python SGprojBB.py AB07)
"""
import sys
import pandas as pd
from scipy import signal
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy import linalg
from scipy.optimize import curve_fit, OptimizeWarning
from statsmodels.tsa.stattools import kpss, acf
import warnings
from statsmodels.tools.sm_exceptions import InterpolationWarning
# Silence the warning categories routinely emitted by the libraries used
# below (pandas FutureWarnings, statsmodels' KPSS interpolation notice,
# numpy runtime warnings and scipy curve_fit covariance warnings) so the
# console output stays readable.
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter('ignore', InterpolationWarning)
warnings.simplefilter('ignore', category=RuntimeWarning)
warnings.simplefilter('ignore', OptimizeWarning)
#Data structure
class envdata:
    """Container for an NGL-style .tenv3 GPS time series plus the
    least-squares trajectory-model machinery (offset + trend + periodic
    terms + Heaviside step discontinuities) applied per component (E/N/U).

    NOTE(review): results() reads the module-level globals `site` and `env`
    instead of `self`, so the class is only usable from the __main__ block
    of this script as written.
    """
    def __init__(self):
        self.site = None
        #self.YYMMMDD = None
        self.yyyy_yyyy = None
        self.MJD = None
        self.week = None
        self.d = None
        self.reflon = None
        self.e0 = None
        self.east = None
        self.n0 = None
        self.north = None
        self.u0 = None
        self.up = None
        self.ant = None
        self.sig_e = None
        self.sig_n = None
        self.sig_u = None
        self.corr_en = None
        self.corr_eu = None
        self.corr_nu = None
        # Epochs (MJD) of known step discontinuities for this station.
        self.disclist = []
        self.seisdisclist = []
        # One Comp working area per coordinate component.
        self.comp_e = self.Comp()
        self.comp_e.comp = 'E'
        self.comp_n = self.Comp()
        self.comp_n.comp = 'N'
        self.comp_u = self.Comp()
        self.comp_u.comp = 'U'
    class Comp:
        """Per-component working storage: detected periods, design matrix A,
        data covariance C, estimated parameters xh, residuals, fitted ACF
        model and (via C_est) the parameter covariance."""
        def __init__(self):
            self.comp = None
            self.periodlist = []
            self.A = None
            self.C = None
            self.xh = None
            self.res = None
            self.acf = None
            self.Chh = None
    #Data "reading"
    def data(self, dataframe):
        """Copy the columns of a (tail of a) .tenv3 dataframe into flat
        numpy attribute arrays; the station name is taken from the first
        row."""
        self.site = dataframe.iloc[0]['site']
        #self.YYMMMDD = dataframe['YYMMMDD'].to_numpy()
        self.yyyy_yyyy = dataframe['yyyy.yyyy'].to_numpy()
        self.MJD = dataframe['__MJD'].to_numpy()
        self.week = dataframe['week'].to_numpy()
        self.d = dataframe['d'].to_numpy()
        self.reflon = dataframe['reflon'].to_numpy()
        self.e0 = dataframe['_e0(m)'].to_numpy()
        self.east = dataframe['__east(m)'].to_numpy()
        self.n0 = dataframe['____n0(m)'].to_numpy()
        self.north = dataframe['_north(m)'].to_numpy()
        self.u0 = dataframe['u0(m)'].to_numpy()
        self.up = dataframe['____up(m)'].to_numpy()
        self.ant = dataframe['_ant(m)'].to_numpy()
        self.sig_e = dataframe['sig_e(m)'].to_numpy()
        self.sig_n = dataframe['sig_n(m)'].to_numpy()
        self.sig_u = dataframe['sig_u(m)'].to_numpy()
        self.corr_en = dataframe['__corr_en'].to_numpy()
        self.corr_eu = dataframe['__corr_eu'].to_numpy()
        self.corr_nu = dataframe['__corr_nu'].to_numpy()
    #Perform basic pre-processing operations (e.g. remove data gap)
    def pre_proc(self):
        """Resample every series onto a gap-free daily MJD grid by linear
        interpolation and drop discontinuity epochs outside the observed
        window."""
        MJDrange = range(self.MJD[0], self.MJD[-1]+1)
        for attr, value in self.__dict__.items():
            if attr in ['yyyy_yyyy', 'week', 'd', 'reflon', 'e0', 'east', 'n0', 'north', 'u0', 'up', 'ant', 'sig_e', 'sig_n', 'sig_u', 'corr_en', 'corr_eu', 'corr_nu']:
                f = interp1d(self.MJD, value)
                setattr(self, attr, f(MJDrange))
        self.MJD = MJDrange
        # Keep only discontinuities that fall inside the resampled window.
        disclist = []
        for tdisc in self.disclist:
            if tdisc in self.MJD:
                disclist.append(tdisc)
        self.disclist = disclist
        for y in [self.east, self.north, self.up]:
            sos = signal.butter(4, (1/len(y), 0.5), 'bandpass', output='sos')
            # NOTE(review): this only rebinds the loop variable -- the
            # filtered series is discarded, so east/north/up are effectively
            # NOT band-pass filtered. Probably a bug; confirm intent.
            y = signal.sosfiltfilt(sos, y)
    #Define the design matrix
    def design_matrix(self, chan=None):
        """Build the design matrix A for the given components (default: all
        three). Columns: [offset, trend, one sin/cos pair per detected
        period, one Heaviside per known discontinuity]."""
        if chan == None:
            chan=[self.comp_e, self.comp_n, self.comp_u]
        for cha in chan:
            A = np.zeros((len(self.MJD), 2+2*len(cha.periodlist)+len(self.disclist)))
            for i in range(len(self.MJD)):
                t = self.MJD[i]
                A[i][0:2]=(1, t)
                for j in range(len(cha.periodlist)):
                    omega = 2*np.pi/cha.periodlist[j]
                    A[i][2+2*j] = np.sin(omega*t)
                    A[i][2+2*j+1] = np.cos(omega*t)
                disc_thetas = []
                for tdisc in self.disclist:
                    disc_thetas.append(np.heaviside(t - tdisc, 1))
                    # NOTE(review): dead code -- `value` is never used; the
                    # np.heaviside call above already appended the step term.
                    if t-tdisc < 0:
                        value = 0
                    else:
                        value=1
                A[i][2*len(cha.periodlist)+2:] = disc_thetas
            cha.A = A
    #Perform the LSE
    def LSE(self, cha_y=None):
        """Weighted least-squares estimate
        xh = (A^T C^-1 A)^-1 A^T C^-1 (y - d), with d = 0 here; pinv is used
        for numerical robustness."""
        if cha_y == None:
            cha_y=[[self.comp_e, self.east], [self.comp_n, self.north], [self.comp_u, self.up]]
        for cha, y in cha_y:
            A = cha.A
            C = cha.C
            Ci = linalg.pinv(C)
            AtCi = A.T.dot(Ci)
            d = 0
            xh = linalg.pinv(AtCi.dot(A)).dot(AtCi).dot(y-d)
            cha.xh = xh
    #Evaluates the residuals
    def residuals(self, cha_y=None):
        """Store the model-minus-data residuals (A xh - y) per component."""
        if cha_y == None:
            cha_y=[[self.comp_e, self.east], [self.comp_n, self.north], [self.comp_u, self.up]]
        for cha, y in cha_y:
            cha.res = cha.A.dot(cha.xh) - y
    #Evaluates the periodicities in the signal
    def periodicities(self):
        """Greedily add the dominant periodogram peak of the residuals as a
        new periodic term, refit, and stop once adding a period no longer
        reduces the sum of squared residuals (the last period is then
        removed and the model refitted)."""
        for cha, y in [[self.comp_e, self.east], [self.comp_n, self.north], [self.comp_u, self.up]]:
            olderr = np.sum(cha.res**2)
            while True:
                f, P = signal.periodogram(cha.res, 1, window ='hamming', scaling='spectrum')
                cha.periodlist.append(1/f[np.argmax(P)])
                self.design_matrix([cha])
                self.LSE([[cha, y]])
                self.residuals([[cha, y]])
                if np.sum(cha.res**2)>olderr:
                    cha.periodlist = cha.periodlist[:-1]
                    self.design_matrix([cha])
                    self.LSE([[cha, y]])
                    self.residuals([[cha, y]])
                    break
                else:
                    # NOTE(review): unlike the initialisation above this lacks
                    # **2 -- presumably should be np.sum(cha.res**2).
                    olderr = np.sum(cha.res)
    #Perform a KPSS test on the residuals and, if failed, terminate the script
    def KPSS_test(self):
        """KPSS stationarity test on each residual series; a p-value below
        0.05 rejects stationarity and aborts the script.
        NOTE(review): exits with status 0 on this failure path -- a non-zero
        code would be more conventional."""
        for cha in [self.comp_e, self.comp_n, self.comp_u]:
            statistic, p_value, n_lags, critical_values = kpss(cha.res)
            if p_value < 0.05:
                print(f'The {cha.comp} series is not stationary')
                print(f'KPSS_c p-value:{p_value}')
                sys.exit(0)
    #Evaluate the ACF on the residuals and fitting with positive definite function
    def resACF(self):
        """Fit an analytic (positive-definite) model to the empirical
        autocorrelation of the residuals. Candidate models combine
        exponential/Gaussian decay with polynomial/cosine factors; the best
        (least-squares) fit over lags [1, N) is kept, where N is the index
        of the second sign change of the empirical ACF."""
        for cha in [self.comp_e, self.comp_n, self.comp_u]:
            eacf = acf(cha.res, nlags=len(cha.res)-1)
            N = 0
            c = 0
            for i in range(len(eacf)-1):
                if np.sign(eacf[i])!= np.sign(eacf[i+1]):
                    if c == 0:
                        c = 1
                    elif c==1:
                        N = i
                        break
            # Candidate ACF models: exp/gauss decay x polynomial or cosine.
            def ep(t, a, b, c):
                return a*np.exp(-b*t)*(1-c*t**2)
            def gp(t, a, b, c):
                return a*np.exp(-b*t**2)*(1-c*t**2)
            def ec(t, a, b, c):
                return a*np.exp(-b*t)*np.cos(1-c*t)
            def gc(t, a, b, c):
                return a*np.exp(-b*t**2)*np.cos(c*t)
            def e(t, a, b):
                return a*np.exp(-b*t)
            def g(t, a, b):
                return a*np.exp(-b*t**2)
            def err(x, y):
                return np.sum((x-y)**2)
            old_err = np.inf
            t = np.array(self.MJD) - self.MJD[0]
            for f in [ep, gp, ec, gc, e, g]:
                popt, pcov = curve_fit(f, t[1:N], eacf[1:N])
                sacf = f(t, *popt)
                sacf[0] = 1
                if err(sacf[1:N], eacf[1:N])<old_err:
                    cha.acf = sacf
                    old_err = err(sacf[1:N], eacf[1:N])
    #Define or update the covariance matrix
    def C_update(self):
        """Build the data covariance C: diagonal from the formal sigmas when
        no ACF model exists (white noise), otherwise a Toeplitz-like matrix
        from the fitted ACF plus the diagonal sigma term (colored noise)."""
        for cha, sig in [[self.comp_e, self.sig_e], [self.comp_n, self.sig_n], [self.comp_u, self.sig_u]]:
            if cha.acf is None:
                cha.C = np.diag(sig**2)
            else:
                N = len(cha.acf)
                C = np.zeros((N,N))
                for i in range(N):
                    for j in range(N):
                        tau = np.abs(i-j)
                        #if tau != 0:
                        C[i,j] = cha.acf[tau]
                cha.C = C + np.diag(sig**2)
    #Evaluate the covariance matrix for the estimated parameters
    def C_est(self):
        """Parameter covariance Chh = varh * (A^T C^-1 A)^-1 with varh the
        a-posteriori variance factor of the residuals."""
        for cha in [self.comp_e, self.comp_n, self.comp_u]:
            varh = cha.res.T.dot(np.linalg.pinv(cha.C)).dot(cha.res)/(len(cha.res)-len(cha.xh))
            Chh = varh*np.linalg.pinv(cha.A.T.dot(np.linalg.pinv(cha.C)).dot(cha.A))
            cha.Cest = Chh
            # NOTE(review): the NaN check below is computed but never used.
            array_sum = np.sum(Chh)
            array_has_nan = np.isnan(array_sum)
    #Show on screen and save on file the results
    def results(self):
        """Print the estimated parameters with 1-sigma uncertainties and
        write them to '<site>.res'.
        NOTE(review): uses the module-level globals `site` and `env` instead
        of self.site/self -- hidden coupling with the __main__ block."""
        with open(site+'.res', 'w', encoding="utf-8") as out:
            out.write(f'Site: {env.site}\n')
            out.write(f'Start: {env.MJD[0]} MJD\n')
            out.write(f'End: {env.MJD[-1]} MJD\n')
            out.write('Stochastic model: x(t)=A0+Av*t+\u03A3A1(T)*sin(2\u03C0t/T)+\u03A3A2(T)*sin(2\u03C0t/T)+\u03A3Aco(Tco)*H(t-Tco)\n')
            for cha, chaname in [[self.comp_e, 'EAST'], [self.comp_n, 'NORTH'], [self.comp_u, 'UP']]:
                sd = np.sqrt(np.diag(cha.Cest))
                varlist = ['A0', 'Av']
                for P in cha.periodlist:
                    varlist.append(f'A1(T={np.round(P, 1)} d)')
                    varlist.append(f'A2(T={np.round(P, 1)} d)')
                for T in self.disclist:
                    varlist.append(f'ACO(MJD={T})')
                print(f'{chaname} COMPONENT RESULTS:')
                out.write(f'{chaname} COMPONENT RESULTS:\n')
                for res in enumerate(cha.xh):
                    print(f'{varlist[res[0]]} : {np.round(res[1], 5)} \u00B1 {np.round(sd[res[0]], 5)} m')
                    out.write(f'{varlist[res[0]]} : {np.round(res[1], 5)} \u00B1 {np.round(sd[res[0]], 5)} m\n')
                print('='*50)
                out.write('='*50+'\n')
#Main program
if __name__ == '__main__':
    #Proper call of the script
    if len(sys.argv) == 1:
        print("You must specify the station name!")
        sys.exit(0)
    #Specify the number of observation to use (from the newest)
    lastdata = 1817
    #Color-noise stochastic model?
    cncheck = True
    #Reading data from the file
    # NOTE: `site` is also read as a global by envdata.results().
    site = sys.argv[1]
    datafile = site+'.tenv3'
    df = pd.read_csv(datafile, header=0, skipinitialspace=True, delim_whitespace=True)
    env = envdata()
    env.data(df.tail(lastdata))
    #Defining known discontinuities (from Database of Potential Step Discontinuities)
    # NOTE(review): `columnlist` is never used.
    columnlist = ['site', 'date']
    try:
        steps = pd.read_csv('steps.txt', header=None, skipinitialspace=True, delim_whitespace=True, usecols=[0,1,2])
    except:
        # NOTE(review): bare except only prints a message; `steps` stays
        # undefined and the loop below then raises NameError. Consider
        # exiting here instead.
        print("You must have the Database of Potential Step Discontinuities file in the current folder!")
    monthlist = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
    for index, row in steps.iterrows():
        if row[0] == site:
            # Parse the 'yyMMMdd' date token (e.g. '06SEP14').
            day = int(row[1][5:7])
            month = monthlist.index(row[1][2:5])+1
            year = int(row[1][0:2])
            year = year + 2000 if year<50 else year+1900
            # Civil date -> Modified Julian Date (day-count formula).
            disctime = int(np.round(367 * year - 7 * (year + (month + 9)/12)/4 + 275 * month/9 + day - 678986.5))
            if disctime not in env.disclist:
                env.disclist.append(disctime)
    #Correcting the data for gaps (interpolated) & other preprocessing steps
    env.pre_proc()
    #White noise model LSE
    env.design_matrix()
    env.C_update()
    env.LSE()
    env.residuals()
    env.periodicities()
    #Color noise model LSE
    if cncheck == True:
        env.KPSS_test()
        env.resACF()
        env.C_update()
        env.LSE()
        env.residuals()
    #Covariance matrix of estimated parameters
    env.C_est()
    #Showing/Saving the results
    env.results()
    #Plotting the data and LSE fitted model
    fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex = True)
    ax1.plot(env.MJD, env.east)
    ax1.plot(env.MJD, env.comp_e.A.dot(env.comp_e.xh))
    ax1.set_ylabel('East [m]')
    ax1.grid()
    ax2.plot(env.MJD, env.north)
    ax2.plot(env.MJD, env.comp_n.A.dot(env.comp_n.xh))
    ax2.set_ylabel('North [m]')
    ax2.grid()
    ax3.plot(env.MJD, env.up)
    ax3.plot(env.MJD, env.comp_u.A.dot(env.comp_u.xh))
    ax3.set_ylabel('Up [m]')
    ax3.set_xlabel('MJD [d]')
    ax3.grid()
    plt.show()
| [
"numpy.linalg.pinv",
"pandas.read_csv",
"scipy.interpolate.interp1d",
"numpy.array",
"sys.exit",
"numpy.sin",
"numpy.heaviside",
"numpy.exp",
"scipy.signal.sosfiltfilt",
"warnings.simplefilter",
"numpy.round",
"numpy.abs",
"statsmodels.tsa.stattools.kpss",
"numpy.argmax",
"numpy.isnan",
... | [((727, 789), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (748, 789), False, 'import warnings\n'), ((790, 843), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'InterpolationWarning'], {}), "('ignore', InterpolationWarning)\n", (811, 843), False, 'import warnings\n'), ((844, 900), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (865, 900), False, 'import warnings\n'), ((901, 949), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'OptimizeWarning'], {}), "('ignore', OptimizeWarning)\n", (922, 949), False, 'import warnings\n'), ((11075, 11152), 'pandas.read_csv', 'pd.read_csv', (['datafile'], {'header': '(0)', 'skipinitialspace': '(True)', 'delim_whitespace': '(True)'}), '(datafile, header=0, skipinitialspace=True, delim_whitespace=True)\n', (11086, 11152), True, 'import pandas as pd\n'), ((12737, 12768), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)'}), '(3, 1, sharex=True)\n', (12749, 12768), True, 'import matplotlib.pyplot as plt\n'), ((13222, 13232), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13230, 13232), True, 'import matplotlib.pyplot as plt\n'), ((10821, 10832), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (10829, 10832), False, 'import sys\n'), ((11351, 11457), 'pandas.read_csv', 'pd.read_csv', (['"""steps.txt"""'], {'header': 'None', 'skipinitialspace': '(True)', 'delim_whitespace': '(True)', 'usecols': '[0, 1, 2]'}), "('steps.txt', header=None, skipinitialspace=True,\n delim_whitespace=True, usecols=[0, 1, 2])\n", (11362, 11457), True, 'import pandas as pd\n'), ((3958, 3984), 'scipy.signal.sosfiltfilt', 'signal.sosfiltfilt', (['sos', 'y'], {}), '(sos, y)\n', (3976, 3984), False, 'from scipy import signal\n'), ((5191, 5205), 'scipy.linalg.pinv', 
'linalg.pinv', (['C'], {}), '(C)\n', (5202, 5205), False, 'from scipy import linalg\n'), ((5809, 5829), 'numpy.sum', 'np.sum', (['(cha.res ** 2)'], {}), '(cha.res ** 2)\n', (5815, 5829), True, 'import numpy as np\n'), ((6680, 6693), 'statsmodels.tsa.stattools.kpss', 'kpss', (['cha.res'], {}), '(cha.res)\n', (6684, 6693), False, 'from statsmodels.tsa.stattools import kpss, acf\n'), ((9274, 9285), 'numpy.sum', 'np.sum', (['Chh'], {}), '(Chh)\n', (9280, 9285), True, 'import numpy as np\n'), ((9314, 9333), 'numpy.isnan', 'np.isnan', (['array_sum'], {}), '(array_sum)\n', (9322, 9333), True, 'import numpy as np\n'), ((3546, 3571), 'scipy.interpolate.interp1d', 'interp1d', (['self.MJD', 'value'], {}), '(self.MJD, value)\n', (3554, 3571), False, 'from scipy.interpolate import interp1d\n'), ((5875, 5943), 'scipy.signal.periodogram', 'signal.periodogram', (['cha.res', '(1)'], {'window': '"""hamming"""', 'scaling': '"""spectrum"""'}), "(cha.res, 1, window='hamming', scaling='spectrum')\n", (5893, 5943), False, 'from scipy import signal\n'), ((6858, 6869), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (6866, 6869), False, 'import sys\n'), ((7896, 7916), 'numpy.sum', 'np.sum', (['((x - y) ** 2)'], {}), '((x - y) ** 2)\n', (7902, 7916), True, 'import numpy as np\n'), ((7958, 7976), 'numpy.array', 'np.array', (['self.MJD'], {}), '(self.MJD)\n', (7966, 7976), True, 'import numpy as np\n'), ((8065, 8096), 'scipy.optimize.curve_fit', 'curve_fit', (['f', 't[1:N]', 'eacf[1:N]'], {}), '(f, t[1:N], eacf[1:N])\n', (8074, 8096), False, 'from scipy.optimize import curve_fit, OptimizeWarning\n'), ((8544, 8561), 'numpy.diag', 'np.diag', (['(sig ** 2)'], {}), '(sig ** 2)\n', (8551, 8561), True, 'import numpy as np\n'), ((8631, 8647), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (8639, 8647), True, 'import numpy as np\n'), ((11951, 12046), 'numpy.round', 'np.round', (['(367 * year - 7 * (year + (month + 9) / 12) / 4 + 275 * month / 9 + day - \n 678986.5)'], {}), '(367 * year - 
7 * (year + (month + 9) / 12) / 4 + 275 * month / 9 +\n day - 678986.5)\n', (11959, 12046), True, 'import numpy as np\n'), ((4498, 4515), 'numpy.sin', 'np.sin', (['(omega * t)'], {}), '(omega * t)\n', (4504, 4515), True, 'import numpy as np\n'), ((4550, 4567), 'numpy.cos', 'np.cos', (['(omega * t)'], {}), '(omega * t)\n', (4556, 4567), True, 'import numpy as np\n'), ((6143, 6163), 'numpy.sum', 'np.sum', (['(cha.res ** 2)'], {}), '(cha.res ** 2)\n', (6149, 6163), True, 'import numpy as np\n'), ((6438, 6453), 'numpy.sum', 'np.sum', (['cha.res'], {}), '(cha.res)\n', (6444, 6453), True, 'import numpy as np\n'), ((7186, 7202), 'numpy.sign', 'np.sign', (['eacf[i]'], {}), '(eacf[i])\n', (7193, 7202), True, 'import numpy as np\n'), ((7205, 7225), 'numpy.sign', 'np.sign', (['eacf[i + 1]'], {}), '(eacf[i + 1])\n', (7212, 7225), True, 'import numpy as np\n'), ((7612, 7629), 'numpy.cos', 'np.cos', (['(1 - c * t)'], {}), '(1 - c * t)\n', (7618, 7629), True, 'import numpy as np\n'), ((7699, 7712), 'numpy.cos', 'np.cos', (['(c * t)'], {}), '(c * t)\n', (7705, 7712), True, 'import numpy as np\n'), ((7764, 7778), 'numpy.exp', 'np.exp', (['(-b * t)'], {}), '(-b * t)\n', (7770, 7778), True, 'import numpy as np\n'), ((7830, 7849), 'numpy.exp', 'np.exp', (['(-b * t ** 2)'], {}), '(-b * t ** 2)\n', (7836, 7849), True, 'import numpy as np\n'), ((8875, 8892), 'numpy.diag', 'np.diag', (['(sig ** 2)'], {}), '(sig ** 2)\n', (8882, 8892), True, 'import numpy as np\n'), ((9890, 9907), 'numpy.diag', 'np.diag', (['cha.Cest'], {}), '(cha.Cest)\n', (9897, 9907), True, 'import numpy as np\n'), ((4683, 4709), 'numpy.heaviside', 'np.heaviside', (['(t - tdisc)', '(1)'], {}), '(t - tdisc, 1)\n', (4695, 4709), True, 'import numpy as np\n'), ((7434, 7448), 'numpy.exp', 'np.exp', (['(-b * t)'], {}), '(-b * t)\n', (7440, 7448), True, 'import numpy as np\n'), ((7515, 7534), 'numpy.exp', 'np.exp', (['(-b * t ** 2)'], {}), '(-b * t ** 2)\n', (7521, 7534), True, 'import numpy as np\n'), ((7599, 7613), 
'numpy.exp', 'np.exp', (['(-b * t)'], {}), '(-b * t)\n', (7605, 7613), True, 'import numpy as np\n'), ((7683, 7702), 'numpy.exp', 'np.exp', (['(-b * t ** 2)'], {}), '(-b * t ** 2)\n', (7689, 7702), True, 'import numpy as np\n'), ((8751, 8764), 'numpy.abs', 'np.abs', (['(i - j)'], {}), '(i - j)\n', (8757, 8764), True, 'import numpy as np\n'), ((5987, 5999), 'numpy.argmax', 'np.argmax', (['P'], {}), '(P)\n', (5996, 5999), True, 'import numpy as np\n'), ((9075, 9096), 'numpy.linalg.pinv', 'np.linalg.pinv', (['cha.C'], {}), '(cha.C)\n', (9089, 9096), True, 'import numpy as np\n'), ((9188, 9209), 'numpy.linalg.pinv', 'np.linalg.pinv', (['cha.C'], {}), '(cha.C)\n', (9202, 9209), True, 'import numpy as np\n'), ((10032, 10046), 'numpy.round', 'np.round', (['P', '(1)'], {}), '(P, 1)\n', (10040, 10046), True, 'import numpy as np\n'), ((10096, 10110), 'numpy.round', 'np.round', (['P', '(1)'], {}), '(P, 1)\n', (10104, 10110), True, 'import numpy as np\n'), ((10420, 10439), 'numpy.round', 'np.round', (['res[1]', '(5)'], {}), '(res[1], 5)\n', (10428, 10439), True, 'import numpy as np\n'), ((10449, 10472), 'numpy.round', 'np.round', (['sd[res[0]]', '(5)'], {}), '(sd[res[0]], 5)\n', (10457, 10472), True, 'import numpy as np\n'), ((10531, 10550), 'numpy.round', 'np.round', (['res[1]', '(5)'], {}), '(res[1], 5)\n', (10539, 10550), True, 'import numpy as np\n'), ((10560, 10583), 'numpy.round', 'np.round', (['sd[res[0]]', '(5)'], {}), '(sd[res[0]], 5)\n', (10568, 10583), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
class obeservation(object):
    """Simple 2-D lidar simulator over an integer occupancy grid.

    Cell conventions used by observe(): values 0 and 2 are traversable,
    anything else stops a beam; cells traced by beams are painted with 4.
    NOTE(review): the (misspelled) class name is kept because callers
    instantiate `obeservation` directly.
    """
    def __init__(self, angle=360, lidarRange=300, accuracy=1, beems=1080):
        # angle: the angular range of lidar (degrees)
        # lidarRange: the maximum distance of lidar's capacity
        # accuracy: increment step size of each laser beem
        # beems: how many beems exist in the range of the angle
        self.angle = np.pi / 180.0 * angle  # stored in radians
        self.range = lidarRange
        self.accuracy = accuracy
        self.beems = beems

    def render(self, mymap):
        """Display the raw occupancy map."""
        plt.imshow(mymap)
        plt.show()

    def observe(self, mymap, location, theta):
        """March all beams outward from `location` at heading `theta`.

        Mutates `mymap` in place (beam cells set to 4). Returns
        (distance_obs, intensity_obs, beemsLayer, lidar_map): per-beam hit
        distances, last map value seen by each live beam, a 0/1 layer of
        traced cells, and a copy of the painted map.
        """
        angle_start = theta - self.angle / 2
        angle_end = theta + self.angle / 2
        angles = np.linspace(angle_start, angle_end, num=self.beems)
        beemsLayer = np.zeros_like(mymap)
        distance_obs = np.zeros(self.beems)
        intensity_obs = np.zeros(self.beems)
        # Fix: removed the unused local `objects = [1, 3]`.
        cosangle = np.cos(angles)
        sinangle = np.sin(angles)
        # history[i] == 1 while beam i has not yet hit an obstacle.
        history = np.ones(self.beems)
        distance = 0
        while distance < self.range:
            distance += self.accuracy
            x = np.int32(location[0] + distance * cosangle)
            y = np.int32(location[1] + distance * sinangle)
            # Clamp beam samples to the map bounds.
            x = np.clip(x, 0, mymap.shape[0] - 1)
            y = np.clip(y, 0, mymap.shape[1] - 1)
            intensity_obs[history == 1] = mymap[x[history == 1], y[history == 1]]
            # A beam stays alive only while it sees traversable cells (0 or 2).
            history = history * np.logical_or(mymap[x, y] == 0, mymap[x, y] == 2)
            distance_obs[history == 1] = distance
            beemsLayer = self.drawPoints(beemsLayer, x, y, history=history, value=1)
        mymap[beemsLayer == 1] = 4
        lidar_map = mymap.copy()
        return distance_obs, intensity_obs, beemsLayer, lidar_map

    def findTarget(self, mymap, x, y, value=3):
        """True if cell (x, y) holds the target value (default 3)."""
        if mymap[int(x)][int(y)] == value:
            return True
        else:
            return False

    def findObject(self, mymap, x, y, value=1):
        """True if cell (x, y) holds the object value (default 1)."""
        if mymap[int(x)][int(y)] == value:
            return True
        else:
            return False

    def find_obstacle(self, mymap, x, y, value=1):
        """Vectorized variant: elementwise comparison of cells (x, y)."""
        return mymap[x, y] == value

    def drawPoint(self, mymap, x, y, value=2):
        """Set a single cell and return the (mutated) map."""
        mymap[int(x)][int(y)] = value
        return mymap

    def drawPoints(self, mymap, x, y, history=None, value=4):
        """Set all cells (x, y) whose beam is still alive (history == 1).

        NOTE(review): the default history=None would crash on the mask
        below; callers in this file always pass an array.
        """
        mymap[x[history == 1], y[history == 1]] = value
        return mymap
def main():
    """Demo: trace a full-circle lidar scan on a toy map and display it."""
    # 100x100 playground with two rectangular obstacles.
    grid_shape = (100, 100)
    world = np.zeros(grid_shape)
    world[70:90, 40:60] = 1
    world[10:30, 40:60] = 1
    sensor = obeservation(angle=360, beems=500)
    _, intensities, beams, lidar_view = sensor.observe(mymap=world, location=(50, 50), theta=0)
    print(intensities)
    # Mark the traced beam cells on the working map as well.
    world[beams == 1] = 2
    plt.imshow(lidar_view)
    plt.show()


if __name__ == "__main__":
    main()
| [
"matplotlib.pyplot.imshow",
"numpy.clip",
"numpy.ones",
"numpy.int32",
"numpy.logical_or",
"numpy.zeros",
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"numpy.zeros_like",
"matplotlib.pyplot.show"
] | [((2626, 2641), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (2634, 2641), True, 'import numpy as np\n'), ((2898, 2919), 'matplotlib.pyplot.imshow', 'plt.imshow', (['lidar_map'], {}), '(lidar_map)\n', (2908, 2919), True, 'import matplotlib.pyplot as plt\n'), ((2924, 2934), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2932, 2934), True, 'import matplotlib.pyplot as plt\n'), ((558, 575), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mymap'], {}), '(mymap)\n', (568, 575), True, 'import matplotlib.pyplot as plt\n'), ((584, 594), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (592, 594), True, 'import matplotlib.pyplot as plt\n'), ((748, 799), 'numpy.linspace', 'np.linspace', (['angle_start', 'angle_end'], {'num': 'self.beems'}), '(angle_start, angle_end, num=self.beems)\n', (759, 799), True, 'import numpy as np\n'), ((821, 841), 'numpy.zeros_like', 'np.zeros_like', (['mymap'], {}), '(mymap)\n', (834, 841), True, 'import numpy as np\n'), ((865, 885), 'numpy.zeros', 'np.zeros', (['self.beems'], {}), '(self.beems)\n', (873, 885), True, 'import numpy as np\n'), ((910, 930), 'numpy.zeros', 'np.zeros', (['self.beems'], {}), '(self.beems)\n', (918, 930), True, 'import numpy as np\n'), ((975, 989), 'numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (981, 989), True, 'import numpy as np\n'), ((1009, 1023), 'numpy.sin', 'np.sin', (['angles'], {}), '(angles)\n', (1015, 1023), True, 'import numpy as np\n'), ((1042, 1061), 'numpy.ones', 'np.ones', (['self.beems'], {}), '(self.beems)\n', (1049, 1061), True, 'import numpy as np\n'), ((1175, 1218), 'numpy.int32', 'np.int32', (['(location[0] + distance * cosangle)'], {}), '(location[0] + distance * cosangle)\n', (1183, 1218), True, 'import numpy as np\n'), ((1235, 1278), 'numpy.int32', 'np.int32', (['(location[1] + distance * sinangle)'], {}), '(location[1] + distance * sinangle)\n', (1243, 1278), True, 'import numpy as np\n'), ((1322, 1355), 'numpy.clip', 'np.clip', (['x', '(0)', 
'(mymap.shape[0] - 1)'], {}), '(x, 0, mymap.shape[0] - 1)\n', (1329, 1355), True, 'import numpy as np\n'), ((1372, 1405), 'numpy.clip', 'np.clip', (['y', '(0)', '(mymap.shape[1] - 1)'], {}), '(y, 0, mymap.shape[1] - 1)\n', (1379, 1405), True, 'import numpy as np\n'), ((1580, 1629), 'numpy.logical_or', 'np.logical_or', (['(mymap[x, y] == 0)', '(mymap[x, y] == 2)'], {}), '(mymap[x, y] == 0, mymap[x, y] == 2)\n', (1593, 1629), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# System imports
import argparse
import sys
import serial
# Data processing imports
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import seaborn as sns
font = {'family' : 'normal',
        'weight' : 'bold'}
# Apply the font settings globally to every Matplotlib text element.
# NOTE(review): 'normal' is a font *style*, not a family name -- Matplotlib
# will warn that the family is not found and fall back to its default;
# confirm whether e.g. 'sans-serif' was intended.
import matplotlib
matplotlib.rc('font', **font)
def main(delays_file):
    """Load delay samples from a .npy file, plot the series, and plot the
    histogram together with a fitted Gaussian density.

    Args:
        delays_file: path to a numpy array of delays in seconds.
    """
    delays = np.load(delays_file)
    delays *= 1e6  # seconds -> microseconds for the plots
    plt.plot(delays)
    axes = plt.gca()
    axes.set_xlim([0, len(delays)])
    # Raw strings avoid the invalid '\m' escape in the TeX label.
    plt.ylabel(r'Vrijeme kašnjenja (${\mu}s$)', fontsize=20)
    plt.xlabel('Uzorci', fontsize=20)
    plt.show()

    plt.figure(0)
    # Fix: `normed` was removed in Matplotlib 3.1; `density` is the
    # supported equivalent.
    n, bins, patches = plt.hist(delays, 50, density=True,
                                histtype='step')
    # Fix: matplotlib.mlab.normpdf was removed in Matplotlib 3.3; evaluate
    # the Gaussian pdf directly with NumPy.
    mu, sigma = np.mean(delays), np.std(delays)
    y = np.exp(-0.5 * ((bins - mu) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))
    plt.show()
    plt.figure(1)
    plt.plot(bins, y)
    plt.xlabel(r'Vrijeme kašnjenja (${\mu}s$)', fontsize=20)
    plt.ylabel('Funkcija gustoće vjerojatnosti', fontsize=20)
    plt.show()
if __name__ == "__main__":
    # Entry point: only the --delays_file option is consumed; any other
    # command-line arguments are silently ignored.
    cli = argparse.ArgumentParser()
    cli.add_argument('--delays_file', type=str, default='novo.npy')
    known_args, _unused = cli.parse_known_args()
    main(known_args.delays_file)
| [
"numpy.mean",
"matplotlib.pyplot.hist",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.rc",
"numpy.std",
"numpy.load",
"matplotlib.pyplot.show"
] | [((284, 313), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (297, 313), False, 'import matplotlib\n'), ((352, 372), 'numpy.load', 'np.load', (['delays_file'], {}), '(delays_file)\n', (359, 372), True, 'import numpy as np\n'), ((398, 414), 'matplotlib.pyplot.plot', 'plt.plot', (['delays'], {}), '(delays)\n', (406, 414), True, 'import matplotlib.pyplot as plt\n'), ((426, 435), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (433, 435), True, 'import matplotlib.pyplot as plt\n'), ((476, 532), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Vrijeme kašnjenja (${\\\\mu}s$)"""'], {'fontsize': '(20)'}), "('Vrijeme kašnjenja (${\\\\mu}s$)', fontsize=20)\n", (486, 532), True, 'import matplotlib.pyplot as plt\n'), ((536, 569), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Uzorci"""'], {'fontsize': '(20)'}), "('Uzorci', fontsize=20)\n", (546, 569), True, 'import matplotlib.pyplot as plt\n'), ((574, 584), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (582, 584), True, 'import matplotlib.pyplot as plt\n'), ((590, 603), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (600, 603), True, 'import matplotlib.pyplot as plt\n'), ((627, 677), 'matplotlib.pyplot.hist', 'plt.hist', (['delays', '(50)'], {'normed': '(True)', 'histtype': '"""step"""'}), "(delays, 50, normed=True, histtype='step')\n", (635, 677), True, 'import matplotlib.pyplot as plt\n'), ((772, 782), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (780, 782), True, 'import matplotlib.pyplot as plt\n'), ((787, 800), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (797, 800), True, 'import matplotlib.pyplot as plt\n'), ((805, 822), 'matplotlib.pyplot.plot', 'plt.plot', (['bins', 'y'], {}), '(bins, y)\n', (813, 822), True, 'import matplotlib.pyplot as plt\n'), ((827, 883), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Vrijeme kašnjenja (${\\\\mu}s$)"""'], {'fontsize': '(20)'}), "('Vrijeme kašnjenja (${\\\\mu}s$)', 
fontsize=20)\n", (837, 883), True, 'import matplotlib.pyplot as plt\n'), ((887, 944), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Funkcija gustoće vjerojatnosti"""'], {'fontsize': '(20)'}), "('Funkcija gustoće vjerojatnosti', fontsize=20)\n", (897, 944), True, 'import matplotlib.pyplot as plt\n'), ((949, 959), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (957, 959), True, 'import matplotlib.pyplot as plt\n'), ((1002, 1027), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1025, 1027), False, 'import argparse\n'), ((725, 740), 'numpy.mean', 'np.mean', (['delays'], {}), '(delays)\n', (732, 740), True, 'import numpy as np\n'), ((751, 765), 'numpy.std', 'np.std', (['delays'], {}), '(delays)\n', (757, 765), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# coding: utf-8
"""
@file: writer.py
@description:
@author: <NAME>
@email: <EMAIL>
@last modified by: <NAME>
change log:
2021/07/05 create file.
2022/02/09 save raw data and result
"""
from stereo.core.stereo_exp_data import StereoExpData
from stereo.log_manager import logger
from scipy.sparse import csr_matrix, issparse
import h5py
from stereo.io import h5ad
import pickle
import numpy as np
from copy import deepcopy
def write_h5ad(data, result=True, raw=True):
    """
    write the StereoExpData into h5ad file.
    :param data: the StereoExpData object; ``data.output`` must be set to the target path.
    :param result: whether to save result and res_key (only keys listed in ``supported_keys``).
    :param raw: whether to save raw data kept in ``data.tl.raw``.
    :return:
    """
    if data.output is None:
        # NOTE(review): execution continues after this log line; h5py.File(None)
        # will then raise — consider raising explicitly here.
        logger.error("The output path must be set before writing.")
    with h5py.File(data.output, mode='w') as f:
        h5ad.write(data.genes, f, 'genes')
        h5ad.write(data.cells, f, 'cells')
        h5ad.write(data.position, f, 'position')
        if issparse(data.exp_matrix):
            sp_format = 'csr' if isinstance(data.exp_matrix, csr_matrix) else 'csc'
            h5ad.write(data.exp_matrix, f, 'exp_matrix', sp_format)
        else:
            h5ad.write(data.exp_matrix, f, 'exp_matrix')
        h5ad.write(data.bin_type, f, 'bin_type')
        if raw is True:
            same_genes = np.array_equal(data.tl.raw.gene_names, data.gene_names)
            # BUGFIX: previously compared gene_names twice (copy-paste error),
            # so same_cells never reflected the cells at all.
            # Assumes StereoExpData exposes cell_names analogous to gene_names.
            same_cells = np.array_equal(data.tl.raw.cell_names, data.cell_names)
            if not same_genes:
                # if raw genes differ from genes
                h5ad.write(data.tl.raw.genes, f, 'genes@raw')
            if not same_cells:
                # if raw cells differ from cells
                h5ad.write(data.tl.raw.cells, f, 'cells@raw')
            if not (same_genes and same_cells):
                # BUGFIX: position must be written when EITHER genes or cells differ;
                # the old `not (same_genes | same_cells)` only fired when both differed.
                h5ad.write(data.tl.raw.position, f, 'position@raw')
            # save raw exp_matrix
            if issparse(data.tl.raw.exp_matrix):
                sp_format = 'csr' if isinstance(data.tl.raw.exp_matrix, csr_matrix) else 'csc'
                h5ad.write(data.tl.raw.exp_matrix, f, 'exp_matrix@raw', sp_format)
            else:
                h5ad.write(data.tl.raw.exp_matrix, f, 'exp_matrix@raw')
    if result is True:
        # write key_record, restricted to the analyses we know how to serialize
        key_record = deepcopy(data.tl.key_record)
        supported_keys = ['hvg', 'pca', 'neighbors', 'umap', 'cluster', 'marker_genes']  # 'sct', 'spatial_hotspot'
        for analysis_key in data.tl.key_record.keys():
            if analysis_key not in supported_keys:
                key_record.pop(analysis_key)
        h5ad.write_key_record(f, 'key_record', key_record)
        for analysis_key, res_keys in key_record.items():
            for res_key in res_keys:
                # write result[res_key]
                if analysis_key == 'hvg':
                    # mean_bin holds Interval objects which h5py cannot store -> str
                    hvg_df = deepcopy(data.tl.result[res_key])
                    hvg_df.mean_bin = [str(interval) for interval in data.tl.result[res_key].mean_bin]
                    h5ad.write(hvg_df, f, f'{res_key}@hvg')  # -> dataframe
                if analysis_key in ['pca', 'umap']:
                    h5ad.write(data.tl.result[res_key].values, f, f'{res_key}@{analysis_key}')  # -> array
                if analysis_key == 'neighbors':
                    for neighbor_key, value in data.tl.result[res_key].items():
                        if issparse(value):
                            sp_format = 'csr' if isinstance(value, csr_matrix) else 'csc'
                            h5ad.write(value, f, f'{neighbor_key}@{res_key}@neighbors', sp_format)  # -> csr_matrix
                        else:
                            h5ad.write(value, f, f'{neighbor_key}@{res_key}@neighbors')  # -> Neighbors
                if analysis_key == 'cluster':
                    h5ad.write(data.tl.result[res_key], f, f'{res_key}@cluster')  # -> dataframe
                if analysis_key == 'marker_genes':
                    clusters = list(data.tl.result[res_key].keys())
                    h5ad.write(clusters, f, f'clusters_record@{res_key}@marker_genes')  # -> list
                    for cluster, df in data.tl.result[res_key].items():
                        h5ad.write(df, f, f'{cluster}@{res_key}@marker_genes')  # -> dataframe
                if analysis_key == 'sct':
                    # serialization of SCT results (StereoExpData + dict) is not implemented yet
                    pass
                if analysis_key == 'spatial_hotspot':
                    # serialization of Hotspot objects is not implemented yet
                    pass
def write(data, output=None, output_type='h5ad', *args, **kwargs):
    """
    Serialize a StereoExpData object to disk.

    :param: data: the StereoExpData object to be written; anything else raises TypeError.
    :param: output: optional output path; when given it replaces StereoExpData's output attribute.
    :param: output_type: the serialization format; only 'h5ad' is currently handled.
        Default setting is h5ad.
    :return:
    """
    # reject anything that is not a StereoExpData up front
    if not isinstance(data, StereoExpData):
        raise TypeError
    # an explicit path overrides whatever the object already carries
    if output is not None:
        data.output = output
    if output_type == 'h5ad':
        write_h5ad(data, *args, **kwargs)
def save_pkl(obj, output):
    """
    Pickle *obj* to the file at path *output*.

    :param obj: any picklable object.
    :param output: destination file path (opened in binary write mode).
    :return:
    """
    # the context manager already closes the file; the old trailing
    # f.close() after the with-block was redundant and has been removed
    with open(output, "wb") as f:
        pickle.dump(obj, f)
| [
"stereo.io.h5ad.write",
"pickle.dump",
"stereo.io.h5ad.write_key_record",
"scipy.sparse.issparse",
"h5py.File",
"stereo.log_manager.logger.error",
"numpy.array_equal",
"copy.deepcopy"
] | [((750, 809), 'stereo.log_manager.logger.error', 'logger.error', (['"""The output path must be set before writing."""'], {}), "('The output path must be set before writing.')\n", (762, 809), False, 'from stereo.log_manager import logger\n'), ((819, 851), 'h5py.File', 'h5py.File', (['data.output'], {'mode': '"""w"""'}), "(data.output, mode='w')\n", (828, 851), False, 'import h5py\n'), ((866, 900), 'stereo.io.h5ad.write', 'h5ad.write', (['data.genes', 'f', '"""genes"""'], {}), "(data.genes, f, 'genes')\n", (876, 900), False, 'from stereo.io import h5ad\n'), ((909, 943), 'stereo.io.h5ad.write', 'h5ad.write', (['data.cells', 'f', '"""cells"""'], {}), "(data.cells, f, 'cells')\n", (919, 943), False, 'from stereo.io import h5ad\n'), ((952, 992), 'stereo.io.h5ad.write', 'h5ad.write', (['data.position', 'f', '"""position"""'], {}), "(data.position, f, 'position')\n", (962, 992), False, 'from stereo.io import h5ad\n'), ((1004, 1029), 'scipy.sparse.issparse', 'issparse', (['data.exp_matrix'], {}), '(data.exp_matrix)\n', (1012, 1029), False, 'from scipy.sparse import csr_matrix, issparse\n'), ((1262, 1302), 'stereo.io.h5ad.write', 'h5ad.write', (['data.bin_type', 'f', '"""bin_type"""'], {}), "(data.bin_type, f, 'bin_type')\n", (1272, 1302), False, 'from stereo.io import h5ad\n'), ((6479, 6498), 'pickle.dump', 'pickle.dump', (['obj', 'f'], {}), '(obj, f)\n', (6490, 6498), False, 'import pickle\n'), ((1127, 1182), 'stereo.io.h5ad.write', 'h5ad.write', (['data.exp_matrix', 'f', '"""exp_matrix"""', 'sp_format'], {}), "(data.exp_matrix, f, 'exp_matrix', sp_format)\n", (1137, 1182), False, 'from stereo.io import h5ad\n'), ((1209, 1253), 'stereo.io.h5ad.write', 'h5ad.write', (['data.exp_matrix', 'f', '"""exp_matrix"""'], {}), "(data.exp_matrix, f, 'exp_matrix')\n", (1219, 1253), False, 'from stereo.io import h5ad\n'), ((1353, 1408), 'numpy.array_equal', 'np.array_equal', (['data.tl.raw.gene_names', 'data.gene_names'], {}), '(data.tl.raw.gene_names, data.gene_names)\n', (1367, 
1408), True, 'import numpy as np\n'), ((1434, 1489), 'numpy.array_equal', 'np.array_equal', (['data.tl.raw.gene_names', 'data.gene_names'], {}), '(data.tl.raw.gene_names, data.gene_names)\n', (1448, 1489), True, 'import numpy as np\n'), ((2002, 2034), 'scipy.sparse.issparse', 'issparse', (['data.tl.raw.exp_matrix'], {}), '(data.tl.raw.exp_matrix)\n', (2010, 2034), False, 'from scipy.sparse import csr_matrix, issparse\n'), ((2388, 2416), 'copy.deepcopy', 'deepcopy', (['data.tl.key_record'], {}), '(data.tl.key_record)\n', (2396, 2416), False, 'from copy import deepcopy\n'), ((2711, 2761), 'stereo.io.h5ad.write_key_record', 'h5ad.write_key_record', (['f', '"""key_record"""', 'key_record'], {}), "(f, 'key_record', key_record)\n", (2732, 2761), False, 'from stereo.io import h5ad\n'), ((1586, 1631), 'stereo.io.h5ad.write', 'h5ad.write', (['data.tl.raw.genes', 'f', '"""genes@raw"""'], {}), "(data.tl.raw.genes, f, 'genes@raw')\n", (1596, 1631), False, 'from stereo.io import h5ad\n'), ((1728, 1773), 'stereo.io.h5ad.write', 'h5ad.write', (['data.tl.raw.cells', 'f', '"""cells@raw"""'], {}), "(data.tl.raw.cells, f, 'cells@raw')\n", (1738, 1773), False, 'from stereo.io import h5ad\n'), ((1901, 1952), 'stereo.io.h5ad.write', 'h5ad.write', (['data.tl.raw.position', 'f', '"""position@raw"""'], {}), "(data.tl.raw.position, f, 'position@raw')\n", (1911, 1952), False, 'from stereo.io import h5ad\n'), ((2147, 2213), 'stereo.io.h5ad.write', 'h5ad.write', (['data.tl.raw.exp_matrix', 'f', '"""exp_matrix@raw"""', 'sp_format'], {}), "(data.tl.raw.exp_matrix, f, 'exp_matrix@raw', sp_format)\n", (2157, 2213), False, 'from stereo.io import h5ad\n'), ((2248, 2303), 'stereo.io.h5ad.write', 'h5ad.write', (['data.tl.raw.exp_matrix', 'f', '"""exp_matrix@raw"""'], {}), "(data.tl.raw.exp_matrix, f, 'exp_matrix@raw')\n", (2258, 2303), False, 'from stereo.io import h5ad\n'), ((3031, 3064), 'copy.deepcopy', 'deepcopy', (['data.tl.result[res_key]'], {}), '(data.tl.result[res_key])\n', (3039, 3064), 
False, 'from copy import deepcopy\n'), ((3196, 3235), 'stereo.io.h5ad.write', 'h5ad.write', (['hvg_df', 'f', 'f"""{res_key}@hvg"""'], {}), "(hvg_df, f, f'{res_key}@hvg')\n", (3206, 3235), False, 'from stereo.io import h5ad\n'), ((3332, 3406), 'stereo.io.h5ad.write', 'h5ad.write', (['data.tl.result[res_key].values', 'f', 'f"""{res_key}@{analysis_key}"""'], {}), "(data.tl.result[res_key].values, f, f'{res_key}@{analysis_key}')\n", (3342, 3406), False, 'from stereo.io import h5ad\n'), ((4033, 4093), 'stereo.io.h5ad.write', 'h5ad.write', (['data.tl.result[res_key]', 'f', 'f"""{res_key}@cluster"""'], {}), "(data.tl.result[res_key], f, f'{res_key}@cluster')\n", (4043, 4093), False, 'from stereo.io import h5ad\n'), ((4261, 4327), 'stereo.io.h5ad.write', 'h5ad.write', (['clusters', 'f', 'f"""clusters_record@{res_key}@marker_genes"""'], {}), "(clusters, f, f'clusters_record@{res_key}@marker_genes')\n", (4271, 4327), False, 'from stereo.io import h5ad\n'), ((3586, 3601), 'scipy.sparse.issparse', 'issparse', (['value'], {}), '(value)\n', (3594, 3601), False, 'from scipy.sparse import csr_matrix, issparse\n'), ((4443, 4497), 'stereo.io.h5ad.write', 'h5ad.write', (['df', 'f', 'f"""{cluster}@{res_key}@marker_genes"""'], {}), "(df, f, f'{cluster}@{res_key}@marker_genes')\n", (4453, 4497), False, 'from stereo.io import h5ad\n'), ((3729, 3799), 'stereo.io.h5ad.write', 'h5ad.write', (['value', 'f', 'f"""{neighbor_key}@{res_key}@neighbors"""', 'sp_format'], {}), "(value, f, f'{neighbor_key}@{res_key}@neighbors', sp_format)\n", (3739, 3799), False, 'from stereo.io import h5ad\n'), ((3883, 3942), 'stereo.io.h5ad.write', 'h5ad.write', (['value', 'f', 'f"""{neighbor_key}@{res_key}@neighbors"""'], {}), "(value, f, f'{neighbor_key}@{res_key}@neighbors')\n", (3893, 3942), False, 'from stereo.io import h5ad\n')] |
import matplotlib.pyplot as plt
import numpy as np
import pickle
from PIL import Image
def compute_image_data_statistics(data_loader):
    """
    Compute per-channel mean and standard deviation over all images yielded by
    `data_loader` (each batch is a (images, bboxes, labels) triple; only the
    images tensor of shape (batch, channels, ...) is used).

    Returns (mean, std), each a tensor with one entry per channel.
    """
    total_mean, total_std, total_count = 0., 0., 0.
    for batch_images, _bboxes, _labels in data_loader:
        count = batch_images.size(0)
        # flatten every spatial dimension so stats are taken per channel
        flat = batch_images.view(count, batch_images.size(1), -1)
        total_mean = total_mean + flat.mean(2).sum(0)
        total_std = total_std + flat.std(2).sum(0)
        total_count += count
    # average of the per-sample statistics over the whole dataset
    return total_mean / total_count, total_std / total_count
def count_parameters(model):
    """
    Count the trainable (requires_grad) parameters of `model`.
    """
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def pkl_load(file_path):
    """
    Load and return the object pickled in the file at `file_path`.
    """
    # use a context manager so the handle is closed even if unpickling fails;
    # the previous open(...) without `with` leaked the file handle
    with open(file_path, 'rb') as f:
        return pickle.load(f)
def print_and_log(msg, log_file, write_mode='a'):
    """
    Echo `msg` (string) to stdout and record it in `log_file`.

    write_mode: 'a' to append (default) or 'w' to overwrite the log file.
    A newline is added after the message in the file.
    """
    print(msg)
    with open(log_file, write_mode) as handle:
        handle.write(msg + '\n')
def print_confusion_matrix(c, class_names=None):
    """
    Pretty-print a confusion matrix to stdout.

    c: np.array of shape [n_classes, n_classes]; rows are true labels,
       columns are predicted labels.
    class_names: ordered list of n_classes class names; when None (default)
       the names 0, 1, ..., n_classes-1 are used.
    """
    as_text = c.astype(str)
    n_classes = as_text.shape[0]
    if class_names is None:
        class_names = np.arange(n_classes).astype(str)
    header = '\t'.join(class_names)
    print( 'True \\ Pred\t%s' % header )
    for name, row in zip(class_names, as_text):
        print( '%s\t\t%s' % (name, '\t'.join(row)) )
def visualize_bbox(img_path, attn_wt_file, img_save_dir):
    """
    Plot img and show all context bboxes on the img with attention scores
    Target BBox is bold black, context bbox is either green (score >= 0.2) or red (score < 0.2)
    attn_wt_file is a csv file containing 3 rows, 5 + 10*context_size cols
    Each row contains plot data for a target class (Price, Title, Image)
    Cols: 4 bbox coords, 1 label, 2*context_size*4 context bbox coords, 2*context_size attnetion values that sum to 1
    Save 3 files corresponding to 3 classes in img_save_dir (must exist)
    """
    class_names = {0:'BG', 1:'Price', 2:'Title', 3:'Image'}
    img = Image.open(img_path).convert('RGB')
    # each CSV row: [x, y, w, h, label, 2*ctx*4 context coords, 2*ctx attention weights]
    plt_data = np.loadtxt(attn_wt_file, delimiter=',')
    # recover context_size from the column count (cols = 5 + 10*context_size)
    context_size = int((plt_data.shape[1] - 5) / 10)
    plt.rcParams.update({'font.size': 6})
    for row in plt_data:
        plt.imshow(img)
        plt.title('Attention Visualization for class: ' + class_names[int(row[4])])
        ax = plt.gca()
        # target bbox: bold black rectangle at (x, y) with size (w, h)
        ax.add_patch(plt.Rectangle((row[0], row[1]), row[2], row[3], fill=False, edgecolor='black', linewidth=2))
        for c in range(1, 2*context_size+1):
            # context box c occupies columns 4*c+1 .. 4*c+4 as (x, y, w, h)
            if row[4*c+1] == 0 and row[4*c+2] == 0 and row[4*c+3] == 0 and row[4*c+4] == 0:
                # an all-zero quadruple marks a padded (absent) context box
                continue
            # attention weight for box c lives at column 4*(2*context_size+1) + c
            if row[4*(2*context_size+1) + c] >= 0.2:
                # annotate high-attention boxes with their weight in percent
                ax.text(row[4*c+1], row[4*c+2], '%.1f' % (100*row[4*(2*context_size+1) + c]))
                color = 'green'
            else:
                color = 'red'
            ax.add_patch(plt.Rectangle((row[4*c+1], row[4*c+2]), row[4*c+3], row[4*c+4], fill=False, edgecolor=color, linewidth=1))
        plt.axis('off')
        plt.tight_layout()
        # output name: <image stem>_attn_<class name>.png inside img_save_dir
        plt.savefig('%s/%s_attn_%s.png' % (img_save_dir, img_path.rsplit('/',1)[-1][:-4], class_names[int(row[4])]), dpi=300, bbox_inches = 'tight', pad_inches = 0)
        plt.close()
| [
"matplotlib.pyplot.imshow",
"PIL.Image.open",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.close",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.Rectangle",
"matplotlib.pyplot.axis",
"numpy.loadtxt",
"numpy.arange"
] | [((2589, 2628), 'numpy.loadtxt', 'np.loadtxt', (['attn_wt_file'], {'delimiter': '""","""'}), "(attn_wt_file, delimiter=',')\n", (2599, 2628), True, 'import numpy as np\n'), ((2687, 2724), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 6}"], {}), "({'font.size': 6})\n", (2706, 2724), True, 'import matplotlib.pyplot as plt\n'), ((2758, 2773), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2768, 2773), True, 'import matplotlib.pyplot as plt\n'), ((2871, 2880), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2878, 2880), True, 'import matplotlib.pyplot as plt\n'), ((3524, 3539), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3532, 3539), True, 'import matplotlib.pyplot as plt\n'), ((3548, 3566), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3564, 3566), True, 'import matplotlib.pyplot as plt\n'), ((3740, 3751), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3749, 3751), True, 'import matplotlib.pyplot as plt\n'), ((2538, 2558), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (2548, 2558), False, 'from PIL import Image\n'), ((2902, 2998), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(row[0], row[1])', 'row[2]', 'row[3]'], {'fill': '(False)', 'edgecolor': '"""black"""', 'linewidth': '(2)'}), "((row[0], row[1]), row[2], row[3], fill=False, edgecolor=\n 'black', linewidth=2)\n", (2915, 2998), True, 'import matplotlib.pyplot as plt\n'), ((1689, 1709), 'numpy.arange', 'np.arange', (['n_classes'], {}), '(n_classes)\n', (1698, 1709), True, 'import numpy as np\n'), ((3409, 3534), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(row[4 * c + 1], row[4 * c + 2])', 'row[4 * c + 3]', 'row[4 * c + 4]'], {'fill': '(False)', 'edgecolor': 'color', 'linewidth': '(1)'}), '((row[4 * c + 1], row[4 * c + 2]), row[4 * c + 3], row[4 * c +\n 4], fill=False, edgecolor=color, linewidth=1)\n', (3422, 3534), True, 'import matplotlib.pyplot as 
plt\n')] |
import numpy as np
# Write a function that takes as input a list of numbers, and returns
# the list of values given by the softmax function.
def softmax(L):
    """
    Return the softmax of the list of numbers `L` as a list.

    Computed in a numerically stable, vectorized way: subtracting max(L)
    before exponentiating leaves the result mathematically unchanged but
    prevents np.exp from overflowing for large inputs (the original
    per-element loop overflowed to inf for inputs around 1000).
    """
    if len(L) == 0:
        # preserve the original behavior for empty input
        return []
    shifted = np.asarray(L, dtype=float) - np.max(L)
    expL = np.exp(shifted)
    return list(expL / expL.sum())
"""
GIVEN SOLUTION
import numpy as np
def softmax(L):
expL = np.exp(L)
sumExpL = sum(expL)
result = []
for i in expL:
result.append(i*1.0/sumExpL)
return result
# Note: The function np.divide can also be used here, as follows:
# def softmax(L):
# expL(np.exp(L))
# return np.divide (expL, expL.sum())
""" | [
"numpy.exp"
] | [((234, 246), 'numpy.exp', 'np.exp', (['L[t]'], {}), '(L[t])\n', (240, 246), True, 'import numpy as np\n'), ((287, 299), 'numpy.exp', 'np.exp', (['L[i]'], {}), '(L[i])\n', (293, 299), True, 'import numpy as np\n')] |
'''Q-learning demonstration on FrozenLake gym environment
'''
from os import system
from tqdm import tqdm
import numpy as np
import gym
# for training
MAX_EPISODES = 5000  # Q-learning episodes per training run
MAX_MOVES_PER_EP = 200  # hard cap on steps within a single episode
# max number of hyperparameter combinations tried by random search
MAX_COMBOS = 20
# max number of chances (independent training runs) to be given to a hyperparameter combination
MAX_CHANCE = 5
# for testing
MAX_TESTS = 5000  # evaluation episodes per test run
# HYPERPARAM BOUNDS: sampling range for each hyperparameter during random search
HYP_BOUNDS = {
    'alpha': {'min': 0.65, 'max': 0.90},
    'gamma': {'min': 0.80, 'max': 0.95},
    'decay_factor': {'min': -1, 'max': 1},
}
# INITIAL HYPERPARAMS: starting point for tuning, or used as-is when tuning is off
init_hyperparams = {}
init_hyperparams['alpha'] = 0.77  # learning rate
init_hyperparams['gamma'] = 0.91  # discount factor
init_hyperparams['decay_factor'] = 0.44 # actual decay factor is 10 raised to this value
# when False, skip the tuning stage and train directly with init_hyperparams
PERFORM_TUNING = False
def search_random_hyperparams(cur_hyperparams):
    '''Pick the next hyperparameter combination by random search.

    Each hyperparameter is mapped into the normalized interval [-1, 1],
    a uniform sample is drawn from a neighbourhood of the current value
    (clipped to that interval), and the sample is mapped back to the
    hyperparameter's real bounds from HYP_BOUNDS.
    '''
    radius = 1.0  # half-width of the normalized search interval
    search_rad = radius / 3  # how far from the current value a step may jump
    next_hyperparams = {}
    for name, value in cur_hyperparams.items():
        bounds = HYP_BOUNDS[name]
        center = (bounds['min'] + bounds['max']) / 2.0
        span = bounds['max'] - bounds['min']
        # normalize the current value onto [-radius, radius]
        scaled = ((value - center) / span) * (radius/0.5)
        # clip the sampling window to stay inside the normalized interval
        low = max(scaled - search_rad, -radius)
        high = min(scaled + search_rad, radius)
        sample = np.random.uniform(low=low, high=high)
        # de-normalize the sample back into the hyperparameter's range
        next_hyperparams[name] = ((sample * (0.5/radius)) * span) + center
    return next_hyperparams
def tune_hyperparams(env):
    '''Hyperparameter tuning via iterated random search.

    Each candidate combination is evaluated MAX_CHANCE times; the one with
    the highest average win count is kept, and the next candidate is sampled
    around the current best. Returns the best combination found.
    '''
    candidate = init_hyperparams
    best_hyperparams = candidate
    best_ave_wins = 0
    for combo in range(1, MAX_COMBOS + 1):
        # give the candidate several training runs to average out noise
        all_wins = [get_q_table(env, candidate)[1] for _ in tqdm(range(MAX_CHANCE))]
        ave_wins = sum(all_wins) / len(all_wins)
        print("[Combo %d]\nWin Summary\n Average: %0.2f\n Lowest: %0.2f\n "
              "Highest: %0.2f" % (combo, ave_wins, min(all_wins), max(all_wins)))
        if ave_wins > best_ave_wins:
            best_hyperparams = candidate
            best_ave_wins = ave_wins
            print("[New Best]\n alpha=%0.2f\n gamma=%0.2f\n "
                  "decay_factor=%0.2f\n"
                  % (best_hyperparams['alpha'],
                     best_hyperparams['gamma'],
                     10**best_hyperparams['decay_factor']))
        if combo < MAX_COMBOS:
            # sample the next candidate in the neighbourhood of the best one
            candidate = search_random_hyperparams(best_hyperparams)
    return best_hyperparams
def get_q_table(env, hyperparams):
    '''Create and optimize q-table

    Trains a tabular Q-learning agent on `env` for MAX_EPISODES episodes and
    returns (Q, wins), where wins is the total reward accumulated at episode
    ends (for FrozenLake this is presumably the win count — reward 1 per
    successful episode; confirm against the environment).
    '''
    # get hyperparams
    alpha = hyperparams['alpha'] # learning rate
    # discount factor: importance we give to future rewards
    gamma = hyperparams['gamma']
    df = hyperparams['decay_factor']
    # Q-table initialization
    # rows: states, cols: actions, cell value: expected reward
    Q = np.zeros([env.observation_space.n, env.action_space.n])
    wins = 0
    for ep in range(MAX_EPISODES):
        cur_state = env.reset()
        for move in range(MAX_MOVES_PER_EP):
            #env.render()
            # add randomness to Q values, then pick best action;
            # the noise magnitude decays as 1/((ep+1)*10**df), so exploration
            # fades out in later episodes
            action = np.argmax(Q[cur_state, :] + \
                      np.random.randn(1, env.action_space.n)*(1./((ep+1)*(10**df))))
            prev_state = cur_state
            cur_state, reward, done, info = env.step(action)
            # update Q-table with the standard Q-learning rule:
            # Q(s,a) += alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
            Q[prev_state, action] = Q[prev_state, action] + \
                                    alpha*(reward + gamma*(np.max(Q[cur_state, :])) - \
                                    Q[prev_state, action])
            # current episode ended
            if done:
                wins += reward
                break
    return Q, wins
def test_q_table(test_num, env, Q, random_agent=False):
    '''Run using optimized q-table or random agent

    Plays MAX_TESTS episodes on `env` with an epsilon-greedy policy over `Q`
    and returns the total reward collected at episode ends. With
    random_agent=True, epsilon is forced to 1 so every action is random.
    `test_num` is only used by the (commented-out) progress display.
    '''
    wins = 0
    #Q = np.random.random([env.observation_space.n, env.action_space.n])
    for ep in range(MAX_TESTS):
        cur_state = env.reset()
        # fixed epsilon (probability of exploring instead of exploiting)
        epsilon = 0.009
        if random_agent:
            epsilon = 1
        while True:
            #system('clear')
            #print("Test %d, Episode %d:" % (test_num+1, ep+1))
            #env.render()
            # generate probability of exploitation
            exploit_prob = np.random.uniform()
            if exploit_prob > epsilon:
                # choose best action based on current state
                action = np.argmax(Q[cur_state, :]) # get index of best action
            else:
                # explore: take a uniformly random action
                action = env.action_space.sample()
            cur_state, reward, done, info = env.step(action)
            # current episode ended
            if done:
                wins += reward
                break
    return wins
def run_tester(test_name, test_func, *args):
    '''Run `test_func` MAX_CHANCE times and print a win summary.

    `test_func` is invoked as test_func(run_index, *args) and must return the
    win count for that run. NOTE(review): the "Ave. Win Rate" divides by
    MAX_EPISODES although test runs play MAX_TESTS episodes; the two constants
    are currently equal (5000), so the printed value is unaffected — confirm
    which was intended.
    '''
    all_wins = [test_func(i, *args) for i in tqdm(range(MAX_CHANCE))]
    # get average wins
    ave_wins = sum(all_wins) / len(all_wins)
    print("[%s]\nWin Summary\n Average: %0.2f\n Lowest: %0.2f\n "
          "Highest: %0.2f\n Ave. Win Rate: %0.4f\n"
          % (test_name, ave_wins, min(all_wins), max(all_wins), ave_wins/MAX_EPISODES))
def main():
    '''Entry point: optionally tune hyperparameters, train several Q-tables
    keeping the best one, then compare a random agent against the learned
    agent on FrozenLake8x8.
    '''
    env = gym.make('FrozenLake8x8-v0')
    if PERFORM_TUNING:
        # perform Q-learning multiple times to determine best hyperparams
        print("Performing Q-learning for %d hyperparameter combos..." % MAX_COMBOS)
        hyperparams = tune_hyperparams(env)
    else:
        hyperparams = init_hyperparams
    # get the best Q-table
    best_wins = 0
    best_Q = None
    print("Getting best Q-table...")
    for i in tqdm(range(MAX_CHANCE)):
        Q, wins = get_q_table(env, hyperparams)
        print("Wins: %d" % wins)
        if wins >= best_wins:
            best_Q = Q
            # BUGFIX: best_wins was never updated, so `wins >= best_wins`
            # was always true (wins >= 0) and the LAST table trained was
            # kept instead of the best-performing one.
            best_wins = wins
    print("Testing random agent...")
    # test how well a random agent performs
    run_tester("Random Agent", test_q_table,
                env, best_Q, True)
    print("Testing learned Q-table...")
    # test how well the learned agent performs
    run_tester("Agent Q", test_q_table,
                env, best_Q)
    env.close()
if __name__ == "__main__":
main()
| [
"numpy.argmax",
"numpy.max",
"numpy.zeros",
"numpy.random.uniform",
"numpy.random.randn",
"gym.make"
] | [((3337, 3392), 'numpy.zeros', 'np.zeros', (['[env.observation_space.n, env.action_space.n]'], {}), '([env.observation_space.n, env.action_space.n])\n', (3345, 3392), True, 'import numpy as np\n'), ((5807, 5835), 'gym.make', 'gym.make', (['"""FrozenLake8x8-v0"""'], {}), "('FrozenLake8x8-v0')\n", (5815, 5835), False, 'import gym\n'), ((1640, 1677), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'low', 'high': 'high'}), '(low=low, high=high)\n', (1657, 1677), True, 'import numpy as np\n'), ((4803, 4822), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (4820, 4822), True, 'import numpy as np\n'), ((4948, 4974), 'numpy.argmax', 'np.argmax', (['Q[cur_state, :]'], {}), '(Q[cur_state, :])\n', (4957, 4974), True, 'import numpy as np\n'), ((3683, 3721), 'numpy.random.randn', 'np.random.randn', (['(1)', 'env.action_space.n'], {}), '(1, env.action_space.n)\n', (3698, 3721), True, 'import numpy as np\n'), ((3994, 4017), 'numpy.max', 'np.max', (['Q[cur_state, :]'], {}), '(Q[cur_state, :])\n', (4000, 4017), True, 'import numpy as np\n')] |
# -*- coding:utf-8 -*-
import unittest
from simple_ml.evaluation import *
from simple_ml.evaluation import _check_input, _get_binary_confusion_matrix, _gen_binary_pairs
import numpy as np
from numpy.testing import assert_array_equal
from simple_ml.base.base_error import *
class TestEvaluation(unittest.TestCase):
    """Unit tests for the evaluation helpers of simple_ml."""

    def test_regression_plot(self):
        train_x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
        train_y = np.array([0.1, 0.2, 0.4, 0.6])
        # test set whose feature count differs from the training set
        test_x = np.array([[1, 3, 4, 5]])
        test_y = np.array([0.12])
        self.assertRaises(FeatureNumberMismatchError, regression_plot,
                          train_x, train_y, test_x, test_y, test_y)
        # test set whose sample count differs from its labels
        test_x = np.array([[1, 3, 4]])
        test_y = np.array([0.12, 0.13])
        self.assertRaises(SampleNumberMismatchError, regression_plot,
                          train_x, train_y, test_x, test_y, test_y)

    def test_check_input(self):
        # two-dimensional label arrays are rejected
        two_dim = np.array([[1, 2, 3], [4, 5, 6]])
        self.assertRaises(InputTypeError, _check_input, two_dim.copy(), two_dim)
        # label arrays of different lengths are rejected
        self.assertRaises(LabelLengthMismatchError, _check_input,
                          np.array([1, 2, 3]), np.array([1, 2]))

    def test_confusion_matrix(self):
        truth = np.array([1, 0, 0, 1])
        # non-binary predictions are rejected
        self.assertRaises(ParamInputError, _get_binary_confusion_matrix,
                          truth, np.array([1, 0, 0, 2]))
        matrix = _get_binary_confusion_matrix(truth, np.array([1, 0, 0, 0]))
        assert_array_equal(matrix, np.array([[1, 1], [0, 2]]))

    def test_classify_accuracy(self):
        self.assertEqual(
            classify_accuracy(np.array([1, 0, 0, 1]), np.array([1, 0, 1, 1])), 0.75)

    def test_classify_precision(self):
        self.assertEqual(
            classify_precision(np.array([1, 1, 0, 0, 1]), np.array([1, 0, 1, 0, 0])), 1/3)

    def test_classify_recall(self):
        self.assertEqual(
            classify_recall(np.array([1, 1, 0, 0, 1]), np.array([1, 0, 1, 0, 0])), 0.5)

    def test_classify_f1(self):
        # harmonic mean of precision 1/3 and recall 1/2
        expected = (2 * 0.5 / 3) / (0.5 + 1/3)
        self.assertEqual(
            classify_f1(np.array([1, 1, 0, 0, 1]), np.array([1, 0, 1, 0, 0])), expected)

    def test_gen_binary_paris(self):
        pairs = list(_gen_binary_pairs(np.array([1, 1]), np.array([1, 0])))
        assert_array_equal(pairs, [(np.array([0, 0]), np.array([0, 1])),
                                   (np.array([1, 1]), np.array([1, 0]))])

    def test_auc(self):
        pass
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"numpy.array",
"simple_ml.evaluation._get_binary_confusion_matrix",
"simple_ml.evaluation._gen_binary_pairs"
] | [((2826, 2841), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2839, 2841), False, 'import unittest\n'), ((372, 429), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])\n', (380, 429), True, 'import numpy as np\n'), ((532, 562), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.4, 0.6]'], {}), '([0.1, 0.2, 0.4, 0.6])\n', (540, 562), True, 'import numpy as np\n'), ((580, 604), 'numpy.array', 'np.array', (['[[1, 3, 4, 5]]'], {}), '([[1, 3, 4, 5]])\n', (588, 604), True, 'import numpy as np\n'), ((622, 638), 'numpy.array', 'np.array', (['[0.12]'], {}), '([0.12])\n', (630, 638), True, 'import numpy as np\n'), ((770, 791), 'numpy.array', 'np.array', (['[[1, 3, 4]]'], {}), '([[1, 3, 4]])\n', (778, 791), True, 'import numpy as np\n'), ((807, 829), 'numpy.array', 'np.array', (['[0.12, 0.13]'], {}), '([0.12, 0.13])\n', (815, 829), True, 'import numpy as np\n'), ((992, 1024), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (1000, 1024), True, 'import numpy as np\n'), ((1151, 1167), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (1159, 1167), True, 'import numpy as np\n'), ((1188, 1207), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1196, 1207), True, 'import numpy as np\n'), ((1344, 1366), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {}), '([1, 0, 0, 1])\n', (1352, 1366), True, 'import numpy as np\n'), ((1380, 1402), 'numpy.array', 'np.array', (['[1, 0, 0, 2]'], {}), '([1, 0, 0, 2])\n', (1388, 1402), True, 'import numpy as np\n'), ((1497, 1519), 'numpy.array', 'np.array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (1505, 1519), True, 'import numpy as np\n'), ((1547, 1583), 'simple_ml.evaluation._get_binary_confusion_matrix', '_get_binary_confusion_matrix', (['y1', 'y2'], {}), '(y1, y2)\n', (1575, 1583), False, 'from simple_ml.evaluation import _check_input, _get_binary_confusion_matrix, _gen_binary_pairs\n'), 
((1764, 1786), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {}), '([1, 0, 0, 1])\n', (1772, 1786), True, 'import numpy as np\n'), ((1800, 1822), 'numpy.array', 'np.array', (['[1, 0, 1, 1]'], {}), '([1, 0, 1, 1])\n', (1808, 1822), True, 'import numpy as np\n'), ((1956, 1981), 'numpy.array', 'np.array', (['[1, 1, 0, 0, 1]'], {}), '([1, 1, 0, 0, 1])\n', (1964, 1981), True, 'import numpy as np\n'), ((1995, 2020), 'numpy.array', 'np.array', (['[1, 0, 1, 0, 0]'], {}), '([1, 0, 1, 0, 0])\n', (2003, 2020), True, 'import numpy as np\n'), ((2151, 2176), 'numpy.array', 'np.array', (['[1, 1, 0, 0, 1]'], {}), '([1, 1, 0, 0, 1])\n', (2159, 2176), True, 'import numpy as np\n'), ((2190, 2215), 'numpy.array', 'np.array', (['[1, 0, 1, 0, 0]'], {}), '([1, 0, 1, 0, 0])\n', (2198, 2215), True, 'import numpy as np\n'), ((2339, 2364), 'numpy.array', 'np.array', (['[1, 1, 0, 0, 1]'], {}), '([1, 1, 0, 0, 1])\n', (2347, 2364), True, 'import numpy as np\n'), ((2378, 2403), 'numpy.array', 'np.array', (['[1, 0, 1, 0, 0]'], {}), '([1, 0, 1, 0, 0])\n', (2386, 2403), True, 'import numpy as np\n'), ((2552, 2568), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (2560, 2568), True, 'import numpy as np\n'), ((2582, 2598), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (2590, 2598), True, 'import numpy as np\n'), ((1629, 1655), 'numpy.array', 'np.array', (['[[1, 1], [0, 2]]'], {}), '([[1, 1], [0, 2]])\n', (1637, 1655), True, 'import numpy as np\n'), ((2618, 2643), 'simple_ml.evaluation._gen_binary_pairs', '_gen_binary_pairs', (['y1', 'y2'], {}), '(y1, y2)\n', (2635, 2643), False, 'from simple_ml.evaluation import _check_input, _get_binary_confusion_matrix, _gen_binary_pairs\n'), ((2679, 2695), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (2687, 2695), True, 'import numpy as np\n'), ((2697, 2713), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (2705, 2713), True, 'import numpy as np\n'), ((2717, 2733), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 
1])\n', (2725, 2733), True, 'import numpy as np\n'), ((2735, 2751), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (2743, 2751), True, 'import numpy as np\n')] |
import argparse
import sys
import matplotlib.pyplot as plt
import numpy as np
# (command-line parsing of a voxel file was stubbed out here; the lens
# coefficients below are hard-coded instead)

def distort(x, y, k1, k2, k3, p1, p2):
    '''Apply the Brown-Conrady distortion model to the coordinates (x, y):
    radial terms k1..k3 and tangential terms p1, p2.'''
    r2 = x*x + y*y
    r4 = r2*r2
    r6 = r4*r2
    radial = 1 + k1*r2 + k2*r4 + k3*r6
    xd = x*radial + 2*p1*x*y + p2*(r2 + 2*x*x)
    yd = y*radial + p1*(r2 + 2*y*y) + p2*x*y
    return xd, yd

# sample points along the positive x axis (y = 0)
x = np.linspace(0, 0.9, 200)
y = 0

## Voxel-A lens
x1, y1 = distort(x, y, -0.1583968, 0.06113919, 0.09898978, 0.001591975, -0.0001962754)

## Tintin lens
k1 = 0.909882
k2 = -3.559455
k3 = 3.626591
p1 = 0.047604
p2 = -0.005546
x2, y2 = distort(x, y, k1, k2, k3, p1, p2)
# feed the Voxel-A-distorted points through the Tintin model
x3, y3 = distort(x1, y1, k1, k2, k3, p1, p2)

plt.plot(x, x1, x, x2, 'r', x, x3, 'k')
plt.grid(True)
plt.legend(['Voxel-A', 'TintinCDK', 'Distorted to Corrected'])
plt.show() | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((395, 419), 'numpy.linspace', 'np.linspace', (['(0)', '(0.9)', '(200)'], {}), '(0, 0.9, 200)\n', (406, 419), True, 'import numpy as np\n'), ((984, 1023), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'x1', 'x', 'x2', '"""r"""', 'x', 'x3', '"""k"""'], {}), "(x, x1, x, x2, 'r', x, x3, 'k')\n", (992, 1023), True, 'import matplotlib.pyplot as plt\n'), ((1024, 1038), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1032, 1038), True, 'import matplotlib.pyplot as plt\n'), ((1039, 1101), 'matplotlib.pyplot.legend', 'plt.legend', (["['Voxel-A', 'TintinCDK', 'Distorted to Corrected']"], {}), "(['Voxel-A', 'TintinCDK', 'Distorted to Corrected'])\n", (1049, 1101), True, 'import matplotlib.pyplot as plt\n'), ((1102, 1112), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1110, 1112), True, 'import matplotlib.pyplot as plt\n')] |
import os
# Suppress TensorFlow C++ INFO logs (keep warnings and errors).
os.environ['TF_CPP_MIN_LOG_LEVEL']='1'
from os import listdir
import sys
import time
import argparse
import tools.ops
import subprocess
import numpy as np
import tensorflow as tf
import scipy.misc as sm
from models.mfb_net_cross import *
from tools.utilities import *
from tools.ops import *
# Command-line overrides; the remaining hyper-parameters are fixed flags below.
parser = argparse.ArgumentParser()
parser.add_argument('-lr', dest='lr', type=float, default='1e-4', help='original learning rate')
parser.add_argument('-batch_size', dest='batch_size', type=int, default='10', help='batch_size')
args = parser.parse_args()
# Training configuration, exposed globally through tf.app.flags.
flags = tf.app.flags
flags.DEFINE_float('lr', args.lr, 'Original learning rate.')
flags.DEFINE_integer('batch_size', args.batch_size, 'Batch size.')
flags.DEFINE_integer('num_epochs', 500, 'Number of epochs.')
flags.DEFINE_integer('num_gpus', 4, 'Number of GPUs.')
flags.DEFINE_integer('seq_length', 16, 'Length of each video clip.')
flags.DEFINE_integer('height', 128, 'Height of video frame.')
flags.DEFINE_integer('width', 128, 'Width of video frame.')
flags.DEFINE_integer('channel', 3, 'Number of channels for each frame.')
flags.DEFINE_integer('num_sample', 10060, 'Number of samples in this dataset.')
flags.DEFINE_float('wd', 0.001, 'Weight decay rate.')
FLAGS = flags.FLAGS
# Experiment name and the derived output/checkpoint/log locations.
prefix = 'mfb_cross'
model_save_dir = './ckpt/' + prefix
logs_save_dir = './logs/' + prefix
pred_save_dir = './output/' + prefix
loss_save_dir = './loss'
# Dataset locations (one record path per line in the train list).
train_list_path = './dataset/trainlist.txt'
dataset_path = './dataset/UCF-101-tf-records'
# Validation job script (optionally launched every 500 steps, see run_training).
evaluation_job = './jobs/mfb_cross_val'
use_pretrained_model = True
save_predictions = True
def run_training():
    """Build the multi-GPU MFB training graph and run the training loop.

    Configuration comes from the module-level FLAGS and path constants.
    The latest checkpoint is restored when `use_pretrained_model` is True.
    Every 10 steps the losses are printed/logged and a summary is written;
    every 100 steps sample predictions are saved; every 500 steps a
    checkpoint is written.
    """
    # Create model directory
    if not os.path.exists(model_save_dir):
        os.makedirs(model_save_dir)
    model_filename = "./mfb_baseline_ucf24.model"

    # Construct computational graph: per-tower gradients and losses.
    tower_grads = []
    tower_losses, tower_ffg_losses, tower_fbg_losses, tower_lfg_losses, tower_feat_losses, tower_wd_losses = [], [], [], [], [], []

    global_step = tf.get_variable(
        'global_step',
        [],
        initializer=tf.constant_initializer(0),
        trainable=False
    )
    starter_learning_rate = FLAGS.lr
    # Halve the learning rate every 100k steps.
    learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
                                               100000, 0.5, staircase=True)
    opt = tf.train.AdamOptimizer(learning_rate)

    # Create a session for running Ops on the Graph.
    config = tf.ConfigProto(allow_soft_placement=True)
    sess = tf.Session(config=config)

    # Read the training list and split it evenly across the GPUs.
    # (Use a context manager so the file handle is always released.)
    with open(train_list_path, 'r') as train_list_file:
        train_list = train_list_file.read().splitlines()
    train_list = [os.path.join(dataset_path, line) for line in train_list]
    assert len(train_list) % FLAGS.num_gpus == 0
    num_for_each_gpu = len(train_list) // FLAGS.num_gpus

    clips_list, img_masks_list, loss_masks_list = [], [], []
    with sess.as_default():
        for i in range(FLAGS.num_gpus):
            clips, img_masks, loss_masks = input_pipeline(train_list[i*num_for_each_gpu:(i+1)*num_for_each_gpu], \
                FLAGS.batch_size, read_threads=4, num_epochs=FLAGS.num_epochs, is_training=True)
            clips_list.append(clips)
            img_masks_list.append(img_masks)
            loss_masks_list.append(loss_masks)

    # One model replica ("tower") per GPU, all sharing variables.
    mfb_list = []
    with tf.variable_scope('vars') as var_scope:
        for gpu_index in range(FLAGS.num_gpus):
            with tf.device('/gpu:%d' % (gpu_index)):
                with tf.name_scope('%s_%d' % ('tower', gpu_index)) as scope:
                    # construct model
                    mfb = mfb_net(clips_list[gpu_index], FLAGS.height, FLAGS.width, FLAGS.seq_length, FLAGS.channel, FLAGS.batch_size)
                    mfb_list.append(mfb)
                    loss, first_fg_loss, first_bg_loss, last_fg_loss, feat_loss, wd_loss = \
                        tower_loss(scope, mfb, clips_list[gpu_index], img_masks_list[gpu_index], loss_masks_list[gpu_index])
                    # Share variables with the towers built after this one.
                    var_scope.reuse_variables()
                    vars_to_optimize = tf.trainable_variables()
                    grads = opt.compute_gradients(loss, var_list=vars_to_optimize)
                    tower_grads.append(grads)
                    tower_losses.append(loss)
                    tower_ffg_losses.append(first_fg_loss)
                    tower_fbg_losses.append(first_bg_loss)
                    tower_lfg_losses.append(last_fg_loss)
                    tower_feat_losses.append(feat_loss)
                    tower_wd_losses.append(wd_loss)

    # Average the losses of all towers.
    loss_op = tf.reduce_mean(tower_losses)
    ffg_loss_op = tf.reduce_mean(tower_ffg_losses)
    fbg_loss_op = tf.reduce_mean(tower_fbg_losses)
    lfg_loss_op = tf.reduce_mean(tower_lfg_losses)
    feat_loss_op = tf.reduce_mean(tower_feat_losses)
    wd_loss_op = tf.reduce_mean(tower_wd_losses)
    tf.summary.scalar('loss', loss_op)
    tf.summary.scalar('ffg_loss', ffg_loss_op)
    tf.summary.scalar('fbg_loss', fbg_loss_op)
    tf.summary.scalar('lfg_loss', lfg_loss_op)
    tf.summary.scalar('feat_loss', feat_loss_op)
    tf.summary.scalar('wd_loss', wd_loss_op)

    # Average the tower gradients and apply them after any pending update
    # ops (e.g. batch-norm moving averages).
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    grads = average_gradients(tower_grads)
    with tf.control_dependencies(update_ops):
        train_op = opt.apply_gradients(grads, global_step=global_step)

    # saver for saving checkpoints
    saver = tf.train.Saver(max_to_keep=10)
    init = tf.initialize_all_variables()
    sess.run(init)
    if use_pretrained_model:
        print('[*] Loading checkpoint ...')
        model = tf.train.latest_checkpoint(model_save_dir)
        if model is not None:
            saver.restore(sess, model)
            print('[*] Loading success: %s!'%model)
        else:
            print('[*] Loading failed ...')

    # Create summary writer
    merged = tf.summary.merge_all()
    if not os.path.exists(logs_save_dir):
        os.makedirs(logs_save_dir)
    sum_writer = tf.summary.FileWriter(logs_save_dir, sess.graph)

    # Create prediction output folder
    if not os.path.exists(pred_save_dir):
        os.makedirs(pred_save_dir)

    # Create loss output folder
    if not os.path.exists(loss_save_dir):
        os.makedirs(loss_save_dir)
    loss_file = open(os.path.join(loss_save_dir, prefix+'.txt'), 'w')
    # Integer division: total_steps is only used for progress display.
    total_steps = (FLAGS.num_sample // (FLAGS.num_gpus * FLAGS.batch_size)) * FLAGS.num_epochs

    # start queue runner
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        with sess.as_default():
            print('\n\n\n*********** start training ***********\n\n\n')
            while not coord.should_stop():
                # Run training steps
                start_time = time.time()
                sess.run(train_op)
                duration = time.time() - start_time
                step = global_step.eval()
                if step == 1 or step % 10 == 0:  # evaluate losses and write summary
                    loss, ffg_loss, fbg_loss, lfg_loss, feat_loss, wd_loss, lr = \
                        sess.run([loss_op, ffg_loss_op, fbg_loss_op, lfg_loss_op, feat_loss_op, wd_loss_op, learning_rate])
                    line = 'step %d/%d, loss=%.8f, ffg=%.8f, fbg=%.8f, lfg=%.8f, feat=%.8f, lwd=%.8f, dur=%.3f, lr=%.8f' \
                        %(step, total_steps, loss, ffg_loss, fbg_loss, lfg_loss, feat_loss, wd_loss, duration, lr)
                    print(line)
                    loss_file.write(line + '\n')
                    loss_file.flush()
                    summary = sess.run(merged)
                    sum_writer.add_summary(summary, step)
                if step % 100 == 0 and save_predictions:  # save current predictions
                    mfb = mfb_list[0]  # only visualize prediction in first tower
                    ffg, fbg, lfg, gt_ffg, gt_fbg, gt_lfg = sess.run([
                        mfb.first_fg_rec[0], mfb.first_bg_rec[0], mfb.last_fg_rec[0], \
                        mfb.gt_ffg[0], mfb.gt_fbg[0], mfb.gt_lfg[0]])
                    # Map network outputs from [-1, 1] back to [0, 255].
                    ffg, fbg, lfg = (ffg+1)/2*255.0, (fbg+1)/2*255.0, (lfg+1)/2*255.0
                    gt_ffg, gt_fbg, gt_lfg = (gt_ffg+1)/2*255.0, (gt_fbg+1)/2*255.0, (gt_lfg+1)/2*255.0
                    img = gen_pred_img(ffg, fbg, lfg)
                    gt = gen_pred_img(gt_ffg, gt_fbg, gt_lfg)
                    save_img = np.concatenate((img, gt))
                    sm.imsave(os.path.join(pred_save_dir, '%07d.jpg'%step), save_img)
                if step % 500 == 0:  # save checkpoint
                    saver.save(sess, os.path.join(model_save_dir, model_filename), global_step=global_step)
                    # launch a new script for validation (please modify it for your own script)
                    #subprocess.check_output(['python', evaluation_job])
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        # When done, ask the threads to stop and release the loss log.
        coord.request_stop()
        loss_file.close()
    # Wait for threads to finish.
    coord.join(threads)
    sess.close()
def main(_):
    """Entry point for ``tf.app.run`` (the argv argument is ignored)."""
    run_training()
if __name__ == '__main__':
tf.app.run() | [
"tensorflow.control_dependencies",
"tensorflow.reduce_mean",
"tensorflow.app.run",
"os.path.exists",
"argparse.ArgumentParser",
"tensorflow.train.Coordinator",
"tensorflow.Session",
"numpy.concatenate",
"tensorflow.train.exponential_decay",
"tensorflow.ConfigProto",
"tensorflow.summary.scalar",
... | [((315, 340), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (338, 340), False, 'import argparse\n'), ((2271, 2366), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['starter_learning_rate', 'global_step', '(100000)', '(0.5)'], {'staircase': '(True)'}), '(starter_learning_rate, global_step, 100000, 0.5,\n staircase=True)\n', (2297, 2366), True, 'import tensorflow as tf\n'), ((2413, 2450), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (2435, 2450), True, 'import tensorflow as tf\n'), ((2512, 2553), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (2526, 2553), True, 'import tensorflow as tf\n'), ((2562, 2587), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (2572, 2587), True, 'import tensorflow as tf\n'), ((2597, 2619), 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (2617, 2619), True, 'import tensorflow as tf\n'), ((4413, 4441), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['tower_losses'], {}), '(tower_losses)\n', (4427, 4441), True, 'import tensorflow as tf\n'), ((4458, 4490), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['tower_ffg_losses'], {}), '(tower_ffg_losses)\n', (4472, 4490), True, 'import tensorflow as tf\n'), ((4507, 4539), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['tower_fbg_losses'], {}), '(tower_fbg_losses)\n', (4521, 4539), True, 'import tensorflow as tf\n'), ((4556, 4588), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['tower_lfg_losses'], {}), '(tower_lfg_losses)\n', (4570, 4588), True, 'import tensorflow as tf\n'), ((4605, 4638), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['tower_feat_losses'], {}), '(tower_feat_losses)\n', (4619, 4638), True, 'import tensorflow as tf\n'), ((4655, 4686), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['tower_wd_losses'], {}), '(tower_wd_losses)\n', (4669, 4686), 
True, 'import tensorflow as tf\n'), ((4689, 4723), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss_op'], {}), "('loss', loss_op)\n", (4706, 4723), True, 'import tensorflow as tf\n'), ((4725, 4767), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""ffg_loss"""', 'ffg_loss_op'], {}), "('ffg_loss', ffg_loss_op)\n", (4742, 4767), True, 'import tensorflow as tf\n'), ((4769, 4811), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""fbg_loss"""', 'fbg_loss_op'], {}), "('fbg_loss', fbg_loss_op)\n", (4786, 4811), True, 'import tensorflow as tf\n'), ((4813, 4855), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""lfg_loss"""', 'lfg_loss_op'], {}), "('lfg_loss', lfg_loss_op)\n", (4830, 4855), True, 'import tensorflow as tf\n'), ((4857, 4901), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""feat_loss"""', 'feat_loss_op'], {}), "('feat_loss', feat_loss_op)\n", (4874, 4901), True, 'import tensorflow as tf\n'), ((4903, 4943), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""wd_loss"""', 'wd_loss_op'], {}), "('wd_loss', wd_loss_op)\n", (4920, 4943), True, 'import tensorflow as tf\n'), ((4959, 5001), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (4976, 5001), True, 'import tensorflow as tf\n'), ((5192, 5222), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(10)'}), '(max_to_keep=10)\n', (5206, 5222), True, 'import tensorflow as tf\n'), ((5231, 5260), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (5258, 5260), True, 'import tensorflow as tf\n'), ((5641, 5663), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (5661, 5663), True, 'import tensorflow as tf\n'), ((5746, 5794), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['logs_save_dir', 'sess.graph'], {}), '(logs_save_dir, sess.graph)\n', (5767, 5794), True, 'import tensorflow as tf\n'), ((6188, 6210), 
'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (6208, 6210), True, 'import tensorflow as tf\n'), ((6222, 6274), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess', 'coord': 'coord'}), '(sess=sess, coord=coord)\n', (6250, 6274), True, 'import tensorflow as tf\n'), ((8473, 8485), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (8483, 8485), True, 'import tensorflow as tf\n'), ((1666, 1696), 'os.path.exists', 'os.path.exists', (['model_save_dir'], {}), '(model_save_dir)\n', (1680, 1696), False, 'import os\n'), ((1700, 1727), 'os.makedirs', 'os.makedirs', (['model_save_dir'], {}), '(model_save_dir)\n', (1711, 1727), False, 'import os\n'), ((2790, 2831), 'os.path.join', 'os.path.join', (['dataset_path', 'train_list[i]'], {}), '(dataset_path, train_list[i])\n', (2802, 2831), False, 'import os\n'), ((3381, 3406), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""vars"""'], {}), "('vars')\n", (3398, 3406), True, 'import tensorflow as tf\n'), ((5048, 5083), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), '(update_ops)\n', (5071, 5083), True, 'import tensorflow as tf\n'), ((5286, 5316), 'os.path.exists', 'os.path.exists', (['model_save_dir'], {}), '(model_save_dir)\n', (5300, 5316), False, 'import os\n'), ((5320, 5347), 'os.makedirs', 'os.makedirs', (['model_save_dir'], {}), '(model_save_dir)\n', (5331, 5347), False, 'import os\n'), ((5422, 5464), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['model_save_dir'], {}), '(model_save_dir)\n', (5448, 5464), True, 'import tensorflow as tf\n'), ((5672, 5701), 'os.path.exists', 'os.path.exists', (['logs_save_dir'], {}), '(logs_save_dir)\n', (5686, 5701), False, 'import os\n'), ((5705, 5731), 'os.makedirs', 'os.makedirs', (['logs_save_dir'], {}), '(logs_save_dir)\n', (5716, 5731), False, 'import os\n'), ((5839, 5868), 'os.path.exists', 'os.path.exists', (['pred_save_dir'], {}), 
'(pred_save_dir)\n', (5853, 5868), False, 'import os\n'), ((5872, 5898), 'os.makedirs', 'os.makedirs', (['pred_save_dir'], {}), '(pred_save_dir)\n', (5883, 5898), False, 'import os\n'), ((5937, 5966), 'os.path.exists', 'os.path.exists', (['loss_save_dir'], {}), '(loss_save_dir)\n', (5951, 5966), False, 'import os\n'), ((5970, 5996), 'os.makedirs', 'os.makedirs', (['loss_save_dir'], {}), '(loss_save_dir)\n', (5981, 5996), False, 'import os\n'), ((6015, 6059), 'os.path.join', 'os.path.join', (['loss_save_dir', "(prefix + '.txt')"], {}), "(loss_save_dir, prefix + '.txt')\n", (6027, 6059), False, 'import os\n'), ((2142, 2168), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), '(0)\n', (2165, 2168), True, 'import tensorflow as tf\n'), ((3471, 3503), 'tensorflow.device', 'tf.device', (["('/gpu:%d' % gpu_index)"], {}), "('/gpu:%d' % gpu_index)\n", (3480, 3503), True, 'import tensorflow as tf\n'), ((6447, 6458), 'time.time', 'time.time', ([], {}), '()\n', (6456, 6458), False, 'import time\n'), ((3516, 3561), 'tensorflow.name_scope', 'tf.name_scope', (["('%s_%d' % ('tower', gpu_index))"], {}), "('%s_%d' % ('tower', gpu_index))\n", (3529, 3561), True, 'import tensorflow as tf\n'), ((3990, 4014), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (4012, 4014), True, 'import tensorflow as tf\n'), ((6497, 6508), 'time.time', 'time.time', ([], {}), '()\n', (6506, 6508), False, 'import time\n'), ((7780, 7805), 'numpy.concatenate', 'np.concatenate', (['(img, gt)'], {}), '((img, gt))\n', (7794, 7805), True, 'import numpy as np\n'), ((7821, 7867), 'os.path.join', 'os.path.join', (['pred_save_dir', "('%07d.jpg' % step)"], {}), "(pred_save_dir, '%07d.jpg' % step)\n", (7833, 7867), False, 'import os\n'), ((7942, 7986), 'os.path.join', 'os.path.join', (['model_save_dir', 'model_filename'], {}), '(model_save_dir, model_filename)\n', (7954, 7986), False, 'import os\n')] |
import argparse
from pathlib import Path
from PIL import Image
import cv2
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
import matplotlib.pyplot as plt
from utils import Options, overlap_ratio
from models.mdnet import MDNet, BCELoss
from models.extractor import SampleGenerator, RegionExtractor
from models.regressor import BBRegressor
def forward_samples(model, image, samples, opts, out_layer='conv3'):
    """Extract features for `samples` cropped from `image` through `model`.

    Regions are produced in mini-batches by ``RegionExtractor`` and fed
    through the model in eval mode with gradients disabled.

    Args:
        model: MDNet-style network; called as ``model(x, out_layer=...)``.
        image: source image the sample boxes are cut from.
        samples: array of candidate bounding boxes.
        opts: option namespace (``img_size``, ``padding``, ``batch_test``,
            ``use_gpu``).
        out_layer: name of the layer whose activations are returned.

    Returns:
        A single tensor of per-sample features, concatenated along dim 0.
    """
    model.eval()
    extractor = RegionExtractor(image, samples, opts.img_size, opts.padding, opts.batch_test)
    # Collect per-batch features and concatenate once at the end instead of
    # growing the tensor with torch.cat inside the loop (which is O(n^2)).
    feats = []
    for regions in extractor:
        if opts.use_gpu:
            regions = regions.cuda()
        with torch.no_grad():
            feat = model(regions, out_layer=out_layer)
        feats.append(feat.detach().clone())
    return torch.cat(feats, 0)
def train(model, criterion, optimizer,
          pos_feats, neg_feats, maxiter, opts,
          in_layer='fc4'):
    """Run `maxiter` iterations of MDNet's online fine-tuning.

    Each iteration draws a positive batch and a (larger) negative candidate
    batch from shuffled index permutations; when the candidate pool exceeds
    the negative batch size, hard negative mining keeps only the
    highest-scoring negatives before the optimisation step.

    Args:
        model: MDNet-style network; called as ``model(x, in_layer=...)``.
        criterion: loss taking ``(pos_score, neg_score)``.
        optimizer: torch optimizer updating the model parameters.
        pos_feats: feature tensor of positive samples (one row each).
        neg_feats: feature tensor of negative samples (one row each).
        maxiter: number of training iterations.
        opts: option namespace (``batch_pos``, ``batch_neg``, ``batch_test``,
            ``batch_neg_cand``, ``grad_clip``).
        in_layer: layer name at which the features enter the model.
    """
    model.train()
    batch_pos = opts.batch_pos
    batch_neg = opts.batch_neg
    batch_test = opts.batch_test
    # Candidate pool for hard negative mining is at least one full neg batch.
    batch_neg_cand = max(opts.batch_neg_cand, batch_neg)
    # Shuffled index pools; extend them until they cover all iterations so
    # the pointers below never run past the end.
    pos_idx = np.random.permutation(pos_feats.size(0))
    neg_idx = np.random.permutation(neg_feats.size(0))
    while len(pos_idx) < batch_pos * maxiter:
        pos_idx = np.concatenate([pos_idx, np.random.permutation(pos_feats.size(0))])
    while len(neg_idx) < batch_neg_cand * maxiter:
        neg_idx = np.concatenate([neg_idx, np.random.permutation(neg_feats.size(0))])
    pos_pointer = 0
    neg_pointer = 0
    for _ in range(maxiter):
        # select pos idx
        pos_next = pos_pointer + batch_pos
        pos_cur_idx = pos_idx[pos_pointer:pos_next]
        pos_cur_idx = pos_feats.new(pos_cur_idx).long()
        pos_pointer = pos_next
        # select neg idx
        neg_next = neg_pointer + batch_neg_cand
        neg_cur_idx = neg_idx[neg_pointer:neg_next]
        neg_cur_idx = neg_feats.new(neg_cur_idx).long()
        neg_pointer = neg_next
        # create batch
        batch_pos_feats = pos_feats[pos_cur_idx]
        batch_neg_feats = neg_feats[neg_cur_idx]
        # hard negative mining: score all candidates (no grad, eval mode)
        # and keep the batch_neg negatives the model is most confident about.
        if batch_neg_cand > batch_neg:
            model.eval()
            for start in range(0, batch_neg_cand, batch_test):
                end = min(start + batch_test, batch_neg_cand)
                with torch.no_grad():
                    score = model(batch_neg_feats[start:end], in_layer=in_layer)
                if start == 0:
                    neg_cand_score = score.detach()[:, 1].clone()
                else:
                    neg_cand_score = torch.cat((neg_cand_score, score.detach()[:, 1].clone()), 0)
            _, top_idx = neg_cand_score.topk(batch_neg)
            batch_neg_feats = batch_neg_feats[top_idx]
            model.train()
        # forward
        pos_score = model(batch_pos_feats, in_layer=in_layer)
        neg_score = model(batch_neg_feats, in_layer=in_layer)
        # optimize (with gradient clipping to opts.grad_clip)
        loss = criterion(pos_score, neg_score)
        model.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), opts.grad_clip)
        optimizer.step()
def main(images, init_bbox, ground_truths, opts):
    """Track the target initialised by `init_bbox` through `images`.

    Generator implementing the MDNet online-tracking loop: the model is
    fine-tuned on the first frame, a bounding-box regressor is trained, and
    for every subsequent frame the target is re-localised, optionally
    regressed, and the model updated with freshly collected samples.

    Args:
        images: sequence of image file paths (frame 0 is the init frame).
        init_bbox: initial target box for frame 0.
        ground_truths: per-frame ground-truth boxes (used only for the
            overlap reported in the yielded tuple).
        opts: option namespace with all tracker hyper-parameters.

    Yields:
        For each frame i >= 1 a tuple
        ``(estimated_bbox, regressed_bbox, overlap_with_gt, target_score)``.
    """
    device = ('cuda' if opts.use_gpu else 'cpu')
    model = MDNet(opts.model_path).to(device)
    criterion = BCELoss()
    # Set learnable parameters: only layers listed in opts.ft_layers train.
    for k, p in model.params.items():
        p.requires_grad = any([k.startswith(l) for l in opts.ft_layers])
    # Set optimizer states (per-layer learning-rate multipliers via lr_mult).
    def set_optimizer(lr_base, lr_mult, momentum=0.9, w_decay=0.0005):
        param_list = []
        for k, p in filter(lambda kp: kp[1].requires_grad, model.params.items()):
            lr = lr_base
            for l, m in lr_mult.items():
                if k.startswith(l):
                    lr = lr_base * m
            param_list.append({'params': [p], 'lr': lr})
        return optim.SGD(param_list, lr=lr, momentum=momentum, weight_decay=w_decay)
    init_optimizer = set_optimizer(opts.lr_init, opts.lr_mult)
    update_optimizer = set_optimizer(opts.lr_update, opts.lr_mult)
    # Load first image
    image = Image.open(images[0]).convert('RGB')
    # Draw pos/neg samples around the initial box for first-frame training.
    pos_examples = SampleGenerator('gaussian', image.size, opts.trans_pos, opts.scale_pos)(
        init_bbox, opts.n_pos_init, opts.overlap_pos_init)
    neg_examples = np.concatenate([
        SampleGenerator('uniform', image.size, opts.trans_neg_init, opts.scale_neg_init)(
            init_bbox, int(opts.n_neg_init * 0.5), opts.overlap_neg_init),
        SampleGenerator('whole', image.size)(
            init_bbox, int(opts.n_neg_init * 0.5), opts.overlap_neg_init)])
    neg_examples = np.random.permutation(neg_examples)
    # Extract pos/neg features
    pos_feats = forward_samples(model, image, pos_examples, opts)
    neg_feats = forward_samples(model, image, neg_examples, opts)
    # Initial training
    train(model, criterion, init_optimizer, pos_feats, neg_feats, opts.maxiter_init, opts)
    del init_optimizer, neg_feats
    torch.cuda.empty_cache()
    # Train bbox Regressor on features around the initial box.
    bbreg_examples = SampleGenerator('uniform', image.size, opts.trans_bbreg, opts.scale_bbreg, opts.aspect_bbreg)\
        (init_bbox, opts.n_bbreg, opts.overlap_bbreg)
    bbreg_feats = forward_samples(model, image, bbreg_examples, opts)
    bbreg = BBRegressor(image.size)
    bbreg.train(bbreg_feats, bbreg_examples, init_bbox)
    del bbreg_feats
    torch.cuda.empty_cache()
    # Init sample generators for update
    sample_generator = SampleGenerator('gaussian', image.size, opts.trans, opts.scale)
    pos_generator = SampleGenerator('gaussian', image.size, opts.trans_pos, opts.scale_pos)
    neg_generator = SampleGenerator('uniform', image.size, opts.trans_neg, opts.scale_neg)
    # Init pos/neg features for update
    neg_examples = neg_generator(init_bbox, opts.n_neg_update, opts.overlap_neg_init)
    neg_feats = forward_samples(model, image, neg_examples, opts)
    pos_feats_all = [pos_feats]
    neg_feats_all = [neg_feats]
    # Main loop over the remaining frames (init_bbox is reused as the
    # current target estimate from here on).
    for i, image in enumerate(images[1:], 1):
        image = Image.open(image).convert('RGB')
        # Estimate target bbox: score candidates, average the top 5.
        samples = sample_generator(init_bbox, opts.n_samples)
        sample_scores = forward_samples(model, image, samples, opts, out_layer='fc6')
        top_scores, top_idx = sample_scores[:, 1].topk(5)
        top_idx = top_idx.cpu()
        target_score = top_scores.mean()
        init_bbox = samples[top_idx]
        if top_idx.shape[0] > 1:
            init_bbox = init_bbox.mean(axis=0)
        # Tracking is considered successful when the mean top score is positive.
        success = target_score > 0
        # Expand search area at failure
        sample_generator.trans = opts.trans if success else min(sample_generator.trans * 1.1, opts.trans_limit)
        # Bbox regression (only when tracking succeeded)
        if success:
            bbreg_samples = samples[top_idx]
            if top_idx.shape[0] == 1:
                bbreg_samples = bbreg_samples[None, :]
            bbreg_feats = forward_samples(model, image, bbreg_samples, opts)
            bbreg_samples = bbreg.predict(bbreg_feats, bbreg_samples)
            bbreg_bbox = bbreg_samples.mean(axis=0)
        else:
            bbreg_bbox = init_bbox
        yield init_bbox, bbreg_bbox, overlap_ratio(ground_truths[i], bbreg_bbox)[0], target_score
        # Data collect: harvest new pos/neg samples around the new estimate,
        # keeping bounded histories (n_frames_long / n_frames_short).
        if success:
            pos_examples = pos_generator(init_bbox, opts.n_pos_update, opts.overlap_pos_update)
            pos_feats = forward_samples(model, image, pos_examples, opts)
            pos_feats_all.append(pos_feats)
            if len(pos_feats_all) > opts.n_frames_long:
                del pos_feats_all[0]
            neg_examples = neg_generator(init_bbox, opts.n_neg_update, opts.overlap_neg_update)
            neg_feats = forward_samples(model, image, neg_examples, opts)
            neg_feats_all.append(neg_feats)
            if len(neg_feats_all) > opts.n_frames_short:
                del neg_feats_all[0]
        # Short term update: on failure, retrain on the recent history only.
        # TODO: What if disable Short term update?
        if not success:
            nframes = min(opts.n_frames_short, len(pos_feats_all))
            pos_data = torch.cat(pos_feats_all[-nframes:], 0)
            neg_data = torch.cat(neg_feats_all, 0)
            train(model, criterion, update_optimizer, pos_data, neg_data, opts.maxiter_update, opts)
        # Long term update: periodically retrain on the full history.
        # TODO: What if disable Long term update?
        elif i % opts.long_interval == 0:
            pos_data = torch.cat(pos_feats_all, 0)
            neg_data = torch.cat(neg_feats_all, 0)
            train(model, criterion, update_optimizer, pos_data, neg_data, opts.maxiter_update, opts)
        torch.cuda.empty_cache()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--seq', default='./datasets/DragonBaby', help='input seq')
    args = parser.parse_args()
    # Fix the RNG seeds so tracking runs are reproducible.
    np.random.seed(0)
    torch.manual_seed(0)
    options = Options()
    dataset = Path(args.seq)
    images = list(sorted(dataset.joinpath('img').glob('*.jpg')))
    # One ground-truth box per frame; presumably columns are x, y, w, h —
    # TODO confirm against the dataset's groundtruth_rect.txt format.
    ground_truths = pd.read_csv(str(dataset.joinpath('groundtruth_rect.txt')), header=None).values
    # Run tracker
    for i, (result, (x, y, w, h), overlap, score) in \
            enumerate(main(images, ground_truths[0], ground_truths, options), 1):
        image = np.asarray(Image.open(images[i]).convert('RGB'))
        print(i, result)
        # Draw ground truth (green) and the regressed prediction (red).
        gx, gy, gw, gh = ground_truths[i]
        cv2.rectangle(image, (int(gx), int(gy)), (int(gx+gw), int(gy+gh)), (0, 255, 0), 2)
        cv2.rectangle(image, (int(x), int(y)), (int(x+w), int(y+h)), (255, 0, 0), 2)
        plt.imshow(image)
        plt.pause(.1)
        plt.draw()
| [
"models.regressor.BBRegressor",
"models.extractor.RegionExtractor",
"utils.overlap_ratio",
"models.mdnet.BCELoss",
"matplotlib.pyplot.imshow",
"argparse.ArgumentParser",
"pathlib.Path",
"numpy.random.seed",
"utils.Options",
"numpy.random.permutation",
"torch.optim.SGD",
"matplotlib.pyplot.paus... | [((476, 553), 'models.extractor.RegionExtractor', 'RegionExtractor', (['image', 'samples', 'opts.img_size', 'opts.padding', 'opts.batch_test'], {}), '(image, samples, opts.img_size, opts.padding, opts.batch_test)\n', (491, 553), False, 'from models.extractor import SampleGenerator, RegionExtractor\n'), ((3367, 3376), 'models.mdnet.BCELoss', 'BCELoss', ([], {}), '()\n', (3374, 3376), False, 'from models.mdnet import MDNet, BCELoss\n'), ((4734, 4769), 'numpy.random.permutation', 'np.random.permutation', (['neg_examples'], {}), '(neg_examples)\n', (4755, 4769), True, 'import numpy as np\n'), ((5087, 5111), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (5109, 5111), False, 'import torch\n'), ((5393, 5416), 'models.regressor.BBRegressor', 'BBRegressor', (['image.size'], {}), '(image.size)\n', (5404, 5416), False, 'from models.regressor import BBRegressor\n'), ((5497, 5521), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (5519, 5521), False, 'import torch\n'), ((5586, 5649), 'models.extractor.SampleGenerator', 'SampleGenerator', (['"""gaussian"""', 'image.size', 'opts.trans', 'opts.scale'], {}), "('gaussian', image.size, opts.trans, opts.scale)\n", (5601, 5649), False, 'from models.extractor import SampleGenerator, RegionExtractor\n'), ((5670, 5741), 'models.extractor.SampleGenerator', 'SampleGenerator', (['"""gaussian"""', 'image.size', 'opts.trans_pos', 'opts.scale_pos'], {}), "('gaussian', image.size, opts.trans_pos, opts.scale_pos)\n", (5685, 5741), False, 'from models.extractor import SampleGenerator, RegionExtractor\n'), ((5762, 5832), 'models.extractor.SampleGenerator', 'SampleGenerator', (['"""uniform"""', 'image.size', 'opts.trans_neg', 'opts.scale_neg'], {}), "('uniform', image.size, opts.trans_neg, opts.scale_neg)\n", (5777, 5832), False, 'from models.extractor import SampleGenerator, RegionExtractor\n'), ((8799, 8824), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), 
'()\n', (8822, 8824), False, 'import argparse\n'), ((8952, 8969), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (8966, 8969), True, 'import numpy as np\n'), ((8974, 8994), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (8991, 8994), False, 'import torch\n'), ((9010, 9019), 'utils.Options', 'Options', ([], {}), '()\n', (9017, 9019), False, 'from utils import Options, overlap_ratio\n'), ((9034, 9048), 'pathlib.Path', 'Path', (['args.seq'], {}), '(args.seq)\n', (9038, 9048), False, 'from pathlib import Path\n'), ((3938, 4007), 'torch.optim.SGD', 'optim.SGD', (['param_list'], {'lr': 'lr', 'momentum': 'momentum', 'weight_decay': 'w_decay'}), '(param_list, lr=lr, momentum=momentum, weight_decay=w_decay)\n', (3947, 4007), True, 'import torch.optim as optim\n'), ((4259, 4330), 'models.extractor.SampleGenerator', 'SampleGenerator', (['"""gaussian"""', 'image.size', 'opts.trans_pos', 'opts.scale_pos'], {}), "('gaussian', image.size, opts.trans_pos, opts.scale_pos)\n", (4274, 4330), False, 'from models.extractor import SampleGenerator, RegionExtractor\n'), ((5161, 5258), 'models.extractor.SampleGenerator', 'SampleGenerator', (['"""uniform"""', 'image.size', 'opts.trans_bbreg', 'opts.scale_bbreg', 'opts.aspect_bbreg'], {}), "('uniform', image.size, opts.trans_bbreg, opts.scale_bbreg,\n opts.aspect_bbreg)\n", (5176, 5258), False, 'from models.extractor import SampleGenerator, RegionExtractor\n'), ((8731, 8755), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (8753, 8755), False, 'import torch\n'), ((9689, 9706), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (9699, 9706), True, 'import matplotlib.pyplot as plt\n'), ((9715, 9729), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (9724, 9729), True, 'import matplotlib.pyplot as plt\n'), ((9737, 9747), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (9745, 9747), True, 'import matplotlib.pyplot as plt\n'), ((675, 690), 
'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (688, 690), False, 'import torch\n'), ((3316, 3338), 'models.mdnet.MDNet', 'MDNet', (['opts.model_path'], {}), '(opts.model_path)\n', (3321, 3338), False, 'from models.mdnet import MDNet, BCELoss\n'), ((4175, 4196), 'PIL.Image.open', 'Image.open', (['images[0]'], {}), '(images[0])\n', (4185, 4196), False, 'from PIL import Image\n'), ((8208, 8246), 'torch.cat', 'torch.cat', (['pos_feats_all[-nframes:]', '(0)'], {}), '(pos_feats_all[-nframes:], 0)\n', (8217, 8246), False, 'import torch\n'), ((8270, 8297), 'torch.cat', 'torch.cat', (['neg_feats_all', '(0)'], {}), '(neg_feats_all, 0)\n', (8279, 8297), False, 'import torch\n'), ((4436, 4521), 'models.extractor.SampleGenerator', 'SampleGenerator', (['"""uniform"""', 'image.size', 'opts.trans_neg_init', 'opts.scale_neg_init'], {}), "('uniform', image.size, opts.trans_neg_init, opts.scale_neg_init\n )\n", (4451, 4521), False, 'from models.extractor import SampleGenerator, RegionExtractor\n'), ((4601, 4637), 'models.extractor.SampleGenerator', 'SampleGenerator', (['"""whole"""', 'image.size'], {}), "('whole', image.size)\n", (4616, 4637), False, 'from models.extractor import SampleGenerator, RegionExtractor\n'), ((6168, 6185), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (6178, 6185), False, 'from PIL import Image\n'), ((8542, 8569), 'torch.cat', 'torch.cat', (['pos_feats_all', '(0)'], {}), '(pos_feats_all, 0)\n', (8551, 8569), False, 'import torch\n'), ((8593, 8620), 'torch.cat', 'torch.cat', (['neg_feats_all', '(0)'], {}), '(neg_feats_all, 0)\n', (8602, 8620), False, 'import torch\n'), ((2384, 2399), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2397, 2399), False, 'import torch\n'), ((7292, 7335), 'utils.overlap_ratio', 'overlap_ratio', (['ground_truths[i]', 'bbreg_bbox'], {}), '(ground_truths[i], bbreg_bbox)\n', (7305, 7335), False, 'from utils import Options, overlap_ratio\n'), ((9397, 9418), 'PIL.Image.open', 'Image.open', (['images[i]'], {}), 
'(images[i])\n', (9407, 9418), False, 'from PIL import Image\n')] |
# -*- coding: utf-8 -*-
# test_adapter.py
"""
Unit tests for the schema adapter.
"""
from __future__ import print_function
import importlib
import os
import random
import sys
import tempfile
import numpy
from random_words import RandomWords, LoremIpsum
# Shared generators for random words/sentences used as test fixtures.
rw = RandomWords()
li = LoremIpsum()
from . import _random_integer, Py23FixTestCase, _random_float, _random_floats
from .. import EMDB_SFF_VERSION
from ..core import _xrange, _str, _print
from ..schema import base  # , emdb_sff
# dynamically import the latest schema generateDS API
# (module name encodes the schema version, e.g. 'sfftkrw.schema.v0_8_0')
emdb_sff_name = 'sfftkrw.schema.v{schema_version}'.format(
    schema_version=EMDB_SFF_VERSION.replace('.', '_')
)
# dynamically import the adapter for the API
adapter_name = 'sfftkrw.schema.adapter_v{schema_version}'.format(
    schema_version=EMDB_SFF_VERSION.replace('.', '_')
)
emdb_sff = importlib.import_module(emdb_sff_name)
adapter = importlib.import_module(adapter_name)
class TestSFFTypeError(Py23FixTestCase):
    """Tests for the `SFFTypeError` exception."""

    def test_default(self):
        """Comparing objects of different SFF types raises `SFFTypeError`."""
        colour = adapter.SFFRGBA()
        with self.assertRaisesRegex(base.SFFTypeError, r".*?is not object of type.*?"):
            colour == adapter.SFFSegment()

    def test_message(self):
        """The raised error carries the supplied message."""
        volume = adapter.SFFVolumeStructure()
        with self.assertRaisesRegex(base.SFFTypeError, r"should be of length 3"):
            volume.value = (1, 2)
class TestSFFType(Py23FixTestCase):
    """Tests for the main base class `base.SFFType`: construction, adoption of
    generateDS objects, repr machinery, export formats, validation and equality."""
    def test_create_segmentation(self):
        """Test that a created empty segmentation has the correct version"""
        S = adapter.SFFSegmentation()
        _S = emdb_sff.segmentation()
        # the adapter object must report the generateDS schema version
        self.assertEqual(S.version, _S.schema_version)
    def test_gds_type_missing(self):
        """Test for presence of `gds_type` attribute"""
        class _SomeEntity(base.SFFType):
            """Empty entity"""
        # instantiating a subclass that never set `gds_type` must fail
        with self.assertRaisesRegex(ValueError, r'.*gds_type.*'):
            _s = _SomeEntity()
    def test_create_from_gds_type(self):
        """Test creating an `SFFType` subclass object from a `gds_type' object"""
        # we will try with SFFRGBA and rgba_type
        red = _random_float()
        green = _random_float()
        blue = _random_float()
        _r = emdb_sff.rgba_type(
            red=red, green=green, blue=blue,
        )
        r = adapter.SFFRGBA.from_gds_type(_r)
        self.assertIsInstance(r, adapter.SFFRGBA)
        self.assertEqual(r.red, red)
        self.assertEqual(r.green, green)
        self.assertEqual(r.blue, blue)
        # from None returns None
        r = adapter.SFFRGBA.from_gds_type()
        self.assertIsNone(r)
    def test_create_from_gds_type_raises_error(self):
        """Test that we get an exception when the `SFFType` subclass object's `gds_type` attribute is not the same
        as the one provided"""
        _r = emdb_sff.biological_annotationType()
        with self.assertRaisesRegex(base.SFFTypeError, r".*is not object of type.*"):
            r = adapter.SFFRGBA.from_gds_type(_r)
    def test_ref_attr(self):
        """Test the `gds_tag_name` attribute"""
        c = adapter.SFFRGBA(
            red=1, green=1, blue=0, alpha=0.5
        )
        r = repr(c)
        # repr must contain a parenthesised, comma-separated numeric listing
        self.assertRegex(r, r"\(.*\d+,.*\)")
    def test_repr_string_repr_args(self):
        """Test the string representation using `repr_string` and `repr_args`"""
        # correct rendering for colour: prints out repr_string filled with repr_args
        c = adapter.SFFRGBA(random_colour=True)
        self.assertRegex(_str(c), r"SFFRGBA\(red=\d\.\d+.*\)")
        # correct assessment of length: prints out a string with the correct len() value
        c = adapter.SFFSoftwareList()
        c.id = rw.random_words(count=10)
        self.assertRegex(_str(c), r"SFFSoftwareList\(\[.*\]\)")
        # plain string: prints the plain string
        v = adapter.SFFThreeDVolume()
        self.assertRegex(_str(v), r"""SFFThreeDVolume\(lattice_id=None, value=None, transform_id=None\)""")
        # len() works
        class _SoftwareList(adapter.SFFSoftwareList):
            repr_string = u'software list of length {}'
            repr_args = (u'len()',)
        Sw = _SoftwareList()
        no_sw = _random_integer(start=2, stop=10)
        [Sw.append(adapter.SFFSoftware(name=rw.random_word(), version=rw.random_word())) for _ in _xrange(no_sw)]
        self.assertRegex(_str(Sw), r".*{}.*".format(no_sw))
        # using index syntax
        class _Lattice(adapter.SFFLattice):
            repr_string = u"{}"
            repr_args = (u"data[:20]",)
        L = _Lattice.from_array(numpy.random.randint(0, 10, size=(5, 5, 5)),
                              size=adapter.SFFVolumeStructure(rows=5, cols=5, sections=5))
        self.assertRegex(_str(L), r"\".*\.\.\.\"")
        # no repr_args
        class _Complexes(adapter.SFFSoftwareList):
            repr_string = u"complexes"
            repr_args = ()
        Sw = _Complexes()
        self.assertEqual(_str(Sw), u"complexes")
        # repr_str is missing: prints out the output of type
        class _RGBA(adapter.SFFRGBA):
            repr_string = ""
        _c = _RGBA(random_colour=True)
        self.assertRegex(str(_c), r".class.*_RGBA.*")
        # unmatched repr_args (it should be a tuple of four values)
        class _RGBA(adapter.SFFRGBA):
            repr_args = (u'red', u'green')
        _c = _RGBA(random_colour=True)
        with self.assertRaisesRegex(ValueError, r'Unmatched number.*'):
            str(_c)
    def test_export_xml(self):
        """Test that we can export a segmentation as XML"""
        S = adapter.SFFSegmentation()
        S.name = u'test segmentation'
        S.primary_descriptor = u'mesh_list'
        S.details = li.get_sentences(sentences=10)
        tf = tempfile.NamedTemporaryFile()
        tf.name += '.sff'
        S.export(tf.name)
        # round-trip: the re-read segmentation must equal the exported one
        _S = adapter.SFFSegmentation.from_file(tf.name)
        self.assertEqual(S, _S)
    def test_export_hdf5(self):
        """Test that we can export a segmentation as HDF5"""
        S = adapter.SFFSegmentation()
        S.name = u'test segmentation'
        S.primary_descriptor = u'mesh_list'
        S.software = adapter.SFFSoftware()
        S.transforms = adapter.SFFTransformList()
        S.bounding_box = adapter.SFFBoundingBox()
        S.global_external_references = adapter.SFFGlobalExternalReferenceList()
        S.segments = adapter.SFFSegmentList()
        S.lattices = adapter.SFFLatticeList()
        S.details = li.get_sentences(sentences=10)
        tf = tempfile.NamedTemporaryFile()
        tf.name += u'.hff'
        S.export(tf.name)
        _S = adapter.SFFSegmentation.from_file(tf.name)
        self.assertEqual(S, _S)
    def test_export_json(self):
        """Test that we can export a segmentation as JSON"""
        S = adapter.SFFSegmentation()
        S.name = u'test segmentation'
        S.primary_descriptor = u'mesh_list'
        S.software = adapter.SFFSoftware()
        S.transforms = adapter.SFFTransformList()
        S.bounding_box = adapter.SFFBoundingBox()
        S.global_external_references = adapter.SFFGlobalExternalReferenceList()
        S.segments = adapter.SFFSegmentList()
        S.lattices = adapter.SFFLatticeList()
        S.details = li.get_sentences(sentences=10)
        tf = tempfile.NamedTemporaryFile()
        tf.name += '.json'
        S.export(tf.name)
        _S = adapter.SFFSegmentation.from_file(tf.name)
        self.assertEqual(S, _S)
    def test_export_stderr(self):
        """Test that we can export to stderr"""
        S = adapter.SFFSegmentation(
            name=rw.random_word(),
            primary_descriptor=u'mesh_list',
        )
        # we check that everything was OK (export returns os.EX_OK on success)
        self.assertEqual(S.export(sys.stderr), os.EX_OK)
    def test_export_errors(self):
        """Test that we catch all export errors"""
        # an unrecognised file extension yields os.EX_DATAERR instead of raising
        tf = tempfile.NamedTemporaryFile()
        tf.name += u'.invalid'
        self.assertEqual(os.EX_DATAERR,
                         adapter.SFFSegmentation(name=rw.random_word(), primary_descriptor=u'mesh_list').export(
                             tf.name))
    def test_format_method_missing(self):
        """Test that we get `NotImplementedError`s"""
        class _SomeEntity(base.SFFType):
            """Empty entity"""
            gds_type = emdb_sff.segmentation
        _S = _SomeEntity()
        # as_hff/from_hff are abstract on the base and must be overridden
        with self.assertRaises(NotImplementedError):
            _S.as_hff(u'test')
        with self.assertRaises(NotImplementedError):
            _S.from_hff(u'test')
    def test_validation(self):
        """Test validation check"""
        s = adapter.SFFSegment(colour=adapter.SFFRGBA(random_colour=True))
        self.assertTrue(s._is_valid())
        # new_obj=False leaves the id unset, which fails validation
        s = adapter.SFFSegment(colour=adapter.SFFRGBA(random_colour=True), new_obj=False)
        self.assertFalse(s._is_valid())
    def test_eq_attrs(self):
        """Test the attribute that is a list of attributes for equality testing"""
        class _SomeEntity(adapter.SFFBoundingBox):
            eq_attrs = [u'xmin', u'xmax']
        # we test equality of bounding boxes only on xmin and xmax
        # equal
        b1 = _SomeEntity(xmin=1, xmax=2)
        b2 = _SomeEntity(xmin=1, xmax=2)
        self.assertEqual(b1, b2)
        # not equal
        b1 = _SomeEntity(xmin=1, xmax=2)
        b2 = _SomeEntity(xmin=0, xmax=2)
        self.assertNotEqual(b1, b2)
        # when not defined we get False by default
        class _SomeEntity(adapter.SFFBoundingBox):
            """eq_attrs is empty by default"""
            eq_attrs = []
        b1 = _SomeEntity(xmin=1)
        b2 = _SomeEntity(xmin=1)
        self.assertNotEqual(b1, b2)
        # exception: we can't compare things that are not of the same type
        with self.assertRaises(base.SFFTypeError):
            s = adapter.SFFSegment()
            b1 == s
class TestSFFIndexType(Py23FixTestCase):
    """Test the indexing mixin class `SFFIndexType"""
    def setUp(self):
        """Reset ids"""
        # reset the class-level counters directly (not via reset_id()) so each
        # test starts from a known index state
        adapter.SFFSegment.segment_id = 1 # reset ID informally
        adapter.SFFShape.shape_id = 0
    def test_create_from_gds_type(self):
        """Test creating an `SFFIndexType` subclass object from a gds type"""
        # segment
        _s = emdb_sff.segment_type()
        s = adapter.SFFSegment.from_gds_type(_s)
        self.assertIsNone(s.id)
        _t = emdb_sff.segment_type(id=10)
        t = adapter.SFFSegment.from_gds_type(_t)
        self.assertEqual(t.id, 10)
        u = adapter.SFFSegment.from_gds_type(None)
        self.assertIsNone(u)
        with self.assertRaises(adapter.SFFTypeError):
            adapter.SFFSegment.from_gds_type([])
    def test_explicit_set_id(self):
        """Test that we can explicitly set ID apart from incrementing"""
        s = adapter.SFFSegment()
        self.assertEqual(s.id, 1)
        s = adapter.SFFSegment(id=999)
        self.assertEqual(s.id, 999)
        # the counter continues from the explicitly-set value
        s = adapter.SFFSegment()
        self.assertEqual(s.id, 1000)
    def test_new_obj_True(self):
        """Test that an empty `SFFIndexType` subclass has correct indexes"""
        s = adapter.SFFSegment()
        self.assertEqual(s.id, 1)
        s = adapter.SFFSegment(new_obj=True) # verbose: `new_obj=True` by default
        self.assertEqual(s.id, 2)
    def test_new_obj_False(self):
        """Test that `new_obj=False` for empty `SFFIndexType` subclass has None for ID"""
        s = adapter.SFFSegment(new_obj=False)
        self.assertIsNone(s.id)
    def test_proper_incrementing(self):
        """Test that proper incrementing with and without `new_obj=False/True`"""
        s = adapter.SFFSegment()
        self.assertEqual(s.id, 1)
        s = adapter.SFFSegment()
        self.assertEqual(s.id, 2)
        # `new_obj=False` does not consume an index
        s = adapter.SFFSegment(new_obj=False)
        self.assertIsNone(s.id)
        s = adapter.SFFSegment()
        self.assertEqual(s.id, 3)
        s = adapter.SFFSegment(new_obj=True)
        self.assertEqual(s.id, 4)
        s = adapter.SFFSegment(new_obj=False)
        self.assertIsNone(s.id)
        s = adapter.SFFSegment()
        self.assertEqual(s.id, 5)
        # objects adopted from gds types keep their own id and do not
        # advance the counter
        s = adapter.SFFSegment.from_gds_type(emdb_sff.segment_type(id=35))
        self.assertEqual(s.id, 35)
        s = adapter.SFFSegment.from_gds_type(emdb_sff.segment_type())
        self.assertIsNone(s.id)
        s = adapter.SFFSegment()
        self.assertEqual(s.id, 6)
    def test_with_gds_type(self):
        """Test that we can work with generateDS types"""
        s = adapter.SFFSegment.from_gds_type(emdb_sff.segment_type())
        self.assertIsNone(s.id)
        s = adapter.SFFSegment.from_gds_type(emdb_sff.segment_type(id=37))
        self.assertIsNotNone(s.id)
        # adopting gds objects must leave the class counter untouched
        self.assertEqual(adapter.SFFSegment.segment_id, 1)
        s = adapter.SFFSegment.from_gds_type(emdb_sff.segment_type(id=38))
        self.assertEqual(adapter.SFFSegment.segment_id, 1)
    def test_reset_id(self):
        """Test that we can reset the ID"""
        s = adapter.SFFSegment()
        self.assertEqual(s.id, 1)
        s = adapter.SFFSegment()
        self.assertEqual(s.id, 2)
        s = adapter.SFFSegment()
        self.assertEqual(s.id, 3)
        s = adapter.SFFSegment()
        self.assertEqual(s.id, 4)
        # after reset the sequence starts over from 1
        adapter.SFFSegment.reset_id()
        s = adapter.SFFSegment()
        self.assertEqual(s.id, 1)
        s = adapter.SFFSegment()
        self.assertEqual(s.id, 2)
        s = adapter.SFFSegment()
        self.assertEqual(s.id, 3)
        s = adapter.SFFSegment()
        self.assertEqual(s.id, 4)
    def test_index_in_super(self):
        """Test that indexes work correctly in subclasses"""
        # all shape subclasses share one counter on the common superclass
        cone = adapter.SFFCone()
        cuboid = adapter.SFFCuboid()
        cylinder = adapter.SFFCylinder()
        ellipsoid = adapter.SFFEllipsoid()
        self.assertEqual(cone.shape_id, 0)
        self.assertEqual(cuboid.shape_id, 1)
        self.assertEqual(cylinder.shape_id, 2)
        self.assertEqual(ellipsoid.shape_id, 3)
        cone = adapter.SFFCone()
        cuboid = adapter.SFFCuboid()
        cylinder = adapter.SFFCylinder()
        ellipsoid = adapter.SFFEllipsoid()
        self.assertEqual(cone.shape_id, 4)
        self.assertEqual(cuboid.shape_id, 5)
        self.assertEqual(cylinder.shape_id, 6)
        self.assertEqual(ellipsoid.shape_id, 7)
    def test_index_in_super_error(self):
        """Test that we get an `AttributeError` when `update_counter` is missing"""
        class _Shape(base.SFFIndexType, base.SFFType):
            repr_string = "{} {}"
            repr_args = ('ref', 'id')
            shape_id = 0
            index_attr = 'shape_id'
            index_in_super = True
            # attributes
            id = base.SFFAttribute('id', help="the ID of this shape")
            transform_id = base.SFFAttribute(
                'transform_id',
                help="the transform applied to this shape to position it in the space"
            )
            attribute = base.SFFAttribute(
                'attribute',
                help="extra attribute information e.g. 'FOM'"
            )
        class _Cone(_Shape):
            gds_type = emdb_sff.cone
            ref = "cone"
        with self.assertRaisesRegex(AttributeError, r".*superclass does not have an 'update_counter' classmethod"):
            _cone = _Cone()
    def test_errors(self):
        """Test that we get the right exceptions"""
        # empty index attribute name
        class _Segment(adapter.SFFSegment):
            index_attr = ''
        with self.assertRaisesRegex(base.SFFTypeError, r".*subclasses must provide an index attribute"):
            _Segment()
        # index attribute named but no matching class variable
        class _Segment(adapter.SFFSegment):
            index_attr = 'segment_index'
        with self.assertRaisesRegex(AttributeError, r".*is missing a class variable.*"):
            _Segment()
        # non-integer counter value
        class _Segment(adapter.SFFSegment):
            segment_id = 3.8
        with self.assertRaises(base.SFFTypeError):
            _Segment()
class TestSFFListType(Py23FixTestCase):
    """Test the iteration mixin class `SFFListType`: list protocol (len, index,
    iterate, append, insert, pop, remove, reverse, copy, extend) plus the
    id->item dictionary kept in `_id_dict` and used by `get_by_id`."""
    def test_create_from_gds_type(self):
        """Test create from a gds_type"""
        # empty list
        _S = emdb_sff.segment_listType()
        S = adapter.SFFSegmentList.from_gds_type(_S)
        self.assertEqual(len(S), 0)
        # populated list; no segment IDS
        _T = emdb_sff.segment_listType()
        _no_items = _random_integer(start=2, stop=10)
        [_T.add_segment(emdb_sff.segment_type()) for _ in _xrange(_no_items)]
        T = adapter.SFFSegmentList.from_gds_type(_T)
        self.assertEqual(len(T), _no_items)
        # populated list; with segment IDS
        _U = emdb_sff.segment_listType()
        [_U.add_segment(emdb_sff.segment_type(id=i)) for i in _xrange(1, _no_items + 1)]
        U = adapter.SFFSegmentList.from_gds_type(_U)
        self.assertEqual(len(U), _no_items)
        # items with ids must also be registered in the id lookup dict
        self.assertEqual(len(U._id_dict), _no_items)
        # error
        with self.assertRaisesRegex(base.SFFTypeError, r".*is not object of type.*"):
            adapter.SFFSegmentList.from_gds_type([])
    # def test_create_from_list(self):
    #     """Test that we can create a `SFFListType` object from a literal list of contained objects"""
    #     # segments
    #     _no_items = _random_integer(start=2, stop=10)
    #     _S = [adapter.SFFSegment() for _ in _xrange(_no_items)]
    #     S = adapter.SFFSegmentList.from_list(_S)
    #     self.assertEqual(len(S), _no_items)
    #     self.assertEqual(len(S._id_dict), _no_items)
    #     self.assertIsInstance(S.get_by_id(1), adapter.SFFSegment)
    def test_length(self):
        """Test that we can evaluate length"""
        # segments
        S = adapter.SFFSegmentList()
        _no_segments = _random_integer(start=1, stop=10)
        [S.append(adapter.SFFSegment()) for _ in _xrange(_no_segments)]
        self.assertEqual(len(S), _no_segments)
        # shapes
        Sh = adapter.SFFShapePrimitiveList()
        _no_shapes = _random_integer(start=1, stop=10)
        [Sh.append(adapter.SFFCone()) for _ in _xrange(_no_shapes)]
        [Sh.append(adapter.SFFCuboid()) for _ in _xrange(_no_shapes)]
        [Sh.append(adapter.SFFCylinder()) for _ in _xrange(_no_shapes)]
        [Sh.append(adapter.SFFEllipsoid()) for _ in _xrange(_no_shapes)]
        self.assertEqual(len(Sh), _no_shapes * 4)
    def test_reset_id(self):
        """Test that we can reset IDs"""
        # creating a new list resets the contained type's id counter
        adapter.SFFSegmentList()
        s = adapter.SFFSegment()
        self.assertEqual(s.id, 1)
        adapter.SFFSegmentList()
        s = adapter.SFFSegment()
        self.assertEqual(s.id, 1)
    def test_iterate(self):
        """Test that we can iterate"""
        # segments
        S = adapter.SFFSegmentList()
        _no_segments = _random_integer(start=2, stop=10)
        for _ in _xrange(_no_segments):
            S.append(adapter.SFFSegment())
        for i, s in enumerate(S, start=1):
            self.assertIsInstance(s, adapter.SFFSegment)
            self.assertEqual(s.id, i)
        # software list
        Sw = adapter.SFFSoftwareList()
        _no_sw = _random_integer(start=2, stop=10)
        [Sw.append(adapter.SFFSoftware(name=rw.random_word(), version=rw.random_word())) for _ in _xrange(_no_sw)]
        self.assertEqual(next(iter(Sw)), Sw[0])
    def test_sibling_classes(self):
        """Test that the `sibling_classes` attribute works"""
        Sh = adapter.SFFShapePrimitiveList()
        _no_items = _random_integer(start=2, stop=10)
        [Sh.append(adapter.SFFCone(height=_random_float(), bottomRadius=_random_float())) for _ in _xrange(_no_items)]
        [Sh.append(adapter.SFFCuboid(x=_random_float(), y=_random_float(), z=_random_float())) for _ in
         _xrange(_no_items)]
        [Sh.append(adapter.SFFCylinder(height=_random_float(), diameter=_random_float())) for _ in _xrange(_no_items)]
        [Sh.append(adapter.SFFEllipsoid(x=_random_float(), y=_random_float(), z=_random_float())) for _ in
         _xrange(_no_items)]
        # each gds item is surfaced as the matching adapter sibling class
        for i in _xrange(_no_items):
            self.assertIsInstance(Sh[i], adapter.SFFCone)
        for i in _xrange(i + 1, _no_items * 2):
            self.assertIsInstance(Sh[i], adapter.SFFCuboid)
        for i in _xrange(i + 1, _no_items * 3):
            self.assertIsInstance(Sh[i], adapter.SFFCylinder)
        for i in _xrange(i + 1, _no_items * 4):
            self.assertIsInstance(Sh[i], adapter.SFFEllipsoid)
        # exceptions
        class _Shapes(adapter.SFFShapePrimitiveList):
            sibling_classes = [
                (emdb_sff.cone, adapter.SFFCone),
                (emdb_sff.ellipsoid, adapter.SFFEllipsoid)
            ]
        _S = _Shapes()
        _S.append(adapter.SFFCylinder())
        # a contained type missing from sibling_classes cannot be resolved
        with self.assertRaises(base.SFFTypeError):
            _S[0]
    def test_getitem(self):
        """Test that we use index syntax to retrieve an object"""
        # segments
        S = adapter.SFFSegmentList()
        _no_segments = _random_integer(start=3, stop=10)
        [S.append(adapter.SFFSegment()) for _ in _xrange(_no_segments)]
        self.assertIsInstance(S[_no_segments - 1], adapter.SFFSegment)
        # software list
        Sw = adapter.SFFSoftwareList()
        _no_sw = _random_integer(start=3, stop=10)
        [Sw.append(adapter.SFFSoftware(name=rw.random_word(), version=rw.random_word())) for _ in _xrange(_no_sw)]
        self.assertIsInstance(Sw[_no_sw - 1], adapter.SFFSoftware)
        # do we get an IndexError?
        with self.assertRaises(IndexError):
            _ = S[_no_segments]
        with self.assertRaises(IndexError):
            _ = Sw[_no_sw]
    def test_setitem(self):
        """Test that we can use index syntax to set an object"""
        # segment
        S = adapter.SFFSegmentList()
        S.append(adapter.SFFSegment())
        S[0] = adapter.SFFSegment()
        self.assertEqual(len(S), 1)
        # software
        Sw = adapter.SFFSoftwareList()
        Sw.append(adapter.SFFSoftware(name=rw.random_word(), version=rw.random_word()))
        Sw[0] = adapter.SFFSoftware(name=rw.random_word(), version=rw.random_word())
        self.assertEqual(len(Sw), 1)
        # exceptions: only the contained type may be assigned
        with self.assertRaisesRegex(base.SFFTypeError, r".*is not object of type.*"):
            S[0] = rw.random_word()
        with self.assertRaisesRegex(base.SFFTypeError, r".*is not object of type.*"):
            S[0] = adapter.SFFSoftwareList()
        with self.assertRaisesRegex(base.SFFTypeError, r".*is not object of type.*"):
            Sw[0] = adapter.SFFSegment()
    def test_delitem(self):
        """Test that we can use index syntax for setting an item to the list"""
        # segments
        S = adapter.SFFSegmentList()
        S.append(adapter.SFFSegment())
        del S[0]
        self.assertEqual(len(S), 0)
        # software list
        Sw = adapter.SFFSoftwareList()
        Sw.append(adapter.SFFSoftware(name=rw.random_word(), version=rw.random_word()))
        del Sw[0]
        self.assertEqual(len(Sw), 0)
    def test_append(self):
        """Test that we can append to the end of the list"""
        # segments
        S = adapter.SFFSegmentList()
        self.assertEqual(len(S), 0)
        S.append(adapter.SFFSegment())
        self.assertEqual(len(S), 1)
        # software
        Sw = adapter.SFFSoftwareList()
        self.assertEqual(len(Sw), 0)
        Sw.append(adapter.SFFSoftware(name=rw.random_word(), version=rw.random_word()))
        self.assertEqual(len(Sw), 1)
        # exceptions: only the contained type may be appended
        with self.assertRaisesRegex(base.SFFTypeError, r".*is not object of type.*"):
            S.append(rw.random_word())
        with self.assertRaisesRegex(base.SFFTypeError, r".*is not object of type.*"):
            S.append(adapter.SFFSoftwareList())
        with self.assertRaisesRegex(base.SFFTypeError, r".*is not object of type.*"):
            Sw.append(adapter.SFFSegment())
    def test_clear(self):
        """Test that we can clear the list"""
        Sw = adapter.SFFSoftwareList()
        _no_sw = _random_integer(start=2, stop=10)
        [Sw.append(adapter.SFFSoftware(name=rw.random_word(), version=rw.random_word())) for _ in _xrange(_no_sw)]
        self.assertEqual(len(Sw), _no_sw)
        Sw.clear()
        self.assertEqual(len(Sw), 0)
    def test_copy(self):
        """Test that we can create a shallow copy"""
        # segments
        S = adapter.SFFSegmentList()
        _no_segments = _random_integer(start=2, stop=10)
        [S.append(adapter.SFFSegment()) for _ in _xrange(_no_segments)]
        R = S
        self.assertEqual(id(S), id(R))
        # copy() must yield a distinct object of the same type
        R = S.copy()
        self.assertIsInstance(R, type(S))
        self.assertNotEqual(id(S), id(R))
        # software list
        C = adapter.SFFSoftwareList()
        _no_complexes = _random_integer(start=2, stop=10)
        [C.append(adapter.SFFSoftware(name=rw.random_word(), version=rw.random_word())) for _ in _xrange(_no_complexes)]
        D = C
        self.assertEqual(id(C), id(D))
        D = C.copy()
        self.assertIsInstance(D, type(C))
        self.assertNotEqual(id(C), id(D))
        # shapes
        Sh = adapter.SFFShapePrimitiveList()
        _no_shapes = 3
        Sh.append(adapter.SFFCone())
        Sh.append(adapter.SFFCuboid())
        Sh.append(adapter.SFFCylinder())
        Sh.append(adapter.SFFEllipsoid())
        Rh = Sh
        self.assertEqual(id(Sh), id(Rh))
        Rh = Sh.copy()
        self.assertIsInstance(Rh, type(Sh))
        self.assertNotEqual(id(Sh), id(Rh))
    def test_extend(self):
        """Test that we can extend a `SFFListType` subclass with another"""
        # segments
        S1 = adapter.SFFSegmentList()
        _no_segments1 = _random_integer(start=2, stop=10)
        [S1.append(adapter.SFFSegment()) for _ in _xrange(_no_segments1)]
        S2 = adapter.SFFSegmentList()
        _no_segments2 = _random_integer(start=2, stop=10)
        [S2.append(adapter.SFFSegment()) for _ in _xrange(_no_segments2)]
        self.assertEqual(len(S1), _no_segments1)
        self.assertEqual(len(S2), _no_segments2)
        S1.extend(S2)
        self.assertEqual(len(S1), _no_segments1 + _no_segments2)
        # software list
        Sw1 = adapter.SFFSoftwareList()
        _no_sw1 = _random_integer(start=2, stop=10)
        [Sw1.append(adapter.SFFSoftware(name=rw.random_word(), version=rw.random_word())) for _ in _xrange(_no_sw1)]
        Sw2 = adapter.SFFSoftwareList()
        _no_sw2 = _random_integer(start=2, stop=10)
        [Sw2.append(adapter.SFFSoftware(name=rw.random_word(), version=rw.random_word())) for _ in _xrange(_no_sw2)]
        self.assertEqual(len(Sw1), _no_sw1)
        self.assertEqual(len(Sw2), _no_sw2)
        Sw1.extend(Sw2)
        self.assertEqual(len(Sw1), _no_sw1 + _no_sw2)
        # exceptions: can only extend with a list of the same type
        with self.assertRaisesRegex(base.SFFTypeError, r".*is not object of type.*"):
            S1.extend(Sw1)
        with self.assertRaisesRegex(base.SFFTypeError, r".*is not object of type.*"):
            Sw1.extend(S1)
    def test_insert(self):
        """Test that we can perform an insert"""
        # segments
        S = adapter.SFFSegmentList()
        _no_segments = _random_integer(start=2, stop=10)
        [S.append(adapter.SFFSegment()) for _ in _xrange(_no_segments)]
        self.assertEqual(len(S), _no_segments)
        s = adapter.SFFSegment()
        S.insert(1, s)
        self.assertEqual(len(S), _no_segments + 1)
        self.assertEqual(S[1].id, s.id)
        # software list
        Sw = adapter.SFFSoftwareList()
        _no_sw = _random_integer(start=2, stop=10)
        [Sw.append(adapter.SFFSoftware(name=rw.random_word(), version=rw.random_word())) for _ in _xrange(_no_sw)]
        self.assertEqual(len(Sw), _no_sw)
        sw = adapter.SFFSoftware(name=rw.random_word(), version=rw.random_word())
        Sw.insert(1, sw)
        self.assertEqual(len(Sw), _no_sw + 1)
        self.assertEqual(Sw[1], sw)
        # exceptions: only the contained type may be inserted
        with self.assertRaisesRegex(base.SFFTypeError, r".*is not object of type.*"):
            S.insert(1, sw)
        with self.assertRaisesRegex(base.SFFTypeError, r".*is not object of type.*"):
            Sw.insert(1, s)
    def test_pop(self):
        """Test that we can pop items off"""
        # segments
        S = adapter.SFFSegmentList()
        s0 = adapter.SFFSegment()
        S.append(s0)
        s1 = S.pop()
        self.assertEqual(len(S), 0)
        self.assertIsInstance(s1, adapter.SFFSegment)
        self.assertEqual(s0.id, s1.id) # ensure we are not creating a new one
        # pop with index
        S.append(adapter.SFFSegment())
        S.append(adapter.SFFSegment())
        S.append(adapter.SFFSegment())
        s = S.pop(index=1)
        self.assertEqual(len(S), 2)
        self.assertIsInstance(s, adapter.SFFSegment)
        # software list
        Sw = adapter.SFFSoftwareList()
        sw0 = adapter.SFFSoftware(name=rw.random_word(), version=rw.random_word())
        Sw.append(sw0)
        sw1 = Sw.pop()
        self.assertEqual(len(Sw), 0)
        self.assertIsInstance(sw1, adapter.SFFSoftware)
        self.assertEqual(sw0, sw1)
        # pop with index
        Sw.append(adapter.SFFSoftware(name=rw.random_word(), version=rw.random_word()))
        Sw.append(adapter.SFFSoftware(name=rw.random_word(), version=rw.random_word()))
        Sw.append(adapter.SFFSoftware(name=rw.random_word(), version=rw.random_word()))
        sw = Sw.pop(index=1)
        self.assertEqual(len(Sw), 2)
        self.assertIsInstance(sw, adapter.SFFSoftware)
        # shapes
        Sh = adapter.SFFShapePrimitiveList()
        sh00 = adapter.SFFCone()
        Sh.append(sh00)
        sh01 = adapter.SFFEllipsoid()
        Sh.append(sh01)
        sh11 = Sh.pop()
        sh10 = Sh.pop()
        self.assertEqual(len(Sh), 0)
        # popped items come back as the correct sibling subclass
        self.assertIsInstance(sh11, adapter.SFFEllipsoid)
        self.assertIsInstance(sh10, adapter.SFFCone)
        self.assertEqual(sh00.id, sh10.id)
        self.assertEqual(sh01.id, sh11.id)
        # pop with index
        Sh.append(adapter.SFFCylinder())
        Sh.append(adapter.SFFCylinder())
        Sh.append(adapter.SFFCuboid())
        sh = Sh.pop(index=1)
        self.assertEqual(len(Sh), 2)
        self.assertIsInstance(sh, adapter.SFFCylinder)
    def test_remove(self):
        """Test remove from list"""
        # segments
        S = adapter.SFFSegmentList()
        s = adapter.SFFSegment(id=1)
        S.append(s)
        self.assertEqual(len(S), 1)
        S.remove(s)
        self.assertEqual(len(S), 0)
        # removal must also drop the item's id from the lookup dict
        self.assertEqual(len(S), len(S.get_ids()))
        # shapes
        Sh = adapter.SFFShapePrimitiveList()
        sh = adapter.SFFCuboid(id=1)
        Sh.append(sh)
        self.assertEqual(len(Sh), 1)
        Sh.remove(sh)
        self.assertEqual(len(Sh), 0)
        # exceptions
        _sw = rw.random_word()
        with self.assertRaisesRegex(base.SFFTypeError, r".*is not object of type.*"):
            S.remove(_sw)
    def test_reverse(self):
        """Test that we can reverse the list"""
        # segments
        S = adapter.SFFSegmentList()
        _no_segments = _random_integer(start=1, stop=10)
        [S.append(adapter.SFFSegment(id=i)) for i in _xrange(_no_segments)]
        _ids = list(map(lambda s: s.id, S))
        S.reverse()
        _rids = list(map(lambda s: s.id, S))
        self.assertEqual(_ids[::-1], _rids)
        # shapes
        Sh = adapter.SFFShapePrimitiveList()
        _no_shapes = _random_integer(start=1, stop=10)
        [Sh.append(adapter.SFFCone(id=i)) for i in _xrange(_no_shapes)]
        _ids = list(map(lambda sh: sh.id, Sh))
        Sh.reverse()
        _rids = list(map(lambda sh: sh.id, Sh))
        self.assertEqual(_ids[::-1], _rids)
    def test_errors(self):
        """Test that the right exceptions are raised"""
        # iter_attr must be a ('attribute_name', AdapterClass) pair
        class _Segments(adapter.SFFSegmentList):
            iter_attr = ()
        with self.assertRaises(ValueError):
            _Segments()
        class _Segments(adapter.SFFSegmentList):
            iter_attr = (1, adapter.SFFSegment)
        with self.assertRaises(base.SFFTypeError):
            _Segments()
        class _Segments(adapter.SFFSegmentList):
            iter_attr = ('segment', float)
        with self.assertRaises(base.SFFTypeError):
            _Segments()
    def test_get_ids(self):
        """Test that we can get IDs of contained objects"""
        # segments
        S = adapter.SFFSegmentList()
        _no_items = _random_integer(start=1, stop=10)
        [S.append(adapter.SFFSegment()) for _ in _xrange(_no_items)]
        self.assertEqual(list(S.get_ids()), list(_xrange(1, _no_items + 1)))
        # following changes to how _id_dict is now updated
        # appending items direct from gds types changes the length of ID list because a new id is issued
        S.append(adapter.SFFSegment.from_gds_type(emdb_sff.segment_type()))
        self.assertEqual(list(S.get_ids()), list(_xrange(1, _no_items + 2)))
        # software list
        Sw = adapter.SFFSoftwareList()
        [Sw.append(adapter.SFFSoftware(name=rw.random_word(), version=rw.random_word())) for _ in _xrange(_no_items)]
        self.assertEqual(list(Sw.get_ids()), list(_xrange(_no_items)))
        # shapes
        Sh = adapter.SFFShapePrimitiveList()
        [Sh.append(adapter.SFFCone()) for _ in _xrange(_no_items)]
        [Sh.append(adapter.SFFCuboid()) for _ in _xrange(_no_items)]
        [Sh.append(adapter.SFFCylinder()) for _ in _xrange(_no_items)]
        [Sh.append(adapter.SFFEllipsoid()) for _ in _xrange(_no_items)]
        self.assertEqual(list(Sh.get_ids()), list(_xrange(_no_items * 4)))
    def test_get_by_id(self):
        """Test that we can get an item by ID"""
        # segments
        S = adapter.SFFSegmentList()
        s0 = adapter.SFFSegment(biologicalAnnotation=adapter.SFFBiologicalAnnotation())
        S.append(s0)
        s1 = S.get_by_id(1)
        self.assertEqual(s0.id, s1.id)
        # appending/setting/inserting a new item immediately makes it available on the dict
        s0 = adapter.SFFSegment(id=1000)
        S.append(s0)
        s1 = S.get_by_id(1000)
        self.assertEqual(s0.id, s1.id)
        with self.assertRaises(KeyError):
            S.get_by_id(1001)
        # popping/removing removes from the dict
        S = adapter.SFFSegmentList()
        S.append(adapter.SFFSegment())
        s_id = S[-1].id
        s = S.pop()
        self.assertEqual(s.id, s_id)
        with self.assertRaises(KeyError):
            S.get_by_id(s_id)
        # clearing clears the dict
        S = adapter.SFFSegmentList()
        _no_items = _random_integer(start=2, stop=10)
        [S.append(adapter.SFFSegment()) for _ in _xrange(_no_items)]
        self.assertTrue(len(S) > 1)
        S.clear()
        with self.assertRaises(KeyError):
            S.get_by_id(1)
        # extending extends the dict
        S1 = adapter.SFFSegmentList()
        [S1.append(adapter.SFFSegment()) for _ in _xrange(_no_items)]
        S2 = adapter.SFFSegmentList()
        [S2.append(adapter.SFFSegment()) for _ in _xrange(_no_items * 2)]
        S2.extend(S1)
        s_id = random.choice(list(S2.get_ids()))
        self.assertIsInstance(S2.get_by_id(s_id), adapter.SFFSegment)
        self.assertEqual(len(S2), _no_items * 3)
        # reversing has no impact
        S = S2.copy()
        S.reverse()
        s_id = random.choice(list(S.get_ids()))
        self.assertEqual(S.get_by_id(s_id).id, S2.get_by_id(s_id).id)
        # exceptions
        # ID collisions
        S = adapter.SFFSegmentList()
        S.append(adapter.SFFSegment())  # id=1
        # let's reset the index on SFFSegment; this will create a collision of ids (old has id=1, new has id=1)
        # when we append both to the same SFFSegmentList we expect the second to have id=2
        adapter.SFFSegment.reset_id()
        # we should
        s = adapter.SFFSegment()  # id=1
        self.assertEqual(s.id, 1)
        S.append(s)
        # on collision, append reassigns the colliding item's id
        self.assertEqual(s.id, 2)
        # the id should auto-increment regardless of how many have been added before
        added_segments = _random_integer(start=3, stop=10)
        [S.append(adapter.SFFSegment()) for _ in _xrange(added_segments)]
        S.append(adapter.SFFSegment(id=added_segments))
        # there are as many ids as the length i.e. all items are in the id_dict
        self.assertEqual(len(S), len(S.get_ids()))
    def test_get_from_segmentation(self):
        """Test that we can get by ID from the top level
        - segmentation.
        """
        # create a segmentation
        S = adapter.SFFSegmentation(name='my segmentation')
        # set the segments attribute
        S.segments = adapter.SFFSegmentList()
        s = adapter.SFFSegment()
        # add a segment
        S.segments.append(s)
        s_get = S.segments.get_by_id(1)
        self.assertEqual(s.id, 1)
        self.assertEqual(s_get.id, 1)
        self.assertEqual(len(S.segments), 1)
        s_index = S.segments[0]
        self.assertEqual(s_index.id, 1)
        self.assertEqual(s_get.id, 1)
        self.assertEqual(len(S.segments), 1)
    def test_validation(self):
        """Test that the list-type passes validation
        Validation is based on the `min_length` attribute which is `0` by default
        If a list-type has `min_length>0` and the list is fewer than the minimum items then it
        should fail validation.
        An example of this is `SFFVertexList` which should have at least 3 vertices (for a triangle)
        """
        class T(base.SFFListType):
            gds_type = emdb_sff.transform_listType
            min_length = 1
            iter_attr = (u'transformation_matrix', adapter.SFFTransformationMatrix)
            repr_string = u"Transform({})"
            repr_args = (u'list()',)
        t = T()
        self.assertFalse(t._is_valid())
        [t.append(
            adapter.SFFTransformationMatrix(data=numpy.random.rand(5, 5))
        ) for _ in _xrange(_random_integer(start=3, stop=10))]
        self.assertTrue(t._is_valid())
class TestSFFAttribute(Py23FixTestCase):
    """Unit tests for the `base.SFFAttribute` descriptor class"""
    def test_default(self):
        """A descriptor reads, writes and deletes the underlying gds attribute"""
        class _Colour(adapter.SFFType):
            gds_type = emdb_sff.rgba_type
            r = base.SFFAttribute('red', help='red colour')
            g = base.SFFAttribute('green', help='green colour')
            b = base.SFFAttribute('blue', help='blue colour')
            a = base.SFFAttribute('alpha', help='alpha colour')
        red, green, blue, alpha = _random_floats(count=4)
        colour = _Colour(red=red, green=green, blue=blue, alpha=alpha)
        # each descriptor reflects the value passed through the constructor
        for attr_name, expected in zip('rgba', (red, green, blue, alpha)):
            self.assertEqual(getattr(colour, attr_name), expected)
        # deleting an attribute resets it to None
        del colour.a
        self.assertIsNone(colour.a)
    def test_list_attribute(self):
        """An unset list-valued attribute yields an empty container, not None"""
        class _BA(adapter.SFFType):
            gds_type = emdb_sff.biological_annotationType
            repr_string = """_BA(external_references={})"""
            repr_args = ('extref',)
            extref = base.SFFAttribute(
                'external_references',
                sff_type=adapter.SFFExternalReferenceList,
                help='the list of external reference objects'
            )
        annotation = _BA()
        self.assertIsInstance(annotation.extref, adapter.SFFExternalReferenceList)
        self.assertEqual(len(annotation.extref), 0)
    def test_error(self):
        """Assigning a value of the wrong `sff_type` raises `SFFTypeError`"""
        class _Segmentation(adapter.SFFType):
            gds_type = emdb_sff.segmentation
            s = base.SFFAttribute('software', sff_type=adapter.SFFSoftware)
        segmentation = _Segmentation()
        with self.assertRaises(base.SFFTypeError):
            segmentation.s = adapter.SFFSegment()
    def test_default_value(self):
        """An unset attribute falls back to its declared default"""
        class _BA(adapter.SFFType):
            gds_type = emdb_sff.biological_annotationType
            no = base.SFFAttribute('number_of_instances', default=1)
        # an explicit value takes precedence over the default
        annotation = _BA(number_of_instances=33)
        self.assertEqual(annotation.no, 33)
        # otherwise the declared default is returned
        annotation = _BA()
        self.assertEqual(annotation.no, 1)
    def test_required_value(self):
        """Validation only passes once every required attribute is set"""
        class _ER(adapter.SFFType):
            gds_type = emdb_sff.external_reference_type
            t = base.SFFAttribute('type', required=True)
        ext_ref = _ER()
        self.assertFalse(ext_ref._is_valid())
        ext_ref.t = rw.random_word()
        self.assertTrue(ext_ref._is_valid())
| [
"random_words.LoremIpsum",
"importlib.import_module",
"numpy.random.rand",
"random_words.RandomWords",
"numpy.random.randint",
"tempfile.NamedTemporaryFile"
] | [((250, 263), 'random_words.RandomWords', 'RandomWords', ([], {}), '()\n', (261, 263), False, 'from random_words import RandomWords, LoremIpsum\n'), ((269, 281), 'random_words.LoremIpsum', 'LoremIpsum', ([], {}), '()\n', (279, 281), False, 'from random_words import RandomWords, LoremIpsum\n'), ((822, 860), 'importlib.import_module', 'importlib.import_module', (['emdb_sff_name'], {}), '(emdb_sff_name)\n', (845, 860), False, 'import importlib\n'), ((871, 908), 'importlib.import_module', 'importlib.import_module', (['adapter_name'], {}), '(adapter_name)\n', (894, 908), False, 'import importlib\n'), ((5848, 5877), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (5875, 5877), False, 'import tempfile\n'), ((6610, 6639), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (6637, 6639), False, 'import tempfile\n'), ((7373, 7402), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (7400, 7402), False, 'import tempfile\n'), ((7952, 7981), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (7979, 7981), False, 'import tempfile\n'), ((4661, 4704), 'numpy.random.randint', 'numpy.random.randint', (['(0)', '(10)'], {'size': '(5, 5, 5)'}), '(0, 10, size=(5, 5, 5))\n', (4681, 4704), False, 'import numpy\n'), ((37998, 38021), 'numpy.random.rand', 'numpy.random.rand', (['(5)', '(5)'], {}), '(5, 5)\n', (38015, 38021), False, 'import numpy\n')] |
from collections import OrderedDict as ODict
from typing import IO, Sequence, Tuple, Union, List, Dict
from itertools import repeat
import librosa
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as tckr
from numpy import ndarray
import scipy.signal as scsig
from hparams import hp
import generic as gen
from matlab_lib import Evaluation as EvalModule
EVAL_METRICS = EvalModule.metrics
def calc_snrseg_time(clean: Union[List, ndarray], est: Union[List, ndarray],
                     l_frame: int, l_hop: int, T_ys: Sequence[int] = None) \
        -> float:
    """Segmental SNR of `est` against `clean` in the time domain.

    Each item is framed with a Hann window, the per-frame SNR is clipped to
    [-10 dB, 35 dB], and the per-item mean is accumulated over the batch
    (note: summed over items, not averaged).

    :param clean: reference signal(s); a 1-D array is treated as a batch of one
    :param est: estimated signal(s), same layout as `clean`
    :param l_frame: frame length in samples
    :param l_hop: hop size in samples
    :param T_ys: valid length of each batch item; defaults to the full length
    :return: sum over the batch of the per-item mean segmental SNR
    """
    _LIM_UPPER = 35. / 10.  # clip at 35 dB (values below are in log10 units, hence /10)
    _LIM_LOWER = -10. / 10.  # clip at -10 dB
    if isinstance(clean, ndarray) and clean.ndim == 1:
        # promote a single signal to a batch of one
        clean = clean[np.newaxis, ...]
        est = est[np.newaxis, ...]
    if T_ys is None:
        T_ys = (clean.shape[-1],)
    # Hann window (periodic=False) as a column vector so it broadcasts over
    # the (l_frame, n_frames) frame matrices produced below.
    win = scsig.windows.hann(l_frame, False)[:, np.newaxis]
    sum_result = 0.
    for T, item_clean, item_est in zip(T_ys, clean, est):
        # zero-pad so the valid part of the signal divides evenly into hops
        l_pad = l_frame - (T - l_frame) % l_hop
        item_clean = np.pad(item_clean[:T], (0, l_pad), 'constant')
        item_est = np.pad(item_est[:T], (0, l_pad), 'constant')
        clean_frames = librosa.util.frame(item_clean, l_frame, l_hop) * win
        est_frames = librosa.util.frame(item_est, l_frame, l_hop) * win
        # per-frame L2 norms of the reference and of the error signal
        norm_clean = np.linalg.norm(clean_frames, ord=2, axis=0)
        norm_err = (np.linalg.norm(est_frames - clean_frames, ord=2, axis=0)
                    + np.finfo(np.float32).eps)  # eps guards against /0
        snrseg = np.log10(norm_clean / norm_err + np.finfo(np.float32).eps)
        np.minimum(snrseg, _LIM_UPPER, out=snrseg)
        np.maximum(snrseg, _LIM_LOWER, out=snrseg)
        sum_result += snrseg.mean()
    # scale log10 values to the dB-style range implied by the clip limits
    sum_result *= 10
    return sum_result
def calc_using_eval_module(y_clean: Union[List, ndarray], y_est: Union[List, ndarray],
                           T_ys: Sequence[int] = (0,)) -> ODict:
    """Calculate metrics using EvalModule. `y` can be a batch.

    Each item is truncated to its valid length and evaluated with the
    Matlab-backed EvalModule; the per-item metric vectors are summed
    (not averaged) over the batch.

    :param y_clean: reference signal(s); a 1-D array is treated as a batch of one
    :param y_est: estimated signal(s), same layout as `y_clean`
    :param T_ys: valid length per batch item; the (0,) sentinel means
        "use the full length of every item"
    :return: ordered dict mapping metric name -> summed value
    """
    if isinstance(y_clean, ndarray) and y_clean.ndim == 1:
        y_clean = y_clean[np.newaxis, ...]
        y_est = y_est[np.newaxis, ...]
    if T_ys == (0,):
        T_ys = (y_clean.shape[1],) * y_clean.shape[0]
    keys = None
    sum_result = None
    for T, item_clean, item_est in zip(T_ys, y_clean, y_est):
        # noinspection PyArgumentList,PyTypeChecker
        temp: ODict = EvalModule(item_clean[:T], item_est[:T], hp.sample_rate)
        result = np.array(list(temp.values()))
        if not keys:
            # first item: remember the metric names and start the accumulator
            keys = temp.keys()
            sum_result = result
        else:
            sum_result += result
    return ODict(zip(keys, sum_result.tolist()))
def wav2spec(data: Dict) -> Dict:
    """Convert waveforms into magnitude spectrograms.

    Only entries whose key is shorter than 4 characters are transformed —
    presumably the waveform entries (e.g. 'y', 'out') vs. longer metadata
    keys; TODO confirm against callers. An 'err' entry is added holding the
    relative squared error between spec['out'] and spec['y'] in dB, floored
    at -20 dB (assumes both keys are present).

    :param data: mapping from name to waveform array
    :return: mapping from name to magnitude spectrogram, plus 'err'
    """
    spec = dict()
    for key, value in data.items():
        if len(key) < 4:
            value = value.squeeze()
            s = librosa.core.stft(value, **hp.kwargs_stft)
            spec[key] = np.abs(s)
    # relative squared error per TF bin; 1e-6 guards against division by zero
    err = (np.abs(spec['out'] - spec['y']) ** 2) / (np.abs(spec['y']) ** 2 + 1e-6)
    # convert to dB and floor at -20 dB
    err = np.maximum(10 * np.log10(err), -20.0)
    spec['err'] = err
    return spec
def draw_spectrogram(data: gen.TensArr, to_db=True, show=False, dpi=150, **kwargs):
    """Render a (magnitude) spectrogram image and return the figure.

    :param data: 2-D spectrogram; extra singleton dims are squeezed away
    :param to_db: convert amplitudes to dB before plotting
    :param show: call `fig.show()` before returning
    :param dpi: figure resolution
    :param kwargs: forwarded to `imshow` (e.g. vmin, vmax)
    :return: the matplotlib figure
    """
    data = data.squeeze()
    data = gen.convert(data, astype=ndarray)
    if to_db:
        data = librosa.amplitude_to_db(data)
    fig, ax = plt.subplots(dpi=dpi)
    # frequency axis spans 0 .. Nyquist (sample_rate // 2)
    ax.imshow(data,
              cmap=plt.get_cmap('CMRmap'),
              extent=(0, data.shape[1], 0, hp.sample_rate // 2),
              origin='lower', aspect='auto', **kwargs)
    ax.set_xlabel('Frame Index')
    ax.set_ylabel('Frequency (Hz)')
    fig.colorbar(ax.images[0], format='%+2.0f dB')
    fig.tight_layout()
    if show:
        fig.show()
    return fig
def draw_audio(data: gen.TensArr, fs: int, show=False, xlim=None, ylim=(-1, 1)):
    """Plot a waveform against time (seconds) and return the figure.

    :param data: 1-D audio samples (extra singleton dims are squeezed away)
    :param fs: sample rate in Hz
    :param show: call `fig.show()` before returning
    :param xlim: time range to display; defaults to the whole signal
    :param ylim: amplitude range to display
    :return: the matplotlib figure
    """
    samples = gen.convert(data.squeeze(), astype=ndarray)
    times = np.arange(len(samples)) / fs
    xlim = (0, times[-1]) if xlim is None else xlim
    # figure width scales with the displayed duration
    fig, ax = plt.subplots(figsize=(xlim[1] * 10, 2), dpi=150)
    ax.plot(times, samples)
    ax.set_xlabel('time')
    ax.xaxis.set_major_locator(tckr.MultipleLocator(0.5))
    ax.set_xlim(*xlim)
    ax.set_ylim(*ylim)
    fig.tight_layout()
    if show:
        fig.show()
    return fig
def principle_(angle):
    """Wrap *angle* (radians) to the principal interval [-pi, pi).

    NOTE(review): the augmented assignments operate in place when *angle*
    is an ndarray, so the caller's array is modified as a side effect —
    confirm callers tolerate (or rely on) this before changing it.
    """
    angle += np.pi
    angle %= (2 * np.pi)
    angle -= np.pi
    return angle
def reconstruct_wave(*args: ndarray,
                     n_iter=0, momentum=0., n_sample=-1,
                     **kwargs_istft) -> ndarray:
    """ reconstruct time-domain wave from spectrogram

    :param args: can be (mag, phase) or (complex spectrogram,) or (mag,)
    :param n_iter: no. of iteration of griffin-lim. 0 for not using griffin-lim.
    :param momentum: fast griffin-lim algorithm momentum
    :param n_sample: number of time samples of output wave
    :param kwargs_istft: kwargs for librosa.istft
    :return: reconstructed time-domain signal
    """
    # Normalize the positional arguments into either `spec` (complex) or a
    # (`mag`, `phase`) pair; exactly one representation is non-None afterwards.
    if len(args) == 1:
        if np.iscomplexobj(args[0]):
            spec = args[0].squeeze()
            mag = None
            phase = None
        else:
            spec = None
            mag = args[0].squeeze()
            # random initial phase
            phase = np.exp(2j * np.pi * np.random.rand(*mag.shape).astype(mag.dtype))
    elif len(args) == 2:
        spec = None
        mag = args[0].squeeze()
        phase = args[1].squeeze()
        assert np.isrealobj(mag) and np.isrealobj(phase)
    else:
        raise ValueError
    # Fall back to the hyperparameter defaults when no istft kwargs are given;
    # otherwise derive matching stft kwargs from them.
    if not kwargs_istft:
        kwargs_istft = hp.kwargs_istft
        kwargs_stft = hp.kwargs_stft
    else:
        kwargs_stft = dict(n_fft=hp.n_fft, **kwargs_istft)
    spec_prev = 0
    # Griffin-Lim phase refinement: n_iter - 1 in-loop passes, with the final
    # istft performed after the loop (so n_iter == 0 skips refinement entirely).
    for _ in range(n_iter - 1):
        if mag is None:
            # first pass with a complex spec input: split into mag/phase once
            mag = np.abs(spec)
            phase = np.angle(spec)
            spec = None
        wave = librosa.istft(mag * np.exp(1j * phase), **kwargs_istft)
        spec_new = librosa.stft(wave, **kwargs_stft)
        # fast Griffin-Lim: subtract a momentum-weighted previous spectrogram
        phase = np.angle(spec_new - (momentum / (1 + momentum)) * spec_prev)
        spec_prev = spec_new
    kwarg_len = dict(length=n_sample) if n_sample != -1 else dict()
    if spec is None:
        spec = mag * np.exp(1j * phase)
    wave = librosa.istft(spec, **kwargs_istft, **kwarg_len)
    return wave
"librosa.istft",
"numpy.isrealobj",
"librosa.util.frame",
"numpy.log10",
"numpy.random.rand",
"generic.convert",
"numpy.linalg.norm",
"numpy.exp",
"numpy.maximum",
"numpy.abs",
"scipy.signal.windows.hann",
"numpy.iscomplexobj",
"numpy.finfo",
"matlab_lib.Evaluation",
"matplotlib.pyplot.g... | [((3525, 3558), 'generic.convert', 'gen.convert', (['data'], {'astype': 'ndarray'}), '(data, astype=ndarray)\n', (3536, 3558), True, 'import generic as gen\n'), ((3633, 3654), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'dpi': 'dpi'}), '(dpi=dpi)\n', (3645, 3654), True, 'import matplotlib.pyplot as plt\n'), ((4150, 4183), 'generic.convert', 'gen.convert', (['data'], {'astype': 'ndarray'}), '(data, astype=ndarray)\n', (4161, 4183), True, 'import generic as gen\n'), ((4290, 4338), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(xlim[1] * 10, 2)', 'dpi': '(150)'}), '(figsize=(xlim[1] * 10, 2), dpi=150)\n', (4302, 4338), True, 'import matplotlib.pyplot as plt\n'), ((6465, 6513), 'librosa.istft', 'librosa.istft', (['spec'], {}), '(spec, **kwargs_istft, **kwarg_len)\n', (6478, 6513), False, 'import librosa\n'), ((875, 909), 'scipy.signal.windows.hann', 'scsig.windows.hann', (['l_frame', '(False)'], {}), '(l_frame, False)\n', (893, 909), True, 'import scipy.signal as scsig\n'), ((1073, 1119), 'numpy.pad', 'np.pad', (['item_clean[:T]', '(0, l_pad)', '"""constant"""'], {}), "(item_clean[:T], (0, l_pad), 'constant')\n", (1079, 1119), True, 'import numpy as np\n'), ((1139, 1183), 'numpy.pad', 'np.pad', (['item_est[:T]', '(0, l_pad)', '"""constant"""'], {}), "(item_est[:T], (0, l_pad), 'constant')\n", (1145, 1183), True, 'import numpy as np\n'), ((1366, 1409), 'numpy.linalg.norm', 'np.linalg.norm', (['clean_frames'], {'ord': '(2)', 'axis': '(0)'}), '(clean_frames, ord=2, axis=0)\n', (1380, 1409), True, 'import numpy as np\n'), ((1620, 1662), 'numpy.minimum', 'np.minimum', (['snrseg', '_LIM_UPPER'], {'out': 'snrseg'}), '(snrseg, _LIM_UPPER, out=snrseg)\n', (1630, 1662), True, 'import numpy as np\n'), ((1671, 1713), 'numpy.maximum', 'np.maximum', (['snrseg', '_LIM_LOWER'], {'out': 'snrseg'}), '(snrseg, _LIM_LOWER, out=snrseg)\n', (1681, 1713), True, 'import numpy as np\n'), ((2478, 2534), 'matlab_lib.Evaluation', 'EvalModule', 
(['item_clean[:T]', 'item_est[:T]', 'hp.sample_rate'], {}), '(item_clean[:T], item_est[:T], hp.sample_rate)\n', (2488, 2534), True, 'from matlab_lib import Evaluation as EvalModule\n'), ((3588, 3617), 'librosa.amplitude_to_db', 'librosa.amplitude_to_db', (['data'], {}), '(data)\n', (3611, 3617), False, 'import librosa\n'), ((4422, 4447), 'matplotlib.ticker.MultipleLocator', 'tckr.MultipleLocator', (['(0.5)'], {}), '(0.5)\n', (4442, 4447), True, 'import matplotlib.ticker as tckr\n'), ((5249, 5273), 'numpy.iscomplexobj', 'np.iscomplexobj', (['args[0]'], {}), '(args[0])\n', (5264, 5273), True, 'import numpy as np\n'), ((6183, 6216), 'librosa.stft', 'librosa.stft', (['wave'], {}), '(wave, **kwargs_stft)\n', (6195, 6216), False, 'import librosa\n'), ((6234, 6292), 'numpy.angle', 'np.angle', (['(spec_new - momentum / (1 + momentum) * spec_prev)'], {}), '(spec_new - momentum / (1 + momentum) * spec_prev)\n', (6242, 6292), True, 'import numpy as np\n'), ((1207, 1253), 'librosa.util.frame', 'librosa.util.frame', (['item_clean', 'l_frame', 'l_hop'], {}), '(item_clean, l_frame, l_hop)\n', (1225, 1253), False, 'import librosa\n'), ((1281, 1325), 'librosa.util.frame', 'librosa.util.frame', (['item_est', 'l_frame', 'l_hop'], {}), '(item_est, l_frame, l_hop)\n', (1299, 1325), False, 'import librosa\n'), ((1430, 1486), 'numpy.linalg.norm', 'np.linalg.norm', (['(est_frames - clean_frames)'], {'ord': '(2)', 'axis': '(0)'}), '(est_frames - clean_frames, ord=2, axis=0)\n', (1444, 1486), True, 'import numpy as np\n'), ((3009, 3051), 'librosa.core.stft', 'librosa.core.stft', (['value'], {}), '(value, **hp.kwargs_stft)\n', (3026, 3051), False, 'import librosa\n'), ((3076, 3085), 'numpy.abs', 'np.abs', (['s'], {}), '(s)\n', (3082, 3085), True, 'import numpy as np\n'), ((3098, 3129), 'numpy.abs', 'np.abs', (["(spec['out'] - spec['y'])"], {}), "(spec['out'] - spec['y'])\n", (3104, 3129), True, 'import numpy as np\n'), ((3196, 3209), 'numpy.log10', 'np.log10', (['err'], {}), '(err)\n', 
(3204, 3209), True, 'import numpy as np\n'), ((3694, 3716), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""CMRmap"""'], {}), "('CMRmap')\n", (3706, 3716), True, 'import matplotlib.pyplot as plt\n'), ((6021, 6033), 'numpy.abs', 'np.abs', (['spec'], {}), '(spec)\n', (6027, 6033), True, 'import numpy as np\n'), ((6054, 6068), 'numpy.angle', 'np.angle', (['spec'], {}), '(spec)\n', (6062, 6068), True, 'import numpy as np\n'), ((6435, 6455), 'numpy.exp', 'np.exp', (['(1.0j * phase)'], {}), '(1.0j * phase)\n', (6441, 6455), True, 'import numpy as np\n'), ((1509, 1529), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (1517, 1529), True, 'import numpy as np\n'), ((3139, 3156), 'numpy.abs', 'np.abs', (["spec['y']"], {}), "(spec['y'])\n", (3145, 3156), True, 'import numpy as np\n'), ((5681, 5698), 'numpy.isrealobj', 'np.isrealobj', (['mag'], {}), '(mag)\n', (5693, 5698), True, 'import numpy as np\n'), ((5703, 5722), 'numpy.isrealobj', 'np.isrealobj', (['phase'], {}), '(phase)\n', (5715, 5722), True, 'import numpy as np\n'), ((6128, 6148), 'numpy.exp', 'np.exp', (['(1.0j * phase)'], {}), '(1.0j * phase)\n', (6134, 6148), True, 'import numpy as np\n'), ((1586, 1606), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (1594, 1606), True, 'import numpy as np\n'), ((5509, 5535), 'numpy.random.rand', 'np.random.rand', (['*mag.shape'], {}), '(*mag.shape)\n', (5523, 5535), True, 'import numpy as np\n')] |
import torch
import numpy as np
import matplotlib.pyplot as plt
from operator import itemgetter
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import ListedColormap
from sklearn.metrics import confusion_matrix, roc_curve, precision_recall_curve
plt.style.use('fivethirtyeight')
def odds(prob):
    """Return the odds ratio p / (1 - p) for probability *prob*."""
    complement = 1 - prob
    return prob / complement
def log_odds(prob):
    """Return the log-odds (logit) of probability *prob*."""
    return np.log(prob / (1 - prob))
def sigmoid(z):
    """Map log-odds *z* to a probability via the logistic function."""
    denominator = np.exp(-z) + 1
    return 1 / denominator
def split_cm(cm):
    """Unpack a 2x2 confusion matrix into (tn, fp, fn, tp).

    Row 0 holds the actual negatives, row 1 the actual positives;
    column 0 holds predicted negatives, column 1 predicted positives.
    """
    (tn, fp), (fn, tp) = cm
    return tn, fp, fn, tp
def tpr_fpr(cm):
    """Return (TPR, FPR) computed from a 2x2 confusion matrix.

    TPR = tp / (tp + fn); FPR = fp / (fp + tn).
    """
    (tn, fp), (fn, tp) = cm
    return tp / (tp + fn), fp / (fp + tn)
def precision_recall(cm):
    """Return (precision, recall) computed from a 2x2 confusion matrix.

    precision = tp / (tp + fp); recall = tp / (tp + fn).
    """
    (tn, fp), (fn, tp) = cm
    return tp / (tp + fp), tp / (tp + fn)
def probability_line(ax, y, probs, threshold, shift=0.0, annot=False, colors=None):
    """Plot predicted probabilities on a [0, 1] number line split by a threshold.

    Fill color encodes the actual class (colors[0] = negative, colors[1] =
    positive); edge color encodes the predicted class, so a mismatched
    fill/edge marks a misclassified point. Actual negatives are drawn above
    the line (offset +shift), actual positives below (offset -shift).

    :param ax: matplotlib axes to draw on
    :param y: array of true binary labels (0/1)
    :param probs: array of predicted probabilities P(y=1)
    :param threshold: decision threshold (drawn as a dashed vertical line)
    :param shift: vertical offset separating the two actual classes
    :param annot: if True, label the four regions TN/FN/FP/TP
    :param colors: two-element list [negative_color, positive_color]
    :return: the axes, for chaining
    """
    if colors is None:
        colors = ['r', 'b']
    ax.grid(False)
    ax.set_ylim([-.1, .1])
    ax.axes.get_yaxis().set_visible(False)
    ax.plot([0, 1], [0, 0], linewidth=2, c='k', zorder=1)
    ax.plot([0, 0], [-.1, .1], c='k', zorder=1)
    ax.plot([1, 1], [-.1, .1], c='k', zorder=1)
    # Masks named after the standard confusion-matrix cells. (The original
    # code had `fn` and `fp` swapped: an actual negative predicted positive
    # is a false POSITIVE, and vice versa. Rendering is unchanged.)
    tn = (y == 0) & (probs < threshold)
    fp = (y == 0) & (probs >= threshold)
    tp = (y == 1) & (probs >= threshold)
    fn = (y == 1) & (probs < threshold)
    ax.plot([threshold, threshold], [-.1, .1], c='k', zorder=1, linestyle='--')
    ax.scatter(probs[tn], np.zeros(tn.sum()) + shift, c=colors[0], s=150, zorder=2, edgecolor=colors[0], linewidth=3)
    ax.scatter(probs[fp], np.zeros(fp.sum()) + shift, c=colors[0], s=150, zorder=2, edgecolor=colors[1], linewidth=3)
    ax.scatter(probs[tp], np.zeros(tp.sum()) - shift, c=colors[1], s=150, zorder=2, edgecolor=colors[1], linewidth=3)
    ax.scatter(probs[fn], np.zeros(fn.sum()) - shift, c=colors[1], s=150, zorder=2, edgecolor=colors[0], linewidth=3)
    ax.set_xlabel(r'$\sigma(z) = P(y=1)$')
    ax.set_title('Threshold = {}'.format(threshold))
    if annot:
        ax.annotate('TN', xy=(.20, .03), c='k', weight='bold', fontsize=20)
        ax.annotate('FN', xy=(.20, -.08), c='k', weight='bold', fontsize=20)
        ax.annotate('FP', xy=(.70, .03), c='k', weight='bold', fontsize=20)
        ax.annotate('TP', xy=(.70, -.08), c='k', weight='bold', fontsize=20)
    return ax
def probability_contour(ax, model, device, X, y, threshold, cm=None, cm_bright=None):
    """Draw the model's P(y=1) heat map with the decision boundary at
    `threshold`, and scatter the data points on top.

    :param ax: matplotlib axes to draw on
    :param model: torch module mapping (x1, x2) -> logit
    :param device: torch device to evaluate the model on
    :param X: 2-D features; the first two columns are plotted
    :param y: labels used to color the points
    :param threshold: probability level of the drawn decision boundary
    :param cm: colormap for the filled probability contour
    :param cm_bright: colormap for the scattered points
    :return: the axes, for chaining
    """
    if cm is None:
        cm = plt.cm.RdBu
    if cm_bright is None:
        cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    h = .02  # step size in the mesh
    x_min, x_max = -2.25, 2.25
    y_min, y_max = -2.25, 2.25
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # evaluate the model on every grid point, then map logits to probabilities
    logits = model(torch.as_tensor(np.c_[xx.ravel(), yy.ravel()]).float().to(device))
    logits = logits.detach().cpu().numpy().reshape(xx.shape)
    yhat = sigmoid(logits)
    # single contour line = decision boundary; filled contours = probability map
    ax.contour(xx, yy, yhat, levels=[threshold], cmap="Greys", vmin=0, vmax=1)
    contour = ax.contourf(xx, yy, yhat, 25, cmap=cm, alpha=.8, vmin=0, vmax=1)
    # Plot the training points
    ax.scatter(X[:, 0], X[:, 1], c=y, cmap=cm_bright, edgecolors='k')
    # Plot the testing points
    #ax.scatter(X_val[:, 0], X_val[:, 1], c=y_val, cmap=cm_bright, edgecolors='k', alpha=0.6)
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xlabel(r'$X_1$')
    ax.set_ylabel(r'$X_2$')
    ax.set_title(r'$\sigma(z) = P(y=1)$')
    ax.grid(False)
    ax_c = plt.colorbar(contour)
    ax_c.set_ticks([0, .25, .5, .75, 1])
    return ax
def eval_curves_from_probs(y, probabilities, threshs, line=False, annot=False):
    """Build ROC and Precision-Recall curves by thresholding predicted
    probabilities at each value in `threshs` and plotting the resulting
    rates with `eval_curves`."""
    cms = [confusion_matrix(y, probabilities >= t) for t in threshs]
    tprs_fprs = np.array([tpr_fpr(cm) for cm in cms])
    precs_recs = np.array([precision_recall(cm) for cm in cms])
    return eval_curves(tprs_fprs[:, 1], tprs_fprs[:, 0], precs_recs[:, 1], precs_recs[:, 0], threshs, line=line, annot=annot)
def eval_curves(fprs, tprs, recalls, precisions, thresholds, thresholds2=None, line=False, annot=False):
    """Plot the ROC curve (left) and Precision-Recall curve (right) side by side.

    :param fprs: false-positive rates, one per ROC threshold
    :param tprs: true-positive rates, one per ROC threshold
    :param recalls: recall values, one per PR threshold
    :param precisions: precision values, one per PR threshold
    :param thresholds: thresholds used to annotate the ROC curve
    :param thresholds2: thresholds used to annotate the PR curve; defaults to `thresholds`
    :param line: connect the points with a line instead of markers only
    :param annot: write each threshold value next to its point
    :return: the matplotlib figure
    """
    fig, axs = plt.subplots(1, 2, figsize=(10, 5))
    if thresholds2 is None:
        thresholds2 = thresholds[:]
    marker = '.r-' if line else '.r'
    axs[0].plot(fprs, tprs, marker, markersize=12, linewidth=2)
    axs[0].set_xlim([-.05, 1.05])
    axs[0].set_ylim([-.05, 1.05])
    axs[0].set_xlabel('False Positive Rate')
    axs[0].set_ylabel('True Positive Rate')
    axs[0].set_title('ROC Curve')
    axs[1].plot(recalls, precisions, marker, markersize=12, linewidth=2)
    axs[1].set_xlim([-.05, 1.05])
    axs[1].set_ylim([-.05, 1.05])
    axs[1].set_xlabel('Recall')
    axs[1].set_ylabel('Precision')
    axs[1].set_title('Precision-Recall Curve')
    if annot:
        # offset each label slightly below-left of its point
        for thresh, fpr, tpr, prec, rec in zip(thresholds, fprs, tprs, precisions, recalls):
            axs[0].annotate(str(thresh), xy=(fpr - .03, tpr - .07))
        for thresh, fpr, tpr, prec, rec in zip(thresholds2, fprs, tprs, precisions, recalls):
            axs[1].annotate(str(thresh), xy=(rec - .03, prec - .07))
    fig.tight_layout()
    return fig
def figure1(X_train, y_train, X_val, y_val, cm_bright=None):
    """Scatter the train (left) and validation (right) sets side by side.

    :param X_train: training features; first two columns are plotted
    :param y_train: training labels, used to color the points
    :param X_val: validation features
    :param y_val: validation labels
    :param cm_bright: colormap for the points; defaults to red/blue
    :return: the matplotlib figure
    """
    if cm_bright is None:
        cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    fig, ax = plt.subplots(1, 2, figsize=(12, 6))
    panels = ((X_train, y_train, 'Generated Data - Train'),
              (X_val, y_val, 'Generated Data - Validation'))
    for axis, (features, labels, title) in zip(ax, panels):
        axis.scatter(features[:, 0], features[:, 1], c=labels, cmap=cm_bright)
        axis.set_xlabel(r'$X_1$')
        axis.set_ylabel(r'$X_2$')
        axis.set_xlim([-2.3, 2.3])
        axis.set_ylim([-2.3, 2.3])
        axis.set_title(title)
    fig.tight_layout()
    return fig
def figure2(prob1):
    """Plot the odds ratio over probability, in linear (left) and log (right)
    scale, highlighting the points at prob1, 0.5 and 1 - prob1.

    :param prob1: probability to highlight (together with its complement)
    :return: the matplotlib figure
    """
    fig, ax = plt.subplots(1, 2, figsize=(10, 5))
    prob = np.linspace(.01, .99, 99)
    for i in [0, 1]:
        ax[i].plot(prob, odds(prob), linewidth=2)
        ax[i].set_xlabel('Probability')
        if i:
            # right panel: log-scaled y axis
            ax[i].set_yscale('log')
            ax[i].set_ylabel('Odds Ratio (log scale)')
            ax[i].set_title('Odds Ratio (log scale)')
        else:
            ax[i].set_ylabel('Odds Ratio')
            ax[i].set_title('Odds Ratio')
        ax[i].scatter([prob1, .5, (1-prob1)], [odds(prob1), odds(.5), odds(1-prob1)], c='r')
    fig.tight_layout()
    return fig
def figure3(prob1):
    """Plot the log-odds over probability (left) and its inverse, probability
    over log-odds (right), highlighting prob1, 0.5 and 1 - prob1.

    :param prob1: probability to highlight (together with its complement)
    :return: the matplotlib figure
    """
    fig, ax = plt.subplots(1, 2, figsize=(10, 5))
    prob = np.linspace(.01, .99, 99)
    ax[0].plot(prob, log_odds(prob), linewidth=2)
    ax[0].set_xlabel('Probability')
    ax[0].set_ylabel('Log Odds Ratio')
    ax[0].set_title('Log Odds Ratio')
    ax[0].scatter([prob1, .5, (1-prob1)], [log_odds(prob1), log_odds(.5), log_odds(1-prob1)], c='r')
    # right panel: axes swapped, i.e. the sigmoid shape
    ax[1].plot(log_odds(prob), prob, linewidth=2)
    ax[1].set_ylabel('Probability')
    ax[1].set_xlabel('Log Odds Ratio')
    ax[1].set_title('Probability')
    ax[1].scatter([log_odds(prob1), log_odds(.5), log_odds(1-prob1)], [prob1, .5, (1-prob1)], c='r')
    fig.tight_layout()
    return fig
def figure4(prob1):
    """Plot the sigmoid (probability vs. log-odds) and highlight the points
    at prob1, 0.5 and 1 - prob1.

    :param prob1: probability to highlight (together with its complement)
    :return: the matplotlib figure
    """
    fig, ax = plt.subplots(1, 1, figsize=(5, 5))
    grid = np.linspace(.01, .99, 99)
    ax.plot(log_odds(grid), grid, linewidth=2, c='r')
    ax.set_ylabel('Probability')
    ax.set_xlabel('Log Odds Ratio')
    ax.set_title('Sigmoid')
    highlights = [prob1, .5, 1 - prob1]
    ax.scatter([log_odds(p) for p in highlights], highlights, c='r')
    fig.tight_layout()
    return fig
def figure7(X, y, model, device, cm=None, cm_bright=None):
    """Three views of a trained logistic model over a 2-D feature grid:
    the raw logit contour, the sigmoid as a 3-D surface, and the probability
    contour with the 0.5 decision boundary.

    :param X: 2-D features to scatter on top of each view
    :param y: labels used to color the points
    :param model: torch module mapping (x1, x2) -> logit
    :param device: torch device to evaluate the model on
    :param cm: colormap for the surfaces/contours
    :param cm_bright: colormap for the scattered points
    :return: the matplotlib figure
    """
    if cm is None:
        cm = plt.cm.RdBu
    if cm_bright is None:
        cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    fig = plt.figure(figsize=(15, 4.5))
    h = .02  # step size in the mesh
    # x_min, x_max = X_train[:, 0].min() - .5, X_train[:, 0].max() + .5
    # y_min, y_max = X_train[:, 1].min() - .5, X_train[:, 1].max() + .5
    x_min, x_max = -2.25, 2.25
    y_min, y_max = -2.25, 2.25
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # evaluate the model on the whole grid once; reused by all three panels
    logits = model(torch.as_tensor(np.c_[xx.ravel(), yy.ravel()]).float().to(device))
    logits = logits.detach().cpu().numpy().reshape(xx.shape)
    yhat = sigmoid(logits)
    # 1st plot: raw logit contour
    ax = plt.subplot(1, 3, 1)
    contour = ax.contourf(xx, yy, logits, 25, cmap=cm, alpha=.8)
    # Plot the training points
    ax.scatter(X[:, 0], X[:, 1], c=y, cmap=cm_bright)
    # Plot the testing points
    #ax.scatter(X_val[:, 0], X_val[:, 1], c=y_val, cmap=cm_bright, edgecolors='k', alpha=0.6)
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xlabel(r'$X_1$')
    ax.set_ylabel(r'$X_2$')
    ax.set_title(r'$z = b + w_1x_1 + w_2x_2$')
    ax.grid(False)
    ax_c = plt.colorbar(contour)
    ax_c.set_label("$z$", rotation=0)
    # 2nd plot: sigmoid of the logits as a 3-D surface
    ax = fig.add_subplot(1, 3, 2, projection='3d')
    surf = ax.plot_surface(xx, yy, yhat, rstride=1, cstride=1, alpha=.5, cmap=cm, linewidth=0, antialiased=True, vmin=0, vmax=1)
    # Plot the training points
    ax.scatter(X[:, 0], X[:, 1], c=y, cmap=cm_bright)
    # Plot the testing points
    #ax.scatter(X_val[:, 0], X_val[:, 1], c=y_val, cmap=cm_bright, edgecolors='k', alpha=0.6)
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xlabel(r'$X_1$')
    ax.set_ylabel(r'$X_2$')
    ax.set_title(r'$\sigma(z) = P(y=1)$')
    ax_c = plt.colorbar(surf)
    ax_c.set_ticks([0, .25, .5, .75, 1])
    ax.view_init(30, 220)
    # 3rd plot: probability contour with the 0.5 decision boundary
    ax = plt.subplot(1, 3, 3)
    ax.contour(xx, yy, yhat, levels=[.5], cmap="Greys", vmin=0, vmax=1)
    contour = ax.contourf(xx, yy, yhat, 25, cmap=cm, alpha=.8, vmin=0, vmax=1)
    # Plot the training points
    ax.scatter(X[:, 0], X[:, 1], c=y, cmap=cm_bright)
    # Plot the testing points
    #ax.scatter(X_val[:, 0], X_val[:, 1], c=y_val, cmap=cm_bright, edgecolors='k', alpha=0.6)
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xlabel(r'$X_1$')
    ax.set_ylabel(r'$X_2$')
    ax.set_title(r'$\sigma(z) = P(y=1)$')
    ax.grid(False)
    ax_c = plt.colorbar(contour)
    ax_c.set_ticks([0, .25, .5, .75, 1])
    plt.tight_layout()
    return fig
def one_dimension(x, y, colors=None):
    """Draw class-colored points on a 1-D number line.

    :param x: 1-D feature values
    :param y: binary labels (0/1)
    :param colors: two-element list [negative_color, positive_color]
    :return: the matplotlib figure
    """
    colors = ['r', 'b'] if colors is None else colors
    fig, ax = plt.subplots(1, 1, figsize=(10, 2))
    ax.grid(False)
    ax.set_ylim([-.1, .1])
    ax.axes.get_yaxis().set_visible(False)
    ax.plot([-3, 3], [0, 0], linewidth=2, c='k', zorder=1)
    ax.plot([0, 0], [-.03, .03], c='k', zorder=1)
    # positives first, then negatives (matching the original draw order)
    for cls in (1, 0):
        mask = y == cls
        ax.scatter(x[mask], np.zeros_like(x[mask]), c=colors[cls], s=150, zorder=2, linewidth=3)
    ax.set_xlabel(r'$X_1$')
    ax.set_title('One Dimension')
    fig.tight_layout()
    return fig
def two_dimensions(x, y, colors=None):
    """Show the 1-D data alongside its lift into 2-D via x2 = x1**2, where
    the classes become separable by the dashed line at x2 = 1.

    :param x: 1-D feature values
    :param y: binary labels (0/1)
    :param colors: two-element list [negative_color, positive_color]
    :return: the matplotlib figure
    """
    if colors is None:
        colors = ['r', 'b']
    # lift to 2-D: the second feature is the square of the first
    x2 = np.concatenate([x.reshape(-1, 1), (x ** 2).reshape(-1, 1)], axis=1)
    fig = plt.figure(figsize=(10, 4.5))
    gs = fig.add_gridspec(3, 2)
    # left panel: the original number line (bottom row of the grid)
    ax = fig.add_subplot(gs[2, 0])
    ax.grid(False)
    ax.set_ylim([-.1, .1])
    ax.axes.get_yaxis().set_visible(False)
    ax.plot([-3, 3], [0, 0], linewidth=2, c='k', zorder=1)
    ax.plot([0, 0], [-.03, .03], c='k', zorder=1)
    ax.scatter(x[y==1], np.zeros_like(x[y==1]), c=colors[1], s=150, zorder=2, linewidth=3)
    ax.scatter(x[y==0], np.zeros_like(x[y==0]), c=colors[0], s=150, zorder=2, linewidth=3)
    ax.set_xlabel(r'$X_1$')
    ax.set_title('One Dimension')
    # right panel: the lifted representation spanning the full grid height
    ax = fig.add_subplot(gs[:, 1])
    ax.scatter(*x2[y==1, :].T, c='b', s=150, zorder=2, linewidth=3)
    ax.scatter(*x2[y==0, :].T, c='r', s=150, zorder=2, linewidth=3)
    ax.plot([-2, 2], [1, 1], 'k--', linewidth=2)
    ax.set_xlabel(r'$X_1$')
    ax.set_ylabel(r'$X_2=X_1^2$')
    ax.set_title('Two Dimensions')
    fig.tight_layout()
    return fig
def figure9(x, y, model, device, probabilities, threshold, shift=0.0, annot=False, cm=None, cm_bright=None):
    """Combine the decision-boundary contour (left) with the probability
    line (right) in a single figure.

    :return: the matplotlib figure
    """
    fig = plt.figure(figsize=(15, 5))
    grid = fig.add_gridspec(3, 3)
    contour_ax = fig.add_subplot(grid[:, 0])
    probability_contour(contour_ax, model, device, x, y, threshold, cm, cm_bright)
    colors = ['r', 'b'] if cm_bright is None else cm_bright.colors
    line_ax = fig.add_subplot(grid[1, 1:])
    probability_line(line_ax, y, probabilities, threshold, shift, annot, colors)
    fig.tight_layout()
    return fig
def figure10(y, probabilities, threshold, shift, annot, colors=None):
    """Render a single probability line for the given threshold.

    :return: the matplotlib figure
    """
    fig, axis = plt.subplots(1, 1, figsize=(10, 2))
    probability_line(axis, y, probabilities, threshold, shift, annot, colors)
    fig.tight_layout()
    return fig
def figure17(y, probabilities, threshs):
    """ROC and PR curves built by thresholding `probabilities` at each value
    in `threshs`; undefined (NaN) precision values are mapped to 1.

    :return: the matplotlib figure
    """
    cms = [confusion_matrix(y, probabilities >= t) for t in threshs]
    rates = np.array([tpr_fpr(cm) for cm in cms])
    precrec = np.nan_to_num(np.array([precision_recall(cm) for cm in cms]), nan=1.)
    return eval_curves(rates[:, 1], rates[:, 0], precrec[:, 1], precrec[:, 0], threshs, line=True, annot=False)
def figure19(y, probabilities, threshs=(.4, .5, .57), colors=None):
    """Stack three probability-line plots, one per threshold in `threshs`.

    :return: the matplotlib figure
    """
    fig, axs = plt.subplots(3, 1, figsize=(10, 6))
    for axis, threshold in zip(axs, (threshs[0], threshs[1], threshs[2])):
        probability_line(axis, y, probabilities, threshold, 0.0, False, colors)
    fig.tight_layout()
    return fig
def figure20(y):
    """ROC and PR curves for a perfect classifier (scores equal the labels).

    :return: the matplotlib figure
    """
    fpr, tpr, roc_thresholds = roc_curve(y, y)
    prec, rec, pr_thresholds = precision_recall_curve(y, y)
    return eval_curves(fpr, tpr, rec, prec, roc_thresholds, pr_thresholds, line=True)
def figure21(y, probabilities):
    """ROC and PR curves for the given scores, with no-skill baselines drawn
    as dashed lines (the diagonal for ROC, the positive rate for PR).

    :return: the matplotlib figure
    """
    fpr, tpr, roc_thresholds = roc_curve(y, probabilities)
    prec, rec, pr_thresholds = precision_recall_curve(y, probabilities)
    fig = eval_curves(fpr, tpr, rec, prec, roc_thresholds, pr_thresholds, line=True)
    axs = fig.axes
    axs[0].plot([0, 1], [0, 1], 'k--', linewidth=2)
    axs[1].plot([0, 1], [y.mean(), y.mean()], 'k--', linewidth=2)
    return fig
"numpy.arange",
"matplotlib.pyplot.colorbar",
"sklearn.metrics.precision_recall_curve",
"matplotlib.pyplot.style.use",
"numpy.zeros_like",
"matplotlib.colors.ListedColormap",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.figure",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.tight_layout",... | [((261, 293), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (274, 293), True, 'import matplotlib.pyplot as plt\n'), ((4017, 4038), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['contour'], {}), '(contour)\n', (4029, 4038), True, 'import matplotlib.pyplot as plt\n'), ((4602, 4637), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(10, 5)'}), '(1, 2, figsize=(10, 5))\n', (4614, 4637), True, 'import matplotlib.pyplot as plt\n'), ((5820, 5855), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 6)'}), '(1, 2, figsize=(12, 6))\n', (5832, 5855), True, 'import matplotlib.pyplot as plt\n'), ((6465, 6500), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(10, 5)'}), '(1, 2, figsize=(10, 5))\n', (6477, 6500), True, 'import matplotlib.pyplot as plt\n'), ((6512, 6539), 'numpy.linspace', 'np.linspace', (['(0.01)', '(0.99)', '(99)'], {}), '(0.01, 0.99, 99)\n', (6523, 6539), True, 'import numpy as np\n'), ((7080, 7115), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(10, 5)'}), '(1, 2, figsize=(10, 5))\n', (7092, 7115), True, 'import matplotlib.pyplot as plt\n'), ((7127, 7154), 'numpy.linspace', 'np.linspace', (['(0.01)', '(0.99)', '(99)'], {}), '(0.01, 0.99, 99)\n', (7138, 7154), True, 'import numpy as np\n'), ((7754, 7788), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(5, 5)'}), '(1, 1, figsize=(5, 5))\n', (7766, 7788), True, 'import matplotlib.pyplot as plt\n'), ((7800, 7827), 'numpy.linspace', 'np.linspace', (['(0.01)', '(0.99)', '(99)'], {}), '(0.01, 0.99, 99)\n', (7811, 7827), True, 'import numpy as np\n'), ((8314, 8343), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 4.5)'}), '(figsize=(15, 4.5))\n', (8324, 8343), True, 'import matplotlib.pyplot as plt\n'), ((8906, 8926), 'matplotlib.pyplot.subplot', 'plt.subplot', 
(['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (8917, 8926), True, 'import matplotlib.pyplot as plt\n'), ((9408, 9429), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['contour'], {}), '(contour)\n', (9420, 9429), True, 'import matplotlib.pyplot as plt\n'), ((10057, 10075), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['surf'], {}), '(surf)\n', (10069, 10075), True, 'import matplotlib.pyplot as plt\n'), ((10168, 10188), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (10179, 10188), True, 'import matplotlib.pyplot as plt\n'), ((10752, 10773), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['contour'], {}), '(contour)\n', (10764, 10773), True, 'import matplotlib.pyplot as plt\n'), ((10820, 10838), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10836, 10838), True, 'import matplotlib.pyplot as plt\n'), ((10963, 10998), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 2)'}), '(1, 1, figsize=(10, 2))\n', (10975, 10998), True, 'import matplotlib.pyplot as plt\n'), ((11675, 11704), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 4.5)'}), '(figsize=(10, 4.5))\n', (11685, 11704), True, 'import matplotlib.pyplot as plt\n'), ((12695, 12722), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (12705, 12722), True, 'import matplotlib.pyplot as plt\n'), ((13206, 13241), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 2)'}), '(1, 1, figsize=(10, 2))\n', (13218, 13241), True, 'import matplotlib.pyplot as plt\n'), ((13602, 13633), 'numpy.nan_to_num', 'np.nan_to_num', (['precrec'], {'nan': '(1.0)'}), '(precrec, nan=1.0)\n', (13615, 13633), True, 'import numpy as np\n'), ((13843, 13878), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(10, 6)'}), '(3, 1, figsize=(10, 6))\n', (13855, 13878), True, 'import matplotlib.pyplot as plt\n'), ((14237, 14252), 
'sklearn.metrics.roc_curve', 'roc_curve', (['y', 'y'], {}), '(y, y)\n', (14246, 14252), False, 'from sklearn.metrics import confusion_matrix, roc_curve, precision_recall_curve\n'), ((14306, 14334), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['y', 'y'], {}), '(y, y)\n', (14328, 14334), False, 'from sklearn.metrics import confusion_matrix, roc_curve, precision_recall_curve\n'), ((14560, 14587), 'sklearn.metrics.roc_curve', 'roc_curve', (['y', 'probabilities'], {}), '(y, probabilities)\n', (14569, 14587), False, 'from sklearn.metrics import confusion_matrix, roc_curve, precision_recall_curve\n'), ((14638, 14678), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['y', 'probabilities'], {}), '(y, probabilities)\n', (14660, 14678), False, 'from sklearn.metrics import confusion_matrix, roc_curve, precision_recall_curve\n'), ((3008, 3046), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['#FF0000', '#0000FF']"], {}), "(['#FF0000', '#0000FF'])\n", (3022, 3046), False, 'from matplotlib.colors import ListedColormap\n'), ((3174, 3200), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'h'], {}), '(x_min, x_max, h)\n', (3183, 3200), True, 'import numpy as np\n'), ((3227, 3253), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'h'], {}), '(y_min, y_max, h)\n', (3236, 3253), True, 'import numpy as np\n'), ((4186, 4233), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y', '(probabilities >= threshold)'], {}), '(y, probabilities >= threshold)\n', (4202, 4233), False, 'from sklearn.metrics import confusion_matrix, roc_curve, precision_recall_curve\n'), ((5766, 5804), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['#FF0000', '#0000FF']"], {}), "(['#FF0000', '#0000FF'])\n", (5780, 5804), False, 'from matplotlib.colors import ListedColormap\n'), ((8265, 8303), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['#FF0000', '#0000FF']"], {}), "(['#FF0000', '#0000FF'])\n", (8279, 8303), False, 'from 
matplotlib.colors import ListedColormap\n'), ((8624, 8650), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'h'], {}), '(x_min, x_max, h)\n', (8633, 8650), True, 'import numpy as np\n'), ((8677, 8703), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'h'], {}), '(y_min, y_max, h)\n', (8686, 8703), True, 'import numpy as np\n'), ((11223, 11247), 'numpy.zeros_like', 'np.zeros_like', (['x[y == 1]'], {}), '(x[y == 1])\n', (11236, 11247), True, 'import numpy as np\n'), ((11314, 11338), 'numpy.zeros_like', 'np.zeros_like', (['x[y == 0]'], {}), '(x[y == 0])\n', (11327, 11338), True, 'import numpy as np\n'), ((11997, 12021), 'numpy.zeros_like', 'np.zeros_like', (['x[y == 1]'], {}), '(x[y == 1])\n', (12010, 12021), True, 'import numpy as np\n'), ((12088, 12112), 'numpy.zeros_like', 'np.zeros_like', (['x[y == 0]'], {}), '(x[y == 0])\n', (12101, 12112), True, 'import numpy as np\n'), ((13409, 13456), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y', '(probabilities >= threshold)'], {}), '(y, probabilities >= threshold)\n', (13425, 13456), False, 'from sklearn.metrics import confusion_matrix, roc_curve, precision_recall_curve\n'), ((428, 438), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (434, 438), True, 'import numpy as np\n')] |
# !/usr/bin/env python
# -- coding: utf-8 --
# @Author zengxiaohui
# Datatime:4/30/2021 2:04 PM
# @File:PIL_utils
import cv2
import numpy as np
import numpy
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
from PIL import Image, ImageOps
def PIL2cv2(image):
    """Convert a PIL image to an OpenCV-style BGR ndarray."""
    rgb_array = np.asarray(image)
    # OpenCV expects channels in BGR order, PIL delivers RGB
    return cv2.cvtColor(rgb_array, cv2.COLOR_RGB2BGR)
def ImgText_CN(img, text, left, top, textColor=(0, 255, 0), textSize=20):
    """Draw text (including Chinese glyphs) on an image; return a BGR ndarray."""
    # an OpenCV (BGR ndarray) input is first converted to a PIL image
    if isinstance(img, numpy.ndarray):
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    painter = ImageDraw.Draw(img)
    # SimHei covers CJK characters, which PIL's default bitmap font does not
    font = ImageFont.truetype("font/simhei.ttf", textSize, encoding="utf-8")
    painter.text((left, top), text, textColor, font=font)
    return cv2.cvtColor(numpy.asarray(img), cv2.COLOR_RGB2BGR)
| [
"PIL.ImageDraw.Draw",
"numpy.asarray",
"PIL.ImageFont.truetype",
"cv2.cvtColor"
] | [((597, 616), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (611, 616), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((632, 697), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""font/simhei.ttf"""', 'textSize'], {'encoding': '"""utf-8"""'}), "('font/simhei.ttf', textSize, encoding='utf-8')\n", (650, 697), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((327, 344), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (337, 344), True, 'import numpy as np\n'), ((781, 799), 'numpy.asarray', 'numpy.asarray', (['img'], {}), '(img)\n', (794, 799), False, 'import numpy\n'), ((548, 584), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (560, 584), False, 'import cv2\n')] |
from scipy.optimize import least_squares
import numpy as np
def reprojection_loss_function(opt_variables, points_2d, num_pts):
    """Residuals between observed 2D points and reprojections of 3D points.

    `opt_variables` packs a flattened 3x4 projection matrix followed by
    `num_pts` homogeneous 3D points (4 values per point).
    """
    camera = opt_variables[:12].reshape(3, 4)
    structure = opt_variables[12:].reshape((num_pts, 4))
    residuals = []
    for k, world_point in enumerate(structure):
        observed = np.array([points_2d[0][k], points_2d[1][k]])
        projected = np.matmul(camera, world_point)
        # perspective divide: normalize by the homogeneous coordinate
        projected = projected / projected[2]
        residuals.append(observed - projected[:2])
    return np.asarray(residuals).ravel()
def bundle_adjustment(points_3d, points_2d, img, projection_matrix):
    """Jointly refine the projection matrix and the 3D points.

    Minimizes the total reprojection error via scipy's least_squares.

    Args:
        points_3d: homogeneous 3D points — assumed shape (4, num_points)
            given the order="F" ravel below; TODO confirm with callers.
        points_2d: observed image coordinates as [xs, ys].
        img: unused, kept for interface compatibility.
        projection_matrix: initial 3x4 camera projection matrix.

    Returns:
        (P, points_3d): the optimized 3x4 projection matrix and the
        optimized points, shape (num_points, 4).
    """
    # BUG FIX: a stray early `return None` made everything below dead code,
    # so the function never actually performed bundle adjustment.
    opt_variables = np.hstack((projection_matrix.ravel(), points_3d.ravel(order="F")))
    num_points = len(points_2d[0])
    corrected_values = least_squares(reprojection_loss_function, opt_variables, args=(points_2d, num_points))
    print("The optimized values \n" + str(corrected_values))
    P = corrected_values.x[0:12].reshape(3, 4)
    points_3d = corrected_values.x[12:].reshape((num_points, 4))
    return P, points_3d
return P, points_3d | [
"numpy.array",
"scipy.optimize.least_squares",
"numpy.matmul"
] | [((770, 860), 'scipy.optimize.least_squares', 'least_squares', (['reprojection_loss_function', 'opt_variables'], {'args': '(points_2d, num_points)'}), '(reprojection_loss_function, opt_variables, args=(points_2d,\n num_points))\n', (783, 860), False, 'from scipy.optimize import least_squares\n'), ((305, 353), 'numpy.array', 'np.array', (['[points_2d[0][idx], points_2d[1][idx]]'], {}), '([points_2d[0][idx], points_2d[1][idx]])\n', (313, 353), True, 'import numpy as np\n'), ((380, 399), 'numpy.matmul', 'np.matmul', (['P', 'pt_3d'], {}), '(P, pt_3d)\n', (389, 399), True, 'import numpy as np\n'), ((510, 529), 'numpy.array', 'np.array', (['rep_error'], {}), '(rep_error)\n', (518, 529), True, 'import numpy as np\n')] |
import battleship as ship
import pandas as pd
import numpy as np
def random_generation(generation_size, genes):
    """Create the initial gene pool of unique random bit-string chromosomes.

    Args:
        generation_size: number of chromosomes to create.
        genes: number of bits per chromosome.

    Returns:
        DataFrame with one row per chromosome (Fitness left unset/NaN).
    """
    generation = pd.DataFrame(columns=['Sequence','Chromosome','Generation','Birth','Fitness','Parents'])
    i = 0
    while i < generation_size:
        # build one random chromosome
        chromosome = {}
        chromosome['Sequence'] = i + 1
        chromosome['Chromosome'] = ''.join(str(x) for x in list(np.random.randint(2, size=genes)))
        chromosome['Generation'] = 1
        chromosome['Birth'] = 'Random'
        chromosome['Parents'] = 0
        # BUG FIX: `x in series` tests the *index labels*, not the values, so
        # the original uniqueness check never rejected anything; compare
        # against .values instead.
        if chromosome['Chromosome'] not in generation['Chromosome'].values:
            # DataFrame.append was removed in pandas 2.0; use pd.concat.
            generation = pd.concat([generation, pd.DataFrame([chromosome])], ignore_index=True)
            i += 1
    return generation
def assign_elites(generation, elite_rate):
    """Flag the top `elite_rate` fraction of chromosomes as elite.

    Sorts the generation by descending fitness and marks the first
    int(elite_rate * size) rows with Elite=True.

    Args:
        generation: DataFrame of chromosomes with a 'Fitness' column.
        elite_rate: fraction (0..1) of the generation to mark as elite.

    Returns:
        The generation sorted by descending fitness with an 'Elite' column.
    """
    generation_size = generation.shape[0]
    elites = elite_rate * generation_size
    generation['Elite'] = False
    generation = generation.sort_values(by='Fitness', ascending=False)
    # FIX: look the 'Elite' column up by label instead of hard-coding
    # position 6, which silently breaks if the column layout changes.
    generation.iloc[0:int(elites), generation.columns.get_loc('Elite')] = True
    return generation
def select_elites(generation):
    """Clone the elite chromosomes as fresh rows seeding the next generation."""
    # keep only the rows flagged as elite
    elite_mask = generation['Elite'] == True
    elites = generation.loc[elite_mask].copy()
    # clones get new sequence numbers and remember their origin as Parents
    highest_sequence = generation['Sequence'].max()
    elites['Parents'] = elites['Sequence']
    elites['Sequence'] = list(range(highest_sequence + 1, highest_sequence + elites.shape[0] + 1))
    elites['Birth'] = 'Elitism'
    elites['Elite'] = False
    elites['Generation'] = generation['Generation'].max() + 1
    return elites
def create_mutants(generation, mutants, bit_flip_rate):
    """Append `mutants` chromosomes created by bit-flipping random elites.

    Args:
        generation: DataFrame whose first rows are the elites ('Elitism').
        mutants: number of unique mutants to add.
        bit_flip_rate: independent probability of flipping each gene.

    Returns:
        The generation with the mutant rows appended.
    """
    # get generation attributes
    last_generation = generation['Generation'].max()
    last_sequence = generation['Sequence'].max()
    n_elites = generation['Birth'].value_counts()['Elitism']
    i = 0
    while i < mutants:
        chromosome = {}
        chromosome['Sequence'] = last_sequence + i + 1
        chromosome['Generation'] = last_generation
        chromosome['Birth'] = 'Mutation'
        chromosome['Elite'] = False
        # select a random elite as parent (elites occupy the first rows)
        parent_index = np.random.choice(n_elites)
        chromosome['Parents'] = list(generation['Sequence'].values)[parent_index]
        parent = list(generation['Chromosome'].values)[parent_index]
        # decide independently for every gene whether it gets flipped
        bit_flip_array = np.random.choice(2, len(parent), p=[1 - bit_flip_rate, bit_flip_rate])
        bits_to_flip = ''.join(str(x) for x in list(bit_flip_array.flatten()))
        mutant = ''
        for j in range(len(bits_to_flip)):
            if not int(bits_to_flip[j]):
                mutant += parent[j]
            else:
                mutant += str(abs(int(parent[j]) - 1))
        chromosome['Chromosome'] = mutant
        # BUG FIX: `x in series` tests the index, not the values; the
        # original uniqueness check therefore never rejected duplicates.
        if chromosome['Chromosome'] not in generation['Chromosome'].values:
            # DataFrame.append was removed in pandas 2.0; use pd.concat.
            generation = pd.concat([generation, pd.DataFrame([chromosome])], ignore_index=True)
            i += 1
    return generation
def create_splices(generation, n_splice_pairs):
    """Append `n_splice_pairs` crossover pairs bred from random elite parents.

    Each pair of distinct elites produces two children by single-point
    crossover at a random gene.

    Args:
        generation: DataFrame whose first rows are the elites ('Elitism').
        n_splice_pairs: number of parent pairs (2 children are added each).

    Returns:
        The generation with the spliced children appended.
    """
    # get generation attributes
    last_generation = generation['Generation'].max()
    last_sequence = generation['Sequence'].max()
    n_elites = generation['Birth'].value_counts()['Elitism']
    i = 0
    while i < n_splice_pairs:
        chromosome = {}
        chromosome['Generation'] = last_generation
        chromosome['Birth'] = 'Splice Pair'
        chromosome['Elite'] = False
        # pick two distinct random elites as parents (elites are the first rows)
        parent_indices = np.random.choice(n_elites, 2, replace=False)
        chromosome['Parents'] = np.array(generation['Sequence'].values)[parent_indices]
        parents = np.array(generation['Chromosome'].values)[parent_indices]
        # single-point crossover at a random gene
        splice_bit = np.random.randint(len(parents[0]))
        splices = []
        splices.append(parents[0][0:splice_bit] + parents[1][splice_bit:len(parents[1])])
        splices.append(parents[1][0:splice_bit] + parents[0][splice_bit:len(parents[0])])
        # BUG FIX: two children are added per pair, so sequence numbers must
        # advance by two per iteration; the original reused numbers across
        # iterations, producing duplicate Sequence values.
        chromosome['Chromosome'] = splices[0]
        chromosome['Sequence'] = last_sequence + 2 * i + 1
        # DataFrame.append was removed in pandas 2.0; use pd.concat.
        generation = pd.concat([generation, pd.DataFrame([chromosome])], ignore_index=True)
        chromosome = dict(chromosome)
        chromosome['Chromosome'] = splices[1]
        chromosome['Sequence'] = last_sequence + 2 * i + 2
        generation = pd.concat([generation, pd.DataFrame([chromosome])], ignore_index=True)
        i += 1
    return generation
def fill_random(generation, generation_size, genes):
    """Top up the generation with unique random chromosomes.

    Adds random bit strings until the generation holds `generation_size`
    rows, keeping diversity in the pool.

    Args:
        generation: current (partial) generation DataFrame.
        generation_size: target number of rows.
        genes: number of bits per chromosome.

    Returns:
        The filled generation.
    """
    # get generation attributes
    last_generation = generation['Generation'].max()
    last_sequence = generation['Sequence'].max()
    # NOTE: i starts at the current size, so new Sequence numbers skip ahead
    # by that amount; preserved for compatibility with the original scheme.
    i = generation.shape[0]
    while i < generation_size:
        chromosome = {}
        chromosome['Sequence'] = last_sequence + i + 1
        chromosome['Chromosome'] = ''.join(str(x) for x in list(np.random.randint(2, size=genes)))
        chromosome['Generation'] = last_generation
        chromosome['Birth'] = 'Random'
        chromosome['Parents'] = 0
        chromosome['Elite'] = False
        # BUG FIX: `x in series` tests the index, not the values; compare
        # against .values so the uniqueness check actually works.
        if chromosome['Chromosome'] not in generation['Chromosome'].values:
            # DataFrame.append was removed in pandas 2.0; use pd.concat.
            generation = pd.concat([generation, pd.DataFrame([chromosome])], ignore_index=True)
            i += 1
    return generation
def create_descendents(gene_pool, elite_rate, solution, stop_limit):
    """Evolve successive generations until a chromosome reaches `stop_limit`.

    Each cycle applies elitism, crossover, mutation, and random fill, then
    scores the new generation against `solution`.

    Args:
        gene_pool: DataFrame holding the initial generation (with Fitness).
        elite_rate: fraction of each generation carried over as elites.
        solution: target passed to ship.accuracy for fitness scoring.
        stop_limit: fitness value at which evolution stops.

    Returns:
        The full gene pool across all generations.
    """
    # copy initial generation
    next_generation = gene_pool.copy()
    generation_size = next_generation.shape[0]
    # create generations until fitness criteria is achieved
    while gene_pool['Fitness'].max() < stop_limit:
        # elitism: carry the fittest chromosomes over unchanged
        next_generation = select_elites(next_generation)
        # crossover: breed splice pairs from random elites
        splice_pair_rate = elite_rate / 2
        n_splice_pairs = int(splice_pair_rate * generation_size)
        next_generation = create_splices(next_generation, n_splice_pairs)
        # mutation: bit-flipped copies of random elites
        mutant_rate = 0.60
        bit_flip_rate = 0.01
        n_mutants = int(mutant_rate * generation_size)
        next_generation = create_mutants(next_generation, n_mutants, bit_flip_rate)
        # fill the rest of the generation with random chromosomes for diversity
        next_generation = fill_random(next_generation, generation_size, 100)
        # score each chromosome against the target solution
        next_generation['Fitness'] = next_generation.apply(lambda row: ship.accuracy(row.Chromosome, solution), axis=1)
        # FIX: honour the elite_rate argument — the original silently reset
        # it to 0.20 every iteration, shadowing the parameter. A stray no-op
        # statement (`next_generation`) was also removed.
        next_generation = assign_elites(next_generation, elite_rate)
        # archive the generation (DataFrame.append was removed in pandas 2.0)
        gene_pool = pd.concat([gene_pool, next_generation])
    return gene_pool
def solve(solution, generation_size):
    """Run the genetic algorithm until a chromosome matches `solution` exactly.

    Args:
        solution: target bit pattern ship.accuracy scores against.
        generation_size: number of chromosomes per generation.

    Returns:
        DataFrame of every chromosome ever created, indexed by Sequence.
    """
    # initialize the first random generation of 100-gene chromosomes
    gene_pool = random_generation(generation_size, 100)
    # score every chromosome against the target solution
    gene_pool['Fitness'] = gene_pool.apply(lambda row: ship.accuracy(row.Chromosome, solution), axis=1)
    # flag the top 20% as elites for the first breeding cycle
    elite_rate = 0.20
    gene_pool = assign_elites(gene_pool, elite_rate)
    # evolve until a perfect-fitness (1.0) chromosome appears
    gene_pool = create_descendents(gene_pool, elite_rate, solution, 1.0)
    gene_pool = gene_pool.set_index('Sequence')
    return gene_pool
"numpy.random.choice",
"numpy.array",
"numpy.random.randint",
"pandas.DataFrame",
"battleship.accuracy"
] | [((168, 265), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Sequence', 'Chromosome', 'Generation', 'Birth', 'Fitness', 'Parents']"}), "(columns=['Sequence', 'Chromosome', 'Generation', 'Birth',\n 'Fitness', 'Parents'])\n", (180, 265), True, 'import pandas as pd\n'), ((2470, 2496), 'numpy.random.choice', 'np.random.choice', (['n_elites'], {}), '(n_elites)\n', (2486, 2496), True, 'import numpy as np\n'), ((4085, 4129), 'numpy.random.choice', 'np.random.choice', (['n_elites', '(2)'], {'replace': '(False)'}), '(n_elites, 2, replace=False)\n', (4101, 4129), True, 'import numpy as np\n'), ((4162, 4201), 'numpy.array', 'np.array', (["generation['Sequence'].values"], {}), "(generation['Sequence'].values)\n", (4170, 4201), True, 'import numpy as np\n'), ((4236, 4277), 'numpy.array', 'np.array', (["generation['Chromosome'].values"], {}), "(generation['Chromosome'].values)\n", (4244, 4277), True, 'import numpy as np\n'), ((7897, 7936), 'battleship.accuracy', 'ship.accuracy', (['row.Chromosome', 'solution'], {}), '(row.Chromosome, solution)\n', (7910, 7936), True, 'import battleship as ship\n'), ((7355, 7394), 'battleship.accuracy', 'ship.accuracy', (['row.Chromosome', 'solution'], {}), '(row.Chromosome, solution)\n', (7368, 7394), True, 'import battleship as ship\n'), ((486, 518), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'genes'}), '(2, size=genes)\n', (503, 518), True, 'import numpy as np\n'), ((5621, 5653), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'genes'}), '(2, size=genes)\n', (5638, 5653), True, 'import numpy as np\n')] |
"""
Agent module
"""
from random import randint, random
import numpy as np
def q_learning(environment, learning_rate, gamma, total_iteration, show=False):
    """
    Q-learning: an off-policy TD control algorithm, following
    "Reinforcement Learning: An Introduction" (Sutton & Barto, 1998), p.158.
    https://web.stanford.edu/class/psych209/Readings/SuttonBartoIPRLBook2ndEd.pdf

    Added on top of the book's pseudo-code: a linearly decaying
    epsilon-greedy exploration schedule and optional rendering.
    """
    render_every = 200
    # exploration probability decays linearly, hitting 0 at 80% of training
    epsilon = lambda step: -1 / (total_iteration * 0.8) * step + 1
    # 'Initialize Q(s,a) arbitrarily' over the 4D discretized state space
    n_states = environment.nb_discretisation_x
    n_actions = 2
    Q = np.random.rand(n_states, n_states, n_states, n_states, n_actions)
    # 'Repeat (for each episode):'
    for episode in range(total_iteration):
        state = environment.reset()
        done = False
        # 'Repeat (for each step of episode):'
        while not done:
            # 'Choose A from S' — explore at random or exploit the policy
            if random() < epsilon(episode):
                action = randint(0, n_actions - 1)
            else:
                action = np.argmax(Q[state[0], state[1], state[2], state[3]])
            # 'Take action A, observe R, S''
            next_state, reward, done = environment.step(action)
            # TD update: Q(S,A) += lr * (R + gamma * max_a Q(S',a) - Q(S,A))
            sa = (state[0], state[1], state[2], state[3], action)
            best_next = np.max(Q[next_state[0], next_state[1], next_state[2], next_state[3], :])
            Q[sa] = Q[sa] + learning_rate * (reward + gamma * best_next - Q[sa])
            # 'S becomes S''
            state = next_state.copy()
        # optionally show behavior in a window every few hundred episodes
        if (episode % render_every == 0 or episode == total_iteration - 1) and show:
            environment.render()
    return Q
| [
"numpy.random.rand",
"numpy.argmax",
"numpy.max",
"random.random",
"random.randint"
] | [((768, 861), 'numpy.random.rand', 'np.random.rand', (['number_states', 'number_states', 'number_states', 'number_states', 'number_action'], {}), '(number_states, number_states, number_states, number_states,\n number_action)\n', (782, 861), True, 'import numpy as np\n'), ((1151, 1159), 'random.random', 'random', ([], {}), '()\n', (1157, 1159), False, 'from random import randint, random\n'), ((1284, 1313), 'random.randint', 'randint', (['(0)', '(number_action - 1)'], {}), '(0, number_action - 1)\n', (1291, 1313), False, 'from random import randint, random\n'), ((1398, 1454), 'numpy.argmax', 'np.argmax', (['Q[statut[0], statut[1], statut[2], statut[3]]'], {}), '(Q[statut[0], statut[1], statut[2], statut[3]])\n', (1407, 1454), True, 'import numpy as np\n'), ((1833, 1918), 'numpy.max', 'np.max', (['Q[futur_statut[0], futur_statut[1], futur_statut[2], futur_statut[3], :]'], {}), '(Q[futur_statut[0], futur_statut[1], futur_statut[2], futur_statut[3], :]\n )\n', (1839, 1918), True, 'import numpy as np\n')] |
#! /usr/bin/env python
"""Run a YOLO_v2 style detection model on test images."""
import argparse
import colorsys
import imghdr
import os
import random
import time,cv2
import numpy as np
from keras import backend as K
from keras.models import load_model
from PIL import Image, ImageDraw, ImageFont
from yad2k.models.keras_yolo import yolo_eval, yolo_head
from settings import *
from yolo_utils import *
import tensorflow as tf
import pickle
# Command-line interface: required model weights plus optional anchor/class
# files, input/output directories and detection thresholds.
parser = argparse.ArgumentParser(
    description='Run a YOLO_v2 style detection model on test images..')
# NOTE(review): the two help fragments below concatenate without a space
# ('bodyof') — confirm whether a trailing space was intended.
parser.add_argument(
    'model_path',
    help='path to h5 model file containing body'
    'of a YOLO_v2 model')
parser.add_argument(
    '-a',
    '--anchors_path',
    help='path to anchors file, defaults to yolo_anchors.txt',
    default='model_data/yolo_anchors.txt')
parser.add_argument(
    '-c',
    '--classes_path',
    help='path to classes file, defaults to coco_classes.txt',
    default='model_data/coco_classes.txt')
parser.add_argument(
    '-t',
    '--test_path',
    help='path to directory of test images, defaults to images/',
    default='images')
parser.add_argument(
    '-o',
    '--output_path',
    help='path to output test images, defaults to images/out',
    default='images/out')
parser.add_argument(
    '-s',
    '--score_threshold',
    type=float,
    help='threshold for bounding box scores, default .3',
    default=.3)
parser.add_argument(
    '-iou',
    '--iou_threshold',
    type=float,
    help='threshold for non max suppression IOU, default .5',
    default=.5)
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=.6):
    """Filter YOLO boxes by thresholding on object and class confidence.

    Arguments:
    box_confidence -- tensor of shape (19, 19, 5, 1)
    boxes -- tensor of shape (19, 19, 5, 4)
    box_class_probs -- tensor of shape (19, 19, 5, 80)
    threshold -- boxes whose best class score is below this value are dropped

    Returns:
    scores -- (None,) class probability score of each surviving box
    boxes -- (None, 4) coordinates (b_x, b_y, b_h, b_w) of surviving boxes
    classes -- (None,) class index detected by each surviving box

    The leading dimension is data dependent: it equals however many boxes
    pass the threshold.
    """
    # combined per-class score for every anchor box: (19, 19, 5, 80)
    box_scores = box_confidence * box_class_probs
    # best class and its score for each box
    box_classes = K.argmax(box_scores, axis=-1)
    box_class_scores = K.max(box_scores, axis=-1, keepdims=False)
    # boolean mask: True where the best class score reaches the threshold
    keep = box_class_scores >= threshold
    # select only the surviving entries from each tensor
    scores = tf.boolean_mask(box_class_scores, keep)
    boxes = tf.boolean_mask(boxes, keep)
    classes = tf.boolean_mask(box_classes, keep)
    return scores, boxes, classes
def preprocess_image(img_path, model_image_size):
    """Load an image and prepare a normalized batch tensor for the model.

    Args:
        img_path: path to the image file.
        model_image_size: (height, width) the model expects.

    Returns:
        (image, image_data): the original PIL image and a float32 array of
        shape (1, h, w, channels) scaled to [0, 1].
    """
    # FIX: dropped the unused `image_type = imghdr.what(img_path)` call —
    # its result was never read (and imghdr is deprecated in Python 3.11+).
    image = Image.open(img_path)
    # PIL's resize takes (width, height), hence the reversal
    resized_image = image.resize(tuple(reversed(model_image_size)), Image.BICUBIC)
    image_data = np.array(resized_image, dtype='float32')
    image_data /= 255.
    image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
    return image, image_data
def preprocess_frame(frame, model_image_size):
    """Prepare a BGR video frame for the model.

    Args:
        frame: BGR ndarray as delivered by cv2.VideoCapture.
        model_image_size: (width, height) to resize to for the network input.

    Returns:
        (image, image_data): the full-resolution frame as a PIL RGB image
        (used later for drawing), and a float32 array of shape
        (1, h, w, 3) scaled to [0, 1] for the network.
    """
    # FIX: removed dead commented-out code and the confusing reuse of the
    # `frame`/`image` names; behavior is unchanged.
    rgb = frame[:, :, ::-1]  # BGR -> RGB
    resized = cv2.resize(rgb, (model_image_size[0], model_image_size[1]))
    image_data = np.array(resized, dtype='float32')
    image_data /= 255.
    image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
    image = Image.fromarray(rgb.astype('uint8'), 'RGB')
    return image, image_data
def calculate_position(bbox, transform_matrix, warped_size, pix_per_meter):
    """Estimate the distance (in meters) to a detected box on the road plane.

    Projects the bottom-center of the bounding box through the bird's-eye
    perspective transform and converts pixels to meters.

    Args:
        bbox: bounding box; presumably (top, left, bottom, right) — TODO
            confirm against the detector's output ordering.
        transform_matrix: road-plane perspective transform matrix.
        warped_size: (width, height) of the warped bird's-eye image.
        pix_per_meter: (x, y) pixels-per-meter scale in the warped image.

    Returns:
        Distance in meters as an ndarray, or None when bbox is empty.
    """
    if len(bbox) == 0:
        print('Nothing')
        # FIX: make the empty-bbox result an explicit None instead of
        # silently falling off the end of the function.
        return None
    # bottom-center of the box, shaped for cv2.perspectiveTransform
    pos = np.array((bbox[1]/2+bbox[3]/2, bbox[2])).reshape(1, 1, -1)
    dst = cv2.perspectiveTransform(pos, transform_matrix).reshape(-1, 1)
    return np.array((warped_size[1]-dst[1])/pix_per_meter[1])
def _main(args):
    """Run YOLOv2 detection on a hard-coded input video, estimate each
    detection's distance via a road-plane perspective transform, and write
    an annotated output video.

    Args:
        args: parsed argparse namespace (model/anchors/classes paths,
            score and IOU thresholds).
    """
    # Load the offline camera calibration (pickled by a separate script).
    with open(CALIB_FILE_NAME, 'rb') as f:
        calib_data = pickle.load(f)
    cam_matrix = calib_data["cam_matrix"]
    dist_coeffs = calib_data["dist_coeffs"]
    img_size = calib_data["img_size"]
    # Load the bird's-eye perspective transform used for distance estimation.
    with open(PERSPECTIVE_FILE_NAME, 'rb') as f:
        perspective_data = pickle.load(f)
    perspective_transform = perspective_data["perspective_transform"]
    pixels_per_meter = perspective_data['pixels_per_meter']
    orig_points = perspective_data["orig_points"]
    ### Video — NOTE(review): input/output paths are hard-coded absolute paths.
    video_path = '/home/crke/Work/YAD2K/input_videos/challenge_video.mp4' #'/home/crke/Work/YAD2K/input_videos/harder_challenge_video.mp4' #'/home/crke/Work/YAD2K/input_videos/project_video.mp4'
    output_path = '/home/crke/Work/YAD2K/output_videos/'
    output_Video = os.path.basename(video_path)
    output_Video = os.path.join(output_path, output_Video)
    cap = cv2.VideoCapture(video_path)
    FrameCnt = 0
    fps = cap.get(cv2.CAP_PROP_FPS)
    FrameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    Width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    Height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # isModelSize is computed but never used below.
    isModelSize = False
    if Width == 608 and Height == 608:
        isModelSize = True
    print("Video Info:")
    print("Input: ", video_path)
    print("FPS: ", fps)
    print("FrameNum: ", FrameNum)
    print("Width: ", Width)
    print("Height: ", Height)
    print("Output: ", output_Video)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v') # DIVX, XVID, MJPG, X264, WMV1, WMV2
    outVideo = cv2.VideoWriter(output_Video, fourcc, fps, (VIDEO_SIZE[0], VIDEO_SIZE[1]))
    ###
    # Resolve the CLI paths and make sure the output directory exists.
    model_path = os.path.expanduser(args.model_path)
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
    anchors_path = os.path.expanduser(args.anchors_path)
    classes_path = os.path.expanduser(args.classes_path)
    test_path = os.path.expanduser(args.test_path)
    output_path = os.path.expanduser(args.output_path)
    if not os.path.exists(output_path):
        print('Creating output path {}'.format(output_path))
        os.mkdir(output_path)
    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.
    # Load class names (one per line) and anchor boxes (flat comma list).
    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]
    with open(anchors_path) as f:
        anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        anchors = np.array(anchors).reshape(-1, 2)
    yolo_model = load_model(model_path)
    # Verify model, anchors, and classes are compatible
    num_classes = len(class_names)
    num_anchors = len(anchors)
    # TODO: Assumes dim ordering is channel last
    model_output_channels = yolo_model.layers[-1].output_shape[-1]
    assert model_output_channels == num_anchors * (num_classes + 5), \
        'Mismatch between model and given anchor and class sizes. ' \
        'Specify matching anchors and classes with --anchors_path and ' \
        '--classes_path flags.'
    print('{} model, anchors, and classes loaded.'.format(model_path))
    # Check if model is fully convolutional, assuming channel last order.
    model_image_size = yolo_model.layers[0].input_shape[1:3]
    is_fixed_size = model_image_size != (None, None)
    # Generate colors for drawing bounding boxes (one distinct hue per class).
    hsv_tuples = [(x / len(class_names), 1., 1.)
                  for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.
    # Generate output tensor targets for filtered bounding boxes.
    # TODO: Wrap these backend operations with Keras layers.
    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(
        yolo_outputs,
        input_image_shape,
        score_threshold=args.score_threshold,
        iou_threshold=args.iou_threshold)
    # Image for debug
    # image_file = 'test4.jpg'
    # image_shape = (720., 1280.)
    #
    # frame, image_data = preprocess_image("images/" + image_file, (int(MODEL_SIZE[0]), int(MODEL_SIZE[1])))
    # out_boxes, out_scores, out_classes = sess.run(
    #     [boxes, scores, classes],
    #     feed_dict={
    #         yolo_model.input: image_data,
    #         input_image_shape: [(image_shape[0]), (image_shape[1])],
    #         K.learning_phase(): 0
    #     })
    #
    #
    # distance = np.zeros(shape=(3,1))
    # if not len(out_boxes) == 0:
    #     l = len(out_boxes)
    #     for i in range(l):
    #         distance[i] = calculate_position(bbox=out_boxes[i],
    #                                          transform_matrix=perspective_transform,
    #                                          warped_size=UNWARPED_SIZE,
    #                                          pix_per_meter=pixels_per_meter)
    #
    #     print('RPOS', distance)
    #     draw_boxes(frame, out_scores, out_boxes, out_classes, class_names, colors, distance)
    #
    # else:
    #     distance = []
    #     print('No Car')
    #
    # frame.save(os.path.join('out', image_file), quality=90)
    ### END
    image_shape = (720., 1280.)
    # Read until video is completed
    while (cap.isOpened()):
        ret, frame = cap.read()
        batch = 1
        if ret == True:
            # NOTE(review): `index` is computed but never used.
            index = (FrameCnt + 1) % batch
            frame, image_data = preprocess_frame(frame, (int(MODEL_SIZE[0]), int(MODEL_SIZE[1])))
            # Run the network on this frame and time the inference.
            t0 = time.time()
            out_boxes, out_scores, out_classes = sess.run(
                [boxes, scores, classes],
                feed_dict={
                    yolo_model.input: image_data,
                    input_image_shape: [(image_shape[0]), (image_shape[1])],
                    K.learning_phase(): 0
                })
            duration = time.time() - t0
            print('duration', duration)
            print('fps', 1 / duration)
            print('out_boxes', out_boxes)
            ###
            # Estimate a distance for every detection and draw the results.
            l = len(out_boxes)
            distance = np.zeros(shape=(l, 1))
            if not len(out_boxes) == 0:
                for i in range(l):
                    distance[i] = calculate_position(bbox=out_boxes[i],
                                                     transform_matrix=perspective_transform,
                                                     warped_size=UNWARPED_SIZE,
                                                     pix_per_meter=pixels_per_meter)
                print('RPOS', distance)
                draw_boxes(frame, out_scores, out_boxes, out_classes, class_names, colors, distance)
            else:
                distance = []
                print('No Car')
                draw_boxes(frame, out_scores, out_boxes, out_classes, class_names, colors, distance)
            #resized_image = frame.resize(tuple(reversed((VIDEO_SIZE[1], VIDEO_SIZE[0]))), Image.BICUBIC)
            #resized_image.save('/home/crke/Work/YAD2K/1.jpg' , quality=90)
            # Convert the annotated PIL image back to BGR for the video writer.
            pix = np.array(frame)
            pix = pix[:,:,::-1]
            #pix.resize((Width, Height))
            outVideo.write(pix)
        # Break the loop
        else:
            break
    cap.release()
    outVideo.release()
    sess.close()
    print("Finish video convert !!!")
if __name__ == '__main__':
    # Entry point: parse the CLI flags and run detection on the configured video.
    _main(parser.parse_args())
| [
"tensorflow.boolean_mask",
"keras.backend.learning_phase",
"colorsys.hsv_to_rgb",
"numpy.array",
"os.path.exists",
"argparse.ArgumentParser",
"keras.backend.placeholder",
"keras.backend.max",
"cv2.VideoWriter",
"os.mkdir",
"cv2.VideoWriter_fourcc",
"keras.backend.argmax",
"cv2.perspectiveTra... | [((452, 548), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run a YOLO_v2 style detection model on test images.."""'}), "(description=\n 'Run a YOLO_v2 style detection model on test images..')\n", (475, 548), False, 'import argparse\n'), ((2644, 2673), 'keras.backend.argmax', 'K.argmax', (['box_scores'], {'axis': '(-1)'}), '(box_scores, axis=-1)\n', (2652, 2673), True, 'from keras import backend as K\n'), ((2694, 2736), 'keras.backend.max', 'K.max', (['box_scores'], {'axis': '(-1)', 'keepdims': '(False)'}), '(box_scores, axis=-1, keepdims=False)\n', (2699, 2736), True, 'from keras import backend as K\n'), ((3076, 3125), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['box_class_scores', 'filtering_mask'], {}), '(box_class_scores, filtering_mask)\n', (3091, 3125), True, 'import tensorflow as tf\n'), ((3135, 3173), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['boxes', 'filtering_mask'], {}), '(boxes, filtering_mask)\n', (3150, 3173), True, 'import tensorflow as tf\n'), ((3185, 3229), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['box_classes', 'filtering_mask'], {}), '(box_classes, filtering_mask)\n', (3200, 3229), True, 'import tensorflow as tf\n'), ((3327, 3348), 'imghdr.what', 'imghdr.what', (['img_path'], {}), '(img_path)\n', (3338, 3348), False, 'import imghdr\n'), ((3358, 3378), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (3368, 3378), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3473, 3513), 'numpy.array', 'np.array', (['resized_image'], {'dtype': '"""float32"""'}), "(resized_image, dtype='float32')\n", (3481, 3513), True, 'import numpy as np\n'), ((3548, 3577), 'numpy.expand_dims', 'np.expand_dims', (['image_data', '(0)'], {}), '(image_data, 0)\n', (3562, 3577), True, 'import numpy as np\n'), ((3726, 3787), 'cv2.resize', 'cv2.resize', (['frame', '(model_image_size[0], model_image_size[1])'], {}), '(frame, (model_image_size[0], model_image_size[1]))\n', (3736, 
3787), False, 'import time, cv2\n'), ((3802, 3834), 'numpy.array', 'np.array', (['frame'], {'dtype': '"""float32"""'}), "(frame, dtype='float32')\n", (3810, 3834), True, 'import numpy as np\n'), ((4082, 4111), 'numpy.expand_dims', 'np.expand_dims', (['image_data', '(0)'], {}), '(image_data, 0)\n', (4096, 4111), True, 'import numpy as np\n'), ((5225, 5253), 'os.path.basename', 'os.path.basename', (['video_path'], {}), '(video_path)\n', (5241, 5253), False, 'import os\n'), ((5270, 5309), 'os.path.join', 'os.path.join', (['output_path', 'output_Video'], {}), '(output_path, output_Video)\n', (5282, 5309), False, 'import os\n'), ((5318, 5346), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (5334, 5346), False, 'import time, cv2\n'), ((5823, 5854), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (5845, 5854), False, 'import time, cv2\n'), ((5905, 5979), 'cv2.VideoWriter', 'cv2.VideoWriter', (['output_Video', 'fourcc', 'fps', '(VIDEO_SIZE[0], VIDEO_SIZE[1])'], {}), '(output_Video, fourcc, fps, (VIDEO_SIZE[0], VIDEO_SIZE[1]))\n', (5920, 5979), False, 'import time, cv2\n'), ((6001, 6036), 'os.path.expanduser', 'os.path.expanduser', (['args.model_path'], {}), '(args.model_path)\n', (6019, 6036), False, 'import os\n'), ((6123, 6160), 'os.path.expanduser', 'os.path.expanduser', (['args.anchors_path'], {}), '(args.anchors_path)\n', (6141, 6160), False, 'import os\n'), ((6177, 6214), 'os.path.expanduser', 'os.path.expanduser', (['args.classes_path'], {}), '(args.classes_path)\n', (6195, 6214), False, 'import os\n'), ((6228, 6262), 'os.path.expanduser', 'os.path.expanduser', (['args.test_path'], {}), '(args.test_path)\n', (6246, 6262), False, 'import os\n'), ((6278, 6314), 'os.path.expanduser', 'os.path.expanduser', (['args.output_path'], {}), '(args.output_path)\n', (6296, 6314), False, 'import os\n'), ((6441, 6456), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (6454, 6456), True, 'from 
keras import backend as K\n'), ((6785, 6807), 'keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (6795, 6807), False, 'from keras.models import load_model\n'), ((7805, 7823), 'random.seed', 'random.seed', (['(10101)'], {}), '(10101)\n', (7816, 7823), False, 'import random\n'), ((7874, 7896), 'random.shuffle', 'random.shuffle', (['colors'], {}), '(colors)\n', (7888, 7896), False, 'import random\n'), ((7949, 7966), 'random.seed', 'random.seed', (['None'], {}), '(None)\n', (7960, 7966), False, 'import random\n'), ((8208, 8233), 'keras.backend.placeholder', 'K.placeholder', ([], {'shape': '(2,)'}), '(shape=(2,))\n', (8221, 8233), True, 'from keras import backend as K\n'), ((8261, 8380), 'yad2k.models.keras_yolo.yolo_eval', 'yolo_eval', (['yolo_outputs', 'input_image_shape'], {'score_threshold': 'args.score_threshold', 'iou_threshold': 'args.iou_threshold'}), '(yolo_outputs, input_image_shape, score_threshold=args.\n score_threshold, iou_threshold=args.iou_threshold)\n', (8270, 8380), False, 'from yad2k.models.keras_yolo import yolo_eval, yolo_head\n'), ((4433, 4487), 'numpy.array', 'np.array', (['((warped_size[1] - dst[1]) / pix_per_meter[1])'], {}), '((warped_size[1] - dst[1]) / pix_per_meter[1])\n', (4441, 4487), True, 'import numpy as np\n'), ((4561, 4575), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4572, 4575), False, 'import pickle\n'), ((4763, 4777), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4774, 4777), False, 'import pickle\n'), ((6324, 6351), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (6338, 6351), False, 'import os\n'), ((6410, 6431), 'os.mkdir', 'os.mkdir', (['output_path'], {}), '(output_path)\n', (6418, 6431), False, 'import os\n'), ((9638, 9649), 'time.time', 'time.time', ([], {}), '()\n', (9647, 9649), False, 'import time, cv2\n'), ((10048, 10070), 'numpy.zeros', 'np.zeros', ([], {'shape': '(l, 1)'}), '(shape=(l, 1))\n', (10056, 10070), True, 'import numpy as np\n'), 
((10825, 10840), 'numpy.array', 'np.array', (['frame'], {}), '(frame)\n', (10833, 10840), True, 'import numpy as np\n'), ((4294, 4340), 'numpy.array', 'np.array', (['(bbox[1] / 2 + bbox[3] / 2, bbox[2])'], {}), '((bbox[1] / 2 + bbox[3] / 2, bbox[2]))\n', (4302, 4340), True, 'import numpy as np\n'), ((4361, 4408), 'cv2.perspectiveTransform', 'cv2.perspectiveTransform', (['pos', 'transform_matrix'], {}), '(pos, transform_matrix)\n', (4385, 4408), False, 'import time, cv2\n'), ((6737, 6754), 'numpy.array', 'np.array', (['anchors'], {}), '(anchors)\n', (6745, 6754), True, 'import numpy as np\n'), ((7669, 7692), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*x'], {}), '(*x)\n', (7688, 7692), False, 'import colorsys\n'), ((9892, 9903), 'time.time', 'time.time', ([], {}), '()\n', (9901, 9903), False, 'import time, cv2\n'), ((9848, 9866), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (9864, 9866), True, 'from keras import backend as K\n')] |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resize bins for the 10X formatted dataset."""
import collections
import math
import os
from typing import Any
from absl import logging
import anndata
import numpy as np
import scipy.io
import scipy.sparse
import tensorflow as tf
def grange(chrom, start, end):
  """Format a genomic interval as the string 'chrom:start-end'."""
  return '{}:{}-{}'.format(chrom, start, end)
def bins_from_annotation(adata,
                         annotation):
  """Aggregate a uniformly binned count matrix into annotation regions.

  The bin width is inferred from the first entry of ``adata.var_names``.

  Args:
    adata: Binned count matrix; every bin is assumed to have the same width.
    annotation: Path to a CSV annotation file with rows ``chrom,start,end``.

  Returns:
    New count matrix whose variables are the annotated regions.
  """
  known_bins = set(adata.var_names)
  first_start, first_end = adata.var_names[0].split(':')[1].split('-')
  width = int(first_end) - int(first_start)
  region_names = []
  region_counts = []
  with tf.io.gfile.GFile(annotation, 'r') as handle:
    for record in handle:
      fields = record.split(',')
      chrom = fields[0]
      gene_start = int(fields[1])
      gene_end = int(fields[2])
      # Snap the annotated interval outward to whole-bin boundaries.
      lo = math.floor(gene_start / width) * width
      hi = math.ceil(gene_end / width) * width
      candidates = (grange(chrom, pos, pos + width)
                    for pos in range(lo, hi, width))
      overlapping = [name for name in candidates if name in known_bins]
      if overlapping:
        region_counts.append(adata[:, overlapping].X.sum(axis=1))
        region_names.append(grange(chrom, gene_start, gene_end))
  merged = anndata.AnnData(scipy.sparse.csr_matrix(np.hstack(region_counts)))
  merged.var_names = region_names
  merged.obs = adata.obs
  return merged
def merge_bins(adata, bin_size):
  """Merge consecutive fixed-size bins into coarser bins of `bin_size`.

  Args:
    adata: Binned matrix whose var names are formatted as 'chrom:start-end'.
    bin_size: Width (in bases) of the merged output bins.

  Returns:
    New count matrix whose variables are the merged bins.
  """
  # Collect the (start, end) coordinates of every original bin, grouped by
  # chromosome.
  orig_bins = collections.defaultdict(list)
  for coor in adata.var_names:
    chrom, start, end = coor.split(':')[0], int(
        coor.split(':')[1].split('-')[0]), int(
            coor.split(':')[1].split('-')[1])
    orig_bins[chrom].append((start, end))
  logging.info('Done with counting the bins')
  resized_bins_index = []
  resized_chrs = []
  resized_bins_counts = []
  for chrom, ranges in orig_bins.items():
    curr_bin = 0
    curr_acc = []
    # Walk the bins in genomic order, accumulating every original bin that
    # falls into the current `bin_size`-wide window; flush the accumulator
    # whenever a bin starts in a new window.
    for (start, end) in sorted(ranges):
      if start // bin_size == curr_bin:
        curr_acc.append(f'{chrom}:{start}-{end}')
      else:
        if curr_acc:
          # For the empty initialisation at the beginning of the chr.
          resized_bins_counts.append(adata[:, curr_acc].X.sum(axis=1))
          resized_bins_index.append(
              f'{chrom}:{curr_bin*bin_size}-{(curr_bin+1)*bin_size}')
        curr_acc = [f'{chrom}:{start}-{end}']
        curr_bin = start // bin_size
    # Flush the final window of this chromosome.
    resized_bins_counts.append(adata[:, curr_acc].X.sum(axis=1))
    resized_bins_index.append(
        f'{chrom}:{curr_bin*bin_size}-{(curr_bin+1)*bin_size}')
    resized_chrs.append(scipy.sparse.csr_matrix(np.hstack(resized_bins_counts)))
    resized_bins_counts = []
    logging.info('Done with %s', chrom)
  new_adata = anndata.AnnData(
      scipy.sparse.csr_matrix(
          np.hstack([chrom.toarray() for chrom in resized_chrs])))
  new_adata.var_names = resized_bins_index
  new_adata.obs = adata.obs
  return new_adata
| [
"tensorflow.io.gfile.GFile",
"math.ceil",
"math.floor",
"numpy.hstack",
"absl.logging.info",
"collections.defaultdict"
] | [((2301, 2330), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (2324, 2330), False, 'import collections\n'), ((2549, 2592), 'absl.logging.info', 'logging.info', (['"""Done with counting the bins"""'], {}), "('Done with counting the bins')\n", (2561, 2592), False, 'from absl import logging\n'), ((1471, 1505), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['annotation', '"""r"""'], {}), "(annotation, 'r')\n", (1488, 1505), True, 'import tensorflow as tf\n'), ((3512, 3547), 'absl.logging.info', 'logging.info', (['"""Done with %s"""', 'chrom'], {}), "('Done with %s', chrom)\n", (3524, 3547), False, 'from absl import logging\n'), ((2125, 2146), 'numpy.hstack', 'np.hstack', (['annot_rows'], {}), '(annot_rows)\n', (2134, 2146), True, 'import numpy as np\n'), ((1654, 1686), 'math.floor', 'math.floor', (['(gene_start / binsize)'], {}), '(gene_start / binsize)\n', (1664, 1686), False, 'import math\n'), ((1709, 1738), 'math.ceil', 'math.ceil', (['(gene_end / binsize)'], {}), '(gene_end / binsize)\n', (1718, 1738), False, 'import math\n'), ((3446, 3476), 'numpy.hstack', 'np.hstack', (['resized_bins_counts'], {}), '(resized_bins_counts)\n', (3455, 3476), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""Channel coding in L2.
This module contains functions used for conversion between type-1 and type-5
bits in lower MAC. These process individual bursts or blocks, and don't
store state in between calls."""
import ctypes
import numpy as np
import numba # to make things run faster
# Load the shared libcorrect library and declare the C signatures of the two
# entry points we call through ctypes (path is relative to this module).
libcorrect = ctypes.CDLL("../../libcorrect/build/lib/libcorrect.so")
libcorrect.correct_convolutional_create.restype = ctypes.c_void_p
libcorrect.correct_convolutional_create.argtypes = (ctypes.c_size_t, ctypes.c_size_t, ctypes.POINTER(ctypes.c_uint16))
libcorrect.correct_convolutional_decode_soft.restype = ctypes.c_ssize_t
libcorrect.correct_convolutional_decode_soft.argtypes = (ctypes.c_void_p, ctypes.c_char_p, ctypes.c_size_t, ctypes.c_char_p)
# Create a codec for the rate 1/4 mother code
conv_1_4 = libcorrect.correct_convolutional_create(
    4, # Inverse rate
    5, # Order
    (ctypes.c_uint16 * 4)(0b10011, 0b11101, 0b10111, 0b11011) # Polynomials
)
def generate_scrambling_sequence(length, e):
    """Compute the scrambling sequence defined in EN 300 396-2 8.2.5.2.

    e is the DM colour code, an array or list of length 30."""
    TAPS = (1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26, 32)
    # Shift-register state: two ones, the colour code, then room for output.
    state = [1, 1] + list(e) + [0] * length
    if len(state) != 32 + length:
        raise ValueError("e should have a length of 30")
    for pos in range(32, 32 + length):
        acc = 0
        for tap in TAPS:
            acc += state[pos - tap]
        state[pos] = acc & 1
    # TODO: Maybe the whole calculation above could be done with numpy arrays
    return np.array(state[32:], dtype=np.uint8)
# Scrambling sequence for synchronization block 1
sb_scrambling = generate_scrambling_sequence(120, [0]*30)
def scramble(bits, seq):
    """Convert between type 4 and type 5 bits (XOR with the sequence)."""
    return np.bitwise_xor(bits, seq)
def scramble_soft(bits, seq):
    """Convert between type 4 and type 5 soft bits.

    A sequence bit of 1 inverts the soft value (XOR with 0xFF)."""
    mask = seq * 0xFF
    return np.bitwise_xor(bits, mask)
# Descrambling and scrambling are actually the same operations,
# so just use the same functions and give them another name.
descramble = scramble
descramble_soft = scramble_soft
def generate_deinterleaving_pattern(K, a):
    """Generate an interleaving pattern to convert between type 4 and type 3 bits
    according to EN 300 396-2 8.2.4.1.

    K is the block length, a the interleaving parameter; returns a numpy
    index array of length K.
    """
    # `np.int` was a deprecated alias of the builtin `int` and was removed in
    # NumPy 1.24 -- use `int` directly.  `count=K` lets fromiter preallocate.
    return np.fromiter(((a * (i + 1)) % K for i in range(K)), dtype=int, count=K)
def deinterleave(bits, pattern):
    """Convert type 4 bits to type 3 bits using an interleaving pattern."""
    return np.take(bits, pattern)
def hard_to_soft(bits):
    """Convert hard bits (0 or 1) to soft bits (0-0xFF).
    Both are numpy arrays with dtype=np.uint8."""
    lookup = np.zeros(2, dtype=np.uint8)
    lookup[1] = 0xFF
    return lookup[bits]
def soft_to_hard(softbits):
    """Convert soft bits (0-0xFF) to hard bits (0 or 1).
    Both are numpy arrays with dtype=np.uint8."""
    decisions = np.greater_equal(softbits, 0x80)
    return decisions.astype(np.uint8)
def generate_puncturing_pattern(K3, rate = (2,3)):
    """Generate a puncturing pattern for rate 2/3.

    Parameter K3 is the length of a punctured codeword.
    Return a tuple, where first item is the number of bits in an unpunctured
    codeword, and second item is a numpy array that maps bit positions
    in a punctured codeword into bit positions in an unpunctured codeword."""
    # TODO: Other rates
    if rate != (2,3):
        raise ValueError("Only rate 2/3 is implemented for now")
    # Parameters: t bits kept per puncturing period, P their 1-based positions
    # inside one period of 8 mother-code bits.
    K2 = int(K3 * 2 / 3)
    t = 3
    P = (None, 1, 2, 5)
    # `np.int` was removed in NumPy 1.24; the builtin `int` is equivalent.
    pattern = np.zeros(K3, dtype=int)
    for j in range(1, 1+K3):
        i = j
        k = 8 * ((i - 1) // t) + P[i - t * ((i - 1) // t)]
        pattern[j-1] = k-1
    return (4 * K2, pattern)
def depuncture(b3, pattern):
    """Depuncture type 3 soft bits using a puncturing pattern.
    Soft bits should be a numpy array with dtype=np.uint8."""
    n_unpunctured, positions = pattern
    # Positions that are not filled in stay marked as erasures (0x80).
    # The 16 extra trailing erasures are required by the libcorrect decoder
    # to decode the last bits correctly.
    out = np.full(n_unpunctured + 16, 0x80, dtype=np.uint8)
    out[positions] = b3
    return out
def decode_1_4(softbits):
    """Decode rate 1/4 mother code.

    softbits: numpy uint8 array of soft bits (one byte per encoded bit).
    Returns the first 76 decoded hard bits, one bit per array element.
    """
    if softbits.dtype != np.uint8:
        raise TypeError("dtype should be uint8")
    # One output byte per 4*8 encoded bits, plus one for the remainder.
    decoded = np.zeros(len(softbits) // (4*8) + 1, dtype=np.uint8)
    # Call into libcorrect; the buffers are passed as raw data pointers.
    n_decoded = libcorrect.correct_convolutional_decode_soft(
        conv_1_4, # Codec
        ctypes.c_char_p(softbits.ctypes.data), # Encoded soft bits
        len(softbits), # Number of encoded bits
        ctypes.c_char_p(decoded.ctypes.data), # Buffer for decoded data
    )
    assert n_decoded <= len(decoded)
    # libcorrect returns 8 bits packed into a byte,
    # but we want just 1 bit per byte, so unpack
    return np.unpackbits(decoded, bitorder="big")[0:76]
# If numba is not available, just remove the @numba.jit line.
@numba.jit
def crc16(bits):
    """(K1+16, K1) block code from EN 300 396-2 8.2.3.2, i.e. CRC"""
    CRCPOLY = 0x8408
    reg = 0xFFFF  # Shift register stored as an integer
    for bit in bits:
        if (bit ^ reg) & 1:
            reg = (reg >> 1) ^ CRCPOLY
        else:
            reg = reg >> 1
    reg ^= 0xFFFF
    out = np.zeros(16, dtype=np.uint8)
    for i in range(16):
        out[i] = (reg >> i) & 1
    return out
| [
"ctypes.POINTER",
"numpy.unpackbits",
"numpy.array",
"numpy.zeros",
"ctypes.CDLL",
"numpy.full",
"ctypes.c_char_p"
] | [((321, 376), 'ctypes.CDLL', 'ctypes.CDLL', (['"""../../libcorrect/build/lib/libcorrect.so"""'], {}), "('../../libcorrect/build/lib/libcorrect.so')\n", (332, 376), False, 'import ctypes\n'), ((529, 560), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_uint16'], {}), '(ctypes.c_uint16)\n', (543, 560), False, 'import ctypes\n'), ((1500, 1532), 'numpy.array', 'np.array', (['p[32:]'], {'dtype': 'np.uint8'}), '(p[32:], dtype=np.uint8)\n', (1508, 1532), True, 'import numpy as np\n'), ((3372, 3398), 'numpy.zeros', 'np.zeros', (['K3'], {'dtype': 'np.int'}), '(K3, dtype=np.int)\n', (3380, 3398), True, 'import numpy as np\n'), ((3920, 3965), 'numpy.full', 'np.full', (['(pattern[0] + 16)', '(128)'], {'dtype': 'np.uint8'}), '(pattern[0] + 16, 128, dtype=np.uint8)\n', (3927, 3965), True, 'import numpy as np\n'), ((5050, 5078), 'numpy.zeros', 'np.zeros', (['(16)'], {'dtype': 'np.uint8'}), '(16, dtype=np.uint8)\n', (5058, 5078), True, 'import numpy as np\n'), ((2557, 2591), 'numpy.array', 'np.array', (['(0, 255)'], {'dtype': 'np.uint8'}), '((0, 255), dtype=np.uint8)\n', (2565, 2591), True, 'import numpy as np\n'), ((4316, 4353), 'ctypes.c_char_p', 'ctypes.c_char_p', (['softbits.ctypes.data'], {}), '(softbits.ctypes.data)\n', (4331, 4353), False, 'import ctypes\n'), ((4431, 4467), 'ctypes.c_char_p', 'ctypes.c_char_p', (['decoded.ctypes.data'], {}), '(decoded.ctypes.data)\n', (4446, 4467), False, 'import ctypes\n'), ((4651, 4689), 'numpy.unpackbits', 'np.unpackbits', (['decoded'], {'bitorder': '"""big"""'}), "(decoded, bitorder='big')\n", (4664, 4689), True, 'import numpy as np\n')] |
"""
<NAME>
created: 1/17/21
finalized: 5/4/21
weightedmedianfunc.py
This code is a function that calculates the weighted median. A list of scores and
their corresponding weights are passed in as arguments. Once the weighted median
is calculated than a graph is generated. This is help users visualize the data
and the calculations.
"""
import numpy as np
import matplotlib.pyplot as plt
def weighted_median(nscores,dist, printMode=False):
    """Compute the weighted median of `nscores`, weighting each score by the
    reciprocal of its corresponding entry in `dist`.

    With printMode=True the function interactively offers to print the
    intermediate results and to draw a bar chart of the calculation.
    Returns the weighted median value.
    """
    wm = 0
    # Replace zero distances with a tiny value so the reciprocal weights
    # below are defined.  NOTE: this mutates the caller's `dist` in place.
    for i in range(len(dist)):
        if dist[i] == 0:
            dist[i] = 0.000000000000000000001
    weights = [1.0/ x for x in dist]
    # `middle` is the total sum of all weights.
    middle = 0
    for x in range(0, len(weights)):
        middle += weights[x]
    # Accumulate pairs of weights until the running lower bound reaches
    # `middle`, tracking the element index of the lower bound.
    lb = weights[0]
    low = 0
    while lb < middle:
        lb += weights[low] + weights[low+1]
        low += 1
    if lb > middle:
        lb -= (weights[low]+weights[low-1])
        low -= 1
    ub = 0.0
    # Derive the upper bound; interpolate between the two bracketing scores
    # unless the lower bound hit `middle` exactly.
    if lb != middle:
        ub = lb + weights[low] + weights[low+1]
        high = low + 1
        uscore = nscores[high]
        lscore = nscores[low]
        d = ub - lb
        t = ub - middle
        if d == 0:
            wm = ub
        else:
            wm = (uscore*((d-t)/d) + lscore*(t/d))
    else:
        ub = lb
        high = low
        wm = nscores[low]
    # Optionally print the numeric results (interactive yes/no prompt).
    # NOTE(review): when lb == middle, `uscore`/`lscore` are never assigned,
    # so printing them raises NameError -- silently caught by the broad
    # `except Exception` below.
    if printMode:
        try:
            print("Do you want the results printed?")
            print("Enter: yes or no")
            resp1 = input()
            print("\n\n")
            if resp1 == "yes":
                print("Results:")
                print("Weights")
                print("    Upper Bound:",round(ub,5))
                print("    Lower Bound:",round(lb,5))
                print("    Middle:", round(middle,5))
                print("Scores")
                print("    Upper Bound:",uscore,"%")
                print("    Lower Bound:",lscore,"%")
                print("    Weighted Median:",round(wm,2))
            else:
                print("Please input a valid response: yes or no.")
        except Exception as e:
            print(e)
    # Optionally draw the bar chart (interactive yes/no prompt).
    if printMode:
        try:
            print("Would you like the graph of the weighted median printed?")
            print("Enter: yes or no")
            resp2 = input()
            print("\n\n")
            if resp2 == "yes":
                # `position` holds the starting edge of each bar in the graph
                position = np.cumsum([2*x for x in [0] + weights[:-1]])
                diameter = [2*x for x in weights] # width of each bar
                # Draw and style the graph shown to the user.
                plt.bar(position,nscores,diameter,color = "#1E90FF", edgecolor = "black", align = "edge")
                plt.axhline(wm, color = "#006400", label = "weighted median")
                plt.axvline(middle, color ="red", label = "middle")
                plt.axvline(ub, color ="#2F4F4F", linestyle ="dashdot", label= "upper bound" )
                plt.axvline(lb, color ="#2F4F4F", linestyle ="dashed", label= "lower bound")
                plt.margins(x=0)
                plt.xlabel("Weights")
                plt.ylabel("Scores")
                plt.title("Weighted Median")
                plt.legend(bbox_to_anchor=(1.05,.5), ncol= 1)
                plt.plot([lb,ub],[lscore, uscore], color ="black")
                plt.show()
            else:
                print("Please input a valid response: yes or no.")
        except Exception as e:
            print(e)
    return wm
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.bar",
"numpy.cumsum",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.margins",
"matplotlib.pyplot.show"
] | [((2782, 2830), 'numpy.cumsum', 'np.cumsum', (['[(2 * x) for x in [0] + weights[:-1]]'], {}), '([(2 * x) for x in [0] + weights[:-1]])\n', (2791, 2830), True, 'import numpy as np\n'), ((3011, 3101), 'matplotlib.pyplot.bar', 'plt.bar', (['position', 'nscores', 'diameter'], {'color': '"""#1E90FF"""', 'edgecolor': '"""black"""', 'align': '"""edge"""'}), "(position, nscores, diameter, color='#1E90FF', edgecolor='black',\n align='edge')\n", (3018, 3101), True, 'import matplotlib.pyplot as plt\n'), ((3118, 3175), 'matplotlib.pyplot.axhline', 'plt.axhline', (['wm'], {'color': '"""#006400"""', 'label': '"""weighted median"""'}), "(wm, color='#006400', label='weighted median')\n", (3129, 3175), True, 'import matplotlib.pyplot as plt\n'), ((3197, 3245), 'matplotlib.pyplot.axvline', 'plt.axvline', (['middle'], {'color': '"""red"""', 'label': '"""middle"""'}), "(middle, color='red', label='middle')\n", (3208, 3245), True, 'import matplotlib.pyplot as plt\n'), ((3266, 3340), 'matplotlib.pyplot.axvline', 'plt.axvline', (['ub'], {'color': '"""#2F4F4F"""', 'linestyle': '"""dashdot"""', 'label': '"""upper bound"""'}), "(ub, color='#2F4F4F', linestyle='dashdot', label='upper bound')\n", (3277, 3340), True, 'import matplotlib.pyplot as plt\n'), ((3362, 3435), 'matplotlib.pyplot.axvline', 'plt.axvline', (['lb'], {'color': '"""#2F4F4F"""', 'linestyle': '"""dashed"""', 'label': '"""lower bound"""'}), "(lb, color='#2F4F4F', linestyle='dashed', label='lower bound')\n", (3373, 3435), True, 'import matplotlib.pyplot as plt\n'), ((3456, 3472), 'matplotlib.pyplot.margins', 'plt.margins', ([], {'x': '(0)'}), '(x=0)\n', (3467, 3472), True, 'import matplotlib.pyplot as plt\n'), ((3490, 3511), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Weights"""'], {}), "('Weights')\n", (3500, 3511), True, 'import matplotlib.pyplot as plt\n'), ((3529, 3549), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Scores"""'], {}), "('Scores')\n", (3539, 3549), True, 'import matplotlib.pyplot as plt\n'), ((3567, 
3595), 'matplotlib.pyplot.title', 'plt.title', (['"""Weighted Median"""'], {}), "('Weighted Median')\n", (3576, 3595), True, 'import matplotlib.pyplot as plt\n'), ((3613, 3659), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 0.5)', 'ncol': '(1)'}), '(bbox_to_anchor=(1.05, 0.5), ncol=1)\n', (3623, 3659), True, 'import matplotlib.pyplot as plt\n'), ((3676, 3727), 'matplotlib.pyplot.plot', 'plt.plot', (['[lb, ub]', '[lscore, uscore]'], {'color': '"""black"""'}), "([lb, ub], [lscore, uscore], color='black')\n", (3684, 3727), True, 'import matplotlib.pyplot as plt\n'), ((3744, 3754), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3752, 3754), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import pqdict
import generator
def get_start_end(maze):
    """Return the (row, col) coordinates of the start (3) and end (4) cells.

    Args:
        maze: 2-D numpy array encoding the maze grid.

    Returns:
        Tuple (start, end) of coordinate tuples.
    """
    # np.argwhere yields an (n, 2) array of matches; flatten() turns the
    # single match into a (row, col) pair.  The unused `r, c = maze.shape`
    # locals of the original were removed.
    start = np.argwhere(maze == 3).flatten()
    end = np.argwhere(maze == 4).flatten()
    return tuple(start), tuple(end)
def l1_distance(a, b):
    """Manhattan (L1) distance between point `a` (array) and point `b` (tuple)."""
    delta = a - np.array(b)
    return np.abs(delta).sum()
def solver(maze):
    """A*-style search over the maze grid; returns the start-to-end path.

    Cells with value 0 (free), 3 (start) and 4 (end) are traversable.
    The frontier is a pqdict min-priority queue keyed by cost + L1
    heuristic; each priority maps to a list of nodes sharing it.
    """
    # NOTE(review): r and c are computed but never used.
    r, c = maze.shape
    start, end = get_start_end(maze)
    directions = [np.array([1, 0]), np.array([0, 1]), np.array([-1, 0]), np.array([0, -1])]
    path = []
    path_pre = {start: None}  # back-pointers for path reconstruction
    cost = {start: 0}
    frontier_queue = pqdict.minpq({0: [start]})
    while frontier_queue:
        # Pop one node from the lowest-priority bucket; drop the bucket
        # once it is empty.
        priority = frontier_queue.top()
        frontier = frontier_queue[priority][0]
        del frontier_queue[priority][0]
        if not frontier_queue[priority]:
            del frontier_queue[priority]
        if frontier == end:
            break
        for dir_neighbor in directions:
            next_node = tuple(frontier + dir_neighbor)
            next_cost = cost[frontier] + 1
            # Relax the neighbour if it is traversable and this route is new
            # or cheaper than the best one seen so far.
            if maze[next_node] in [0, 3, 4] and (next_node not in cost or next_cost < cost[next_node]):
                cost[next_node] = next_cost
                path_pre[next_node] = frontier
                heuristic = next_cost + l1_distance(next_node, end)
                # print(next_node)
                if heuristic in frontier_queue:
                    frontier_queue[heuristic].append(next_node)
                else:
                    frontier_queue[heuristic] = [next_node]
    # Walk the back-pointers from the goal to rebuild the path in order.
    node = end
    while node is not None:
        path.insert(0, node)
        node = path_pre[node]
    return path
def pad_maze(maze, scope):
    """Pad the maze on all sides with walls (value 1) so that a
    scope-sized window centred on any original cell stays in bounds."""
    margin = scope // 2
    return np.pad(maze, margin, mode='constant', constant_values=1)
def take_steps(maze, path, scope=5, decay=0.9):
    """Replay `path` through `maze`, emitting one training sample per move.

    Args:
        maze: 2-D maze array (assumes values 0..4 with 3=start, 4=end --
            confirm against the generator).
        path: list of (row, col) tuples from start to end.
        scope: side length of the square window cropped around the agent.
        decay: per-step multiplicative decay of the visited-cell trace.

    Returns:
        perceptions: (len(path)-1, scope, scope, 6) float16 array -- five
            one-hot channels of the cropped cell values (start/end cells are
            additionally marked in channel 0) plus the visit-trace channel.
        action_label: (len(path)-1, 4) int8 one-hot array of the move taken.
    """
    visited = np.zeros(maze.shape, "float16")
    center = path[0]
    path = path[1:]
    channel = 5
    radius = scope // 2
    # Maps the step direction vector to its action index.
    direction_map = {(1, 0): 0, (0, 1): 1, (-1, 0): 2, (0, -1): 3}
    scope_map = np.zeros((len(path), scope, scope, channel), dtype='float16')
    visited_map = np.zeros((len(path), scope, scope, 1), dtype='float16')
    action_label = np.zeros((len(path), 4), dtype='int8')
    idx = 0
    for next_point in path:
        # Decay the global visit trace, then mark the current position.
        visited *= decay
        visited[center] = 1.0
        cropped_map = maze[center[0] - radius: center[0] + radius + 1, center[1] - radius: center[1] + radius + 1]
        visited_map[idx, :, :, 0] = visited[center[0] - radius: center[0] + radius + 1,
                                    center[1] - radius: center[1] + radius + 1]
        # One-hot encode each cell value into its channel.
        for c in range(channel):
            scope_map[idx, :, :, c][cropped_map == c] = 1.0
        # Treat start (3) and end (4) cells as free space in channel 0.
        scope_map[idx, :, :, 0][cropped_map == 3] = 1.0
        scope_map[idx, :, :, 0][cropped_map == 4] = 1.0
        direction = next_point - np.array(center)
        action_label[idx, direction_map[tuple(direction)]] = 1.0
        center = next_point
        idx += 1
    perceptions = np.concatenate((scope_map, visited_map), axis=3)
    return perceptions, action_label
def generate_samples(num_sample, size=1000, scope=49, decay=0.9):
    """Generate (perception, action) training samples from random mazes.

    Builds `num_sample` mazes with the Prim generator, solves each one,
    and stacks the per-step samples produced by take_steps."""
    inputs = np.zeros((0, scope, scope, 6))
    targets = np.zeros((0, 4))
    maze_gen = generator.Prim(size, size)
    for sample_idx in range(num_sample):
        padded = pad_maze(maze_gen.generate(), scope)
        route = solver(padded)
        perceptions, actions = take_steps(padded, route, scope, decay)
        inputs = np.concatenate((inputs, perceptions), axis=0)
        targets = np.concatenate((targets, actions), axis=0)
        if sample_idx % 20 == 0:
            print("{} mazes generated".format(sample_idx))
    return inputs, targets
if __name__ == '__main__':
    # Build a training set from 100 random mazes and persist it to disk.
    inputs, labels = generate_samples(100)
    np.save("inputs.npy", inputs)
    np.save("labels.npy", labels)
| [
"numpy.array",
"numpy.zeros",
"numpy.pad",
"numpy.argwhere",
"numpy.concatenate",
"pqdict.minpq",
"numpy.save",
"generator.Prim"
] | [((549, 577), 'pqdict.minpq', 'pqdict.minpq', (['{(0): [start]}'], {}), '({(0): [start]})\n', (561, 577), False, 'import pqdict\n'), ((1649, 1707), 'numpy.pad', 'np.pad', (['maze', '(scope // 2,)', '"""constant"""'], {'constant_values': '(1)'}), "(maze, (scope // 2,), 'constant', constant_values=1)\n", (1655, 1707), True, 'import numpy as np\n'), ((1772, 1803), 'numpy.zeros', 'np.zeros', (['maze.shape', '"""float16"""'], {}), "(maze.shape, 'float16')\n", (1780, 1803), True, 'import numpy as np\n'), ((2931, 2979), 'numpy.concatenate', 'np.concatenate', (['(scope_map, visited_map)'], {'axis': '(3)'}), '((scope_map, visited_map), axis=3)\n', (2945, 2979), True, 'import numpy as np\n'), ((3105, 3135), 'numpy.zeros', 'np.zeros', (['(0, scope, scope, 6)'], {}), '((0, scope, scope, 6))\n', (3113, 3135), True, 'import numpy as np\n'), ((3149, 3165), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {}), '((0, 4))\n', (3157, 3165), True, 'import numpy as np\n'), ((3177, 3203), 'generator.Prim', 'generator.Prim', (['size', 'size'], {}), '(size, size)\n', (3191, 3203), False, 'import generator\n'), ((3708, 3737), 'numpy.save', 'np.save', (['"""inputs.npy"""', 'inputs'], {}), "('inputs.npy', inputs)\n", (3715, 3737), True, 'import numpy as np\n'), ((3742, 3771), 'numpy.save', 'np.save', (['"""labels.npy"""', 'labels'], {}), "('labels.npy', labels)\n", (3749, 3771), True, 'import numpy as np\n'), ((388, 404), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (396, 404), True, 'import numpy as np\n'), ((406, 422), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (414, 422), True, 'import numpy as np\n'), ((424, 441), 'numpy.array', 'np.array', (['[-1, 0]'], {}), '([-1, 0])\n', (432, 441), True, 'import numpy as np\n'), ((443, 460), 'numpy.array', 'np.array', (['[0, -1]'], {}), '([0, -1])\n', (451, 460), True, 'import numpy as np\n'), ((3408, 3459), 'numpy.concatenate', 'np.concatenate', (['(input_tensor, perceptions)'], {'axis': '(0)'}), '((input_tensor, 
perceptions), axis=0)\n', (3422, 3459), True, 'import numpy as np\n'), ((3477, 3523), 'numpy.concatenate', 'np.concatenate', (['(labels, action_label)'], {'axis': '(0)'}), '((labels, action_label), axis=0)\n', (3491, 3523), True, 'import numpy as np\n'), ((111, 133), 'numpy.argwhere', 'np.argwhere', (['(maze == 3)'], {}), '(maze == 3)\n', (122, 133), True, 'import numpy as np\n'), ((154, 176), 'numpy.argwhere', 'np.argwhere', (['(maze == 4)'], {}), '(maze == 4)\n', (165, 176), True, 'import numpy as np\n'), ((2784, 2800), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (2792, 2800), True, 'import numpy as np\n'), ((277, 288), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (285, 288), True, 'import numpy as np\n')] |
import numpy
def lax_friedrichs(q_minus, q_plus, simulation):
    """Lax-Friedrichs numerical flux at the interior cell interfaces.

    The boundary entries of the returned array are left at zero."""
    wave_speed = simulation.dx / simulation.dt
    physical_minus = simulation.model.flux(q_minus)
    physical_plus = simulation.model.flux(q_plus)
    result = numpy.zeros_like(q_minus)
    flux_average = physical_plus[:, 0:-2] + physical_minus[:, 1:-1]
    state_jump = q_plus[:, 0:-2] - q_minus[:, 1:-1]
    result[:, 1:-1] = 0.5 * (flux_average + wave_speed * state_jump)
    return result
def upwind(q_minus, q_plus, simulation):
    """Upwind numerical flux: solve the Riemann problem at each interior
    interface.  Boundary entries of the returned array are left at zero."""
    left_states = q_plus[:, 0:-2]
    right_states = q_minus[:, 1:-1]
    result = numpy.zeros_like(q_minus)
    result[:, 1:-1] = simulation.model.riemann_problem_flux(left_states,
                                                            right_states)
    return result
| [
"numpy.zeros_like"
] | [((116, 141), 'numpy.zeros_like', 'numpy.zeros_like', (['q_minus'], {}), '(q_minus)\n', (132, 141), False, 'import numpy\n'), ((441, 466), 'numpy.zeros_like', 'numpy.zeros_like', (['q_minus'], {}), '(q_minus)\n', (457, 466), False, 'import numpy\n')] |
#%%
#%matplotlib inline
#%%
import tensorflow as tf
#%%
sess = tf.InteractiveSession()
#%%
import keras.backend as K
K.set_image_data_format("channels_first")
import keras
import numpy as np
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D
from keras.layers import Flatten, Lambda, BatchNormalization
from keras.models import Sequential
from keras.optimizers import Adam as Adam
from keras.layers.advanced_activations import LeakyReLU
#%%
# Used to save and load training histories
import pickle
from collections import defaultdict
import resource, sys
# limit recursion depth
limit = sys.getrecursionlimit()
print(limit)
#resource.setrlimit(resource.RLIMIT_STACK, (2**29, -1))
#sys.setrecusionlimit(2**29 - 1)
#%%
from scipy import io as spio
emnist = spio.loadmat("/Users/andereggt/datasets/emnist/matlab/emnist-digits.mat")
#%%
# Load training dataset and labels
x_train = emnist["dataset"][0][0][0][0][0][0]
x_train = x_train.astype(np.float32)
y_train = emnist["dataset"][0][0][0][0][0][1]
#%%
# Load test dataset and labels
x_test = emnist["dataset"][0][0][1][0][0][0]
x_test = x_test.astype(np.float32)
y_test = emnist["dataset"][0][0][1][0][0][1]
#%%
# Store labels for visualization
train_labels = y_train
test_labels = y_test
#%%
print("Training data shape: ", x_train.shape)
print("Training label shape: ", y_train.shape)
#%%
# Normalize datasets
x_train /= 255
x_test /= 255
print(x_train)
#%%
# Reshape using matlab order
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28, order="A")
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28, order="A")
print("Reshaped training data: ", x_train.shape)
#%%
# labels should be onehot encoded
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
print("One-hot encoded label shape: ", y_train.shape)
#%%
# Verify data has been loaded correctly
samplenum = 5437
import matplotlib.pyplot as plt
img = x_train[samplenum]
plt.imshow(img[0], cmap='gray')
#%%
# Reshape test labels
test_labels = test_labels.reshape(40000)
print("Reshaped test labels: ", test_labels.shape)
#%%
# Define Model
# Calculate mean and standard deviation
mean_px = x_train.mean().astype(np.float32)
std_px = x_train.std().astype(np.float32)
# Define function to normalize input data
def norm_input(x):
    """Standardise pixels with the training-set mean and std (module globals)."""
    return (x - mean_px) / std_px
# Batchnorm + dropout + data augmentation
def create_model():
    """Build and compile the digit-classifier CNN.

    Architecture: input normalisation, two conv blocks (32 then 64 filters,
    each LeakyReLU + batch-norm + max-pooling), then a 512-unit dense layer
    with dropout 0.2 and a 10-way softmax.  Compiled with Adam and
    categorical cross-entropy.
    """
    model = Sequential([
        Lambda(norm_input, input_shape=(1,28,28), output_shape=(1,28,28)),
        Conv2D(32, (3,3)),
        LeakyReLU(),
        BatchNormalization(axis=1),
        Conv2D(32, (3,3)),
        LeakyReLU(),
        MaxPooling2D(),
        BatchNormalization(axis=1),
        Conv2D(64, (3,3)),
        LeakyReLU(),
        BatchNormalization(axis=1),
        Conv2D(64, (3,3)),
        LeakyReLU(),
        MaxPooling2D(),
        Flatten(),
        BatchNormalization(),
        Dense(512),
        LeakyReLU(),
        BatchNormalization(),
        Dropout(0.2),
        Dense(10, activation='softmax')
    ])
    model.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    return model
#%%
# Use Keras data augmentation
batch_size = 512
from keras.preprocessing.image import ImageDataGenerator
gen = ImageDataGenerator(rotation_range=12, width_shift_range=0.1, shear_range=0.3,
height_shift_range=0.1, zoom_range=0.1, data_format='channels_first')
batches = gen.flow(x_train, y_train, batch_size=batch_size)
test_batches = gen.flow(x_test, y_test, batch_size=batch_size)
steps_per_epoch = int(np.ceil(batches.n/batch_size))
validation_steps = int(np.ceil(test_batches.n/batch_size))
#%%
# Visualize data gen
import matplotlib.pyplot as plt
img = x_train[1]
plt.imshow(img[0], cmap='gray')
#%%
# Get augmented images
img = np.expand_dims(img, axis=0)
aug_iter = gen.flow(img)
aug_img = next(aug_iter)[0].astype(np.float32)
print("Augmented image shape: ", aug_img.shape)
import matplotlib.pyplot as plt
f = plt.figure(figsize=(12,6))
for i in range(8):
sp = f.add_subplot(2, 26//3, i+1)
sp.axis('Off')
aug_img = next(aug_iter)[0].astype(np.float32)
plt.imshow(aug_img[0], cmap='gray')
#%%
# Create 10 models
# Build an ensemble of 10 identically configured models.
models = []
weights_epoch = 0
for i in range(10):
    m = create_model()
    models.append(m)
#%%
eval_batch_size = 512
num_iterations = 1
num_epochs = 10
import os
if not os.path.exists("dropout_0.2"):
    os.mkdir("dropout_0.2")
if not os.path.exists("dropout_0.2/weights"):
    os.mkdir("dropout_0.2/weights")
if not os.path.exists("dropout_0.2/history"):
    os.mkdir("dropout_0.2/history")
# Train every ensemble member for num_epochs per iteration, checkpointing
# weights and logging per-model and ensemble test error rates.
for iteration in range(num_iterations):
    cur_epoch = (iteration + 1) * num_epochs + weights_epoch
    for i, m in enumerate(models):
        # NOTE(review): assigning m.optimizer.lr directly -- presumably sets
        # the Adam learning rate; verify against the Keras version in use.
        m.optimizer.lr = 0.000001
        h = m.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=num_epochs, verbose=0,
                        validation_data=test_batches, validation_steps=validation_steps)
        m.save_weights("dropout_0.2/weights/{:03d}epochs_weights_model_{}.pkl".format(cur_epoch, i))
    # evaluate test error rate for ensemble (mean of member predictions)
    all_preds = np.stack([m.predict(x_test, batch_size=eval_batch_size) for m in models])
    avg_preds = all_preds.mean(axis=0)
    test_error_ensemble = (1 - keras.metrics.categorical_accuracy(y_test, avg_preds).eval().mean()) * 100
    # write test error rate for ensemble and every single model to text file
    with open("dropout_0.2/history/test_errors_epoch_{:03d}.txt".format(cur_epoch), "w") as text_file:
        text_file.write("epoch: {} test error on ensemble: {}\n".format(cur_epoch, test_error_ensemble))
        for m in models:
            pred = np.array(m.predict(x_test, batch_size=eval_batch_size))
            test_err = (1 - keras.metrics.categorical_accuracy(y_test, pred).eval().mean()) * 100
            text_file.write("{}\n".format(test_err))
#%%
eval_batch_size = 512
all_preds = np.stack([m.predict(x_test, batch_size=eval_batch_size) for m in models])
avg_preds = all_preds.mean(axis=0)
print("Ensemble error rate: ", (1 - keras.metrics.categorical_accuracy(y_test, avg_preds).eval().mean()) * 100)
#%%
| [
"keras.layers.Conv2D",
"keras.metrics.categorical_accuracy",
"scipy.io.loadmat",
"keras.preprocessing.image.ImageDataGenerator",
"keras.utils.to_categorical",
"keras.layers.Dense",
"matplotlib.pyplot.imshow",
"os.path.exists",
"os.mkdir",
"keras.layers.advanced_activations.LeakyReLU",
"keras.opt... | [((66, 89), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (87, 89), True, 'import tensorflow as tf\n'), ((121, 162), 'keras.backend.set_image_data_format', 'K.set_image_data_format', (['"""channels_first"""'], {}), "('channels_first')\n", (144, 162), True, 'import keras.backend as K\n'), ((607, 630), 'sys.getrecursionlimit', 'sys.getrecursionlimit', ([], {}), '()\n', (628, 630), False, 'import resource, sys\n'), ((777, 850), 'scipy.io.loadmat', 'spio.loadmat', (['"""/Users/andereggt/datasets/emnist/matlab/emnist-digits.mat"""'], {}), "('/Users/andereggt/datasets/emnist/matlab/emnist-digits.mat')\n", (789, 850), True, 'from scipy import io as spio\n'), ((1698, 1737), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train', '(10)'], {}), '(y_train, 10)\n', (1724, 1737), False, 'import keras\n'), ((1747, 1785), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test', '(10)'], {}), '(y_test, 10)\n', (1773, 1785), False, 'import keras\n'), ((1961, 1992), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img[0]'], {'cmap': '"""gray"""'}), "(img[0], cmap='gray')\n", (1971, 1992), True, 'import matplotlib.pyplot as plt\n'), ((3261, 3413), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rotation_range': '(12)', 'width_shift_range': '(0.1)', 'shear_range': '(0.3)', 'height_shift_range': '(0.1)', 'zoom_range': '(0.1)', 'data_format': '"""channels_first"""'}), "(rotation_range=12, width_shift_range=0.1, shear_range=\n 0.3, height_shift_range=0.1, zoom_range=0.1, data_format='channels_first')\n", (3279, 3413), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((3744, 3775), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img[0]'], {'cmap': '"""gray"""'}), "(img[0], cmap='gray')\n", (3754, 3775), True, 'import matplotlib.pyplot as plt\n'), ((3810, 3837), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (3824, 3837), True, 
'import numpy as np\n'), ((3997, 4024), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (4007, 4024), True, 'import matplotlib.pyplot as plt\n'), ((3579, 3610), 'numpy.ceil', 'np.ceil', (['(batches.n / batch_size)'], {}), '(batches.n / batch_size)\n', (3586, 3610), True, 'import numpy as np\n'), ((3633, 3669), 'numpy.ceil', 'np.ceil', (['(test_batches.n / batch_size)'], {}), '(test_batches.n / batch_size)\n', (3640, 3669), True, 'import numpy as np\n'), ((4155, 4190), 'matplotlib.pyplot.imshow', 'plt.imshow', (['aug_img[0]'], {'cmap': '"""gray"""'}), "(aug_img[0], cmap='gray')\n", (4165, 4190), True, 'import matplotlib.pyplot as plt\n'), ((4391, 4420), 'os.path.exists', 'os.path.exists', (['"""dropout_0.2"""'], {}), "('dropout_0.2')\n", (4405, 4420), False, 'import os\n'), ((4426, 4449), 'os.mkdir', 'os.mkdir', (['"""dropout_0.2"""'], {}), "('dropout_0.2')\n", (4434, 4449), False, 'import os\n'), ((4457, 4494), 'os.path.exists', 'os.path.exists', (['"""dropout_0.2/weights"""'], {}), "('dropout_0.2/weights')\n", (4471, 4494), False, 'import os\n'), ((4500, 4531), 'os.mkdir', 'os.mkdir', (['"""dropout_0.2/weights"""'], {}), "('dropout_0.2/weights')\n", (4508, 4531), False, 'import os\n'), ((4539, 4576), 'os.path.exists', 'os.path.exists', (['"""dropout_0.2/history"""'], {}), "('dropout_0.2/history')\n", (4553, 4576), False, 'import os\n'), ((4582, 4613), 'os.mkdir', 'os.mkdir', (['"""dropout_0.2/history"""'], {}), "('dropout_0.2/history')\n", (4590, 4613), False, 'import os\n'), ((3064, 3070), 'keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (3068, 3070), True, 'from keras.optimizers import Adam as Adam\n'), ((2442, 2511), 'keras.layers.Lambda', 'Lambda', (['norm_input'], {'input_shape': '(1, 28, 28)', 'output_shape': '(1, 28, 28)'}), '(norm_input, input_shape=(1, 28, 28), output_shape=(1, 28, 28))\n', (2448, 2511), False, 'from keras.layers import Flatten, Lambda, BatchNormalization\n'), ((2517, 2535), 
'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {}), '(32, (3, 3))\n', (2523, 2535), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D\n'), ((2544, 2555), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {}), '()\n', (2553, 2555), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((2565, 2591), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(1)'}), '(axis=1)\n', (2583, 2591), False, 'from keras.layers import Flatten, Lambda, BatchNormalization\n'), ((2601, 2619), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {}), '(32, (3, 3))\n', (2607, 2619), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D\n'), ((2628, 2639), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {}), '()\n', (2637, 2639), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((2649, 2663), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (2661, 2663), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D\n'), ((2673, 2699), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(1)'}), '(axis=1)\n', (2691, 2699), False, 'from keras.layers import Flatten, Lambda, BatchNormalization\n'), ((2709, 2727), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {}), '(64, (3, 3))\n', (2715, 2727), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D\n'), ((2736, 2747), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {}), '()\n', (2745, 2747), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((2757, 2783), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(1)'}), '(axis=1)\n', (2775, 2783), False, 'from keras.layers import Flatten, Lambda, BatchNormalization\n'), ((2793, 2811), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {}), '(64, (3, 3))\n', (2799, 2811), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D\n'), 
((2820, 2831), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {}), '()\n', (2829, 2831), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((2841, 2855), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (2853, 2855), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D\n'), ((2865, 2874), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2872, 2874), False, 'from keras.layers import Flatten, Lambda, BatchNormalization\n'), ((2884, 2904), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2902, 2904), False, 'from keras.layers import Flatten, Lambda, BatchNormalization\n'), ((2914, 2924), 'keras.layers.Dense', 'Dense', (['(512)'], {}), '(512)\n', (2919, 2924), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D\n'), ((2934, 2945), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {}), '()\n', (2943, 2945), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((2955, 2975), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2973, 2975), False, 'from keras.layers import Flatten, Lambda, BatchNormalization\n'), ((2985, 2997), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2992, 2997), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D\n'), ((3007, 3038), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (3012, 3038), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D\n'), ((5289, 5342), 'keras.metrics.categorical_accuracy', 'keras.metrics.categorical_accuracy', (['y_test', 'avg_preds'], {}), '(y_test, avg_preds)\n', (5323, 5342), False, 'import keras\n'), ((6094, 6147), 'keras.metrics.categorical_accuracy', 'keras.metrics.categorical_accuracy', (['y_test', 'avg_preds'], {}), '(y_test, avg_preds)\n', (6128, 6147), False, 'import keras\n'), ((5787, 5835), 'keras.metrics.categorical_accuracy', 
'keras.metrics.categorical_accuracy', (['y_test', 'pred'], {}), '(y_test, pred)\n', (5821, 5835), False, 'import keras\n')] |
"""
Created by <NAME> (<EMAIL>)
"""
import copy
import gc
import glob
import numpy as np
import os
import random
import scipy.io
import shutil
import time
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim
import torchvision.transforms as transforms
from PIL import Image
from scipy.spatial.distance import pdist, squareform
from sklearn.decomposition import PCA
from torch.hub import load_state_dict_from_url
from intrinsic_dimension_2NN import estimate
from layer_rotation import layer_rotation
from mutual_info_EDGE import EDGE
from original_variant_AlexNet_model import DNNforVPL
from reading_stimuli import reading_stimuli
# The pretrained weights of AlexNet
# URL of the torchvision AlexNet checkpoint; downloading happens once at
# import time (cached by torch.hub under ~/.cache/torch by default).
model_urls = {'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth'}
# State dict of the pretrained AlexNet; later filtered to initialize DNNforVPL.
pretrained_dict = load_state_dict_from_url(model_urls['alexnet'])
### A class for formatting different metrics of accuracy during training and transfer
class AverageMeter(object):
    """Computes and stores the average and current values.

    Tracks the latest value, running sum, observation count, and running
    average of a named metric (e.g. loss or accuracy). Values may be plain
    Python numbers or 0-d torch tensors; tensors are converted to Python
    scalars only for display, never in place.
    """
    def __init__(self, name, fmt = ':f'):
        self.name = name
        self.fmt = fmt  # format spec appended to '{val' / '{avg}', e.g. ':6.2f'
        self.reset()
    def reset(self):
        """Clears all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
    def update(self, val, n = 1):
        """Records a new observation ``val`` with weight ``n`` (batch size)."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '}'
        # Bug fix: the original converted tensor attributes to Python floats
        # *in place* for the 'Accuracy' meter, so a second str() call crashed
        # with AttributeError (floats have no .item()). Format a copy instead,
        # converting any tensor-like value (anything exposing .item()) without
        # mutating the meter's state.
        display = dict(self.__dict__)
        for key in ('val', 'avg', 'sum'):
            value = display[key]
            if hasattr(value, 'item'):
                display[key] = value.item()
        return fmtstr.format(**display)
### A class for showing a progress bar during training and transfer
class ProgressMeter(object):
    """Prints one-line progress summaries: a '[batch/total]' header plus meters."""
    def __init__(self, num_batches, meters, prefix = ""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix
    def display(self, batch):
        """Prints the prefix, the formatted batch counter, and every meter."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print('\t'.join(parts))
    def _get_batch_fmtstr(self, num_batches):
        """Builds '[{:Nd}/total]' where N is the digit width of the total."""
        width = len(str(num_batches // 1))
        counter_fmt = '{:' + str(width) + 'd}'
        return '[' + counter_fmt + '/' + counter_fmt.format(num_batches) + ']'
### A function for computing accuracy during training and transfer
def accuracy(output, target, topk = 1):
"""Computes the accuracy over the top1 predictions"""
with torch.no_grad():
batch_size = target.size(0)
_, pred = output.topk(1, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
correct_k = correct[:1].view(-1).float().sum(0, keepdim = True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
### A function for adjusting the learning rate during training
def adjust_learning_rate(optimizer, session, lr):
    """Sets the learning rate to the initial LR decayed by 2 every 1 session"""
    # Halve the base learning rate once per completed session, then push the
    # decayed value into every parameter group of the optimizer.
    decayed_lr = lr * 0.5 ** session
    for group in optimizer.param_groups:
        group['lr'] = decayed_lr
### A function for saving the checkpoints during training
def save_checkpoint(state, is_best, group, filename):
    """ Saves the checkpoints during training """
    # Always persist the latest state; if this is the best-performing model
    # so far, additionally keep a copy under a group-specific "best" name.
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'DNNforVPL_best_' + group + '.pth.tar')
### A fucntion which performs different experiments with the original variant of AlexNet
def original_variant_alexnet(parent_folder = 'Original Variant of Alexnet_New Results', number_simulation = 10, number_PCA_component = 20, num_sample_artiphysiology = 1000):
### Initializing the main variables
x_sample_artiphysiology_index = np.zeros((num_sample_artiphysiology, 3), dtype = np.int64)
for i in range(0, num_sample_artiphysiology):
x_sample_artiphysiology_index[i, 0] = random.randrange(1)
x_sample_artiphysiology_index[i, 1] = random.randrange(20)
x_sample_artiphysiology_index[i, 2] = random.randrange(180)
number_group = 4
number_layer = 5
number_layer_freeze = 6
all_simulation_training_accuracy = np.zeros((number_simulation, number_group, number_layer_freeze, 180), dtype = np.float32)
all_simulation_transfer_accuracy = np.zeros((number_simulation, number_group, number_layer_freeze, 10), dtype = np.float32)
all_simulation_all_MI_original = np.zeros((number_simulation, number_group, number_layer, number_layer_freeze), dtype = np.float32)
all_simulation_all_MI_noise = np.zeros((number_simulation, number_group, number_layer, number_layer_freeze), dtype = np.float32)
all_simulation_all_ID = np.zeros((number_simulation, number_group, number_layer, number_layer_freeze, 19), dtype = np.float32)
all_x_sample_ID = np.zeros((number_simulation, number_group), dtype = np.float32)
all_simulation_training_accuracy_permuted = np.zeros((number_simulation, number_group, number_layer_freeze, 180), dtype = np.float32)
all_simulation_all_ID_permuted = np.zeros((number_simulation, number_group, number_layer, number_layer_freeze, 19), dtype = np.float32)
all_PCA_explained_variance_layer_1 = np.zeros((number_simulation, number_group, number_layer_freeze, number_PCA_component), dtype = np.float32)
all_PCA_explained_variance_layer_2 = np.zeros((number_simulation, number_group, number_layer_freeze, number_PCA_component), dtype = np.float32)
all_PCA_explained_variance_layer_3 = np.zeros((number_simulation, number_group, number_layer_freeze, number_PCA_component), dtype = np.float32)
all_PCA_explained_variance_layer_4 = np.zeros((number_simulation, number_group, number_layer_freeze, number_PCA_component), dtype = np.float32)
all_PCA_explained_variance_layer_5 = np.zeros((number_simulation, number_group, number_layer_freeze, number_PCA_component), dtype = np.float32)
all_simulation_weight_change_layer_1 = np.zeros((number_simulation, number_group, number_layer_freeze, 180), dtype = np.float32)
all_simulation_weight_change_layer_2 = np.zeros((number_simulation, number_group, number_layer_freeze, 180), dtype = np.float32)
all_simulation_weight_change_layer_3 = np.zeros((number_simulation, number_group, number_layer_freeze, 180), dtype = np.float32)
all_simulation_weight_change_layer_4 = np.zeros((number_simulation, number_group, number_layer_freeze, 180), dtype = np.float32)
all_simulation_weight_change_layer_5 = np.zeros((number_simulation, number_group, number_layer_freeze, 180), dtype = np.float32)
all_simulation_layer_rotation_layer_1 = np.zeros((number_simulation, number_group, number_layer_freeze, 180), dtype = np.float32)
all_simulation_layer_rotation_layer_2 = np.zeros((number_simulation, number_group, number_layer_freeze, 180), dtype = np.float32)
all_simulation_layer_rotation_layer_3 = np.zeros((number_simulation, number_group, number_layer_freeze, 180), dtype = np.float32)
all_simulation_layer_rotation_layer_4 = np.zeros((number_simulation, number_group, number_layer_freeze, 180), dtype = np.float32)
all_simulation_layer_rotation_layer_5 = np.zeros((number_simulation, number_group, number_layer_freeze, 180), dtype = np.float32)
os.mkdir(parent_folder)
for simulation_counter in range(number_simulation):
print('Simulation: ', simulation_counter + 1)
os.mkdir(parent_folder + '/Simulation_' + str(simulation_counter + 1))
group_counter = -1
for group_training in ['group1', 'group2', 'group3', 'group4']:
gc.collect()
best_acc1 = 0
group_counter = group_counter + 1
print('Group: ', group_training)
os.mkdir(parent_folder + '/Simulation_' + str(simulation_counter + 1) + '/' + group_training)
### Training Stimuli
# The structure of image names in different groups
if group_training == 'group1':
SF_training = [170]
Ori_training = [23325, 23350, 23375, 23400, 23425, 23450, 23475, 23500, 23525, 23550,
23650, 23675, 23700, 23725, 23750, 23775, 23800, 23825, 23850, 23875]
elif group_training == 'group2':
SF_training = [53, 170, 276]
Ori_training = [23325, 23350, 23375, 23400, 23425, 23450, 23475, 23500, 23525, 23550,
23650, 23675, 23700, 23725, 23750, 23775, 23800, 23825, 23850, 23875]
elif group_training == 'group3':
SF_training = [170]
Ori_training = [23075, 23100, 23125, 23150, 23175, 23200, 23225, 23250, 23275, 23300,
23900, 23925, 23950, 23975, 24000, 24025, 24050, 24075, 24100, 24125]
elif group_training == 'group4':
SF_training = [53, 170, 276]
Ori_training = [23075, 23100, 23125, 23150, 23175, 23200, 23225, 23250, 23275, 23300,
23900, 23925, 23950, 23975, 24000, 24025, 24050, 24075, 24100, 24125]
# Reading all images
if group_training == 'group1' or group_training == 'group2':
file_name_paths = glob.glob(os.path.dirname(os.path.abspath("./")) + '/data/stimuli/training_groups1&2/*.TIFF')
elif group_training == 'group3' or group_training == 'group4':
file_name_paths = glob.glob(os.path.dirname(os.path.abspath("./")) + '/data/stimuli/training_groups3&4/*.TIFF')
file_names = [os.path.basename(x) for x in file_name_paths]
x_val_training, y_val_training, z_val_training, x_tensor_training, y_tensor_training = reading_stimuli(file_names = file_names, file_name_paths = file_name_paths, orientation = Ori_training, spatial_frequency = SF_training)
x_tensor_training = torch.stack(x_tensor_training)
y_tensor_training = torch.stack(y_tensor_training)
print(x_tensor_training.shape, y_tensor_training.shape)
### SF Transfer Stimuli
# The structure of image names in different groups
if group_training == 'group1':
group_transfer = 'group1'
SF_transfer = [96]
Ori_transfer = [23325, 23350, 23375, 23400, 23425, 23450, 23475, 23500, 23525, 23550,
23650, 23675, 23700, 23725, 23750, 23775, 23800, 23825, 23850, 23875]
elif group_training == 'group2':
group_transfer = 'group2'
SF_transfer= [96]
Ori_transfer = [23325, 23350, 23375, 23400, 23425, 23450, 23475, 23500, 23525, 23550,
23650, 23675, 23700, 23725, 23750, 23775, 23800, 23825, 23850, 23875]
elif group_training == 'group3':
group_transfer = 'group3'
SF_transfer = [96]
Ori_transfer = [23075, 23100, 23125, 23150, 23175, 23200, 23225, 23250, 23275, 23300,
23900, 23925, 23950, 23975, 24000, 24025, 24050, 24075, 24100, 24125]
elif group_training == 'group4':
group_transfer = 'group4'
SF_transfer = [96]
Ori_transfer = [23075, 23100, 23125, 23150, 23175, 23200, 23225, 23250, 23275, 23300,
23900, 23925, 23950, 23975, 24000, 24025, 24050, 24075, 24100, 24125]
# Reading all images
if group_transfer == 'group1' or group_transfer == 'group2':
file_name_paths = glob.glob(os.path.dirname(os.path.abspath("./")) + '/data/stimuli/transferSF_groups1&2/*.TIFF')
elif group_transfer == 'group3' or group_transfer == 'group4':
file_name_paths = glob.glob(os.path.dirname(os.path.abspath("./")) + '/data/stimuli/transferSF_groups3&4/*.TIFF')
file_names = [os.path.basename(x) for x in file_name_paths]
x_val_transfer, y_val_transfer, z_val_transfer, x_tensor_transfer, y_tensor_transfer = reading_stimuli(file_names = file_names, file_name_paths = file_name_paths, orientation = Ori_transfer, spatial_frequency = SF_transfer)
x_tensor_transfer = torch.stack(x_tensor_transfer)
y_tensor_transfer = torch.stack(y_tensor_transfer)
print(x_tensor_transfer.shape, y_tensor_transfer.shape)
layer_freeze_counter = -1
for layer_freeze in [None, 0, 3, 6, 8, 10]:
layer_freeze_counter = layer_freeze_counter + 1
print('Frozen Layer: ', layer_freeze)
# Read the reference image
file_name_path_ref = glob.glob(os.path.dirname(os.path.abspath("./")) + '/data/stimuli/reference_stimulus.TIFF')
# Define the main reference variable
x_val_ref = np.zeros((224, 224, 3), dtype = np.float32)
x_tensor_ref = []
# Load image
img = Image.open(file_name_path_ref[0]).convert('RGB')
# Resize image
width, height = img.size
new_width = width * 256 // min(img.size)
new_height = height * 256 // min(img.size)
img = img.resize((new_width, new_height), Image.BILINEAR)
# Center crop image
width, height = img.size
startx = width // 2 - (224 // 2)
starty = height // 2 - (224 // 2)
img = np.asarray(img).reshape(height, width, 3)
img = img[starty:starty + 224, startx:startx + 224]
assert img.shape[0] == 224 and img.shape[1] == 224, (img.shape, height, width)
# Save image
x_val_ref[:, :, :] = img[:, :, :]
# Convert image to tensor, then normalize and copy it
x_temp = torch.from_numpy(np.transpose(x_val_ref[:, :, :], (2, 0, 1)))
normalize = transforms.Normalize(mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225])
for i in range(len(SF_training) * len(Ori_training)):
x_tensor_ref.append(normalize(x_temp))
x_tensor_ref = torch.stack(x_tensor_ref)
print(x_tensor_ref.shape)
# Select GPU
global device
gpu = 0
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Use GPU: {} for training".format(gpu))
# Load the PyTorch model
model = DNNforVPL()
model_dict = model.state_dict()
# Filter out unnecessary keys
pretrained_dict_model = {k : v for k, v in pretrained_dict.items() if k in model_dict}
# Overwrite entries in the existing state dict
model_dict.update(pretrained_dict_model)
# Load the new state dict
model.load_state_dict(model_dict)
# Initialize by zero the weights of the fully-connected layer of the model
nn.init.zeros_(model.classifier[0].weight)
nn.init.zeros_(model.classifier[0].bias)
# Set all the parameters of the model to be trainable
for param in model.parameters():
param.requires_grad = True
if layer_freeze != None:
model.features[layer_freeze].weight.requires_grad = False
model.features[layer_freeze].bias.requires_grad = False
# Send the model to GPU/CPU
model = model.to(device)
# Model summary
print(model)
cudnn.benchmark = True
### Extracting the activations of convolutional layers of the network per transfer stimulus before training
# The indices of consecutive convolutional layers: (0, 3, 6, 8, 10)
# The sizes of consecutive convolutional layers: (55, 27, 13, 13, 13)
# The positions of central units of consecutive convolutional layers: (27, 13, 6, 6, 6)
# The number of channels of consecutive convolutional layers: (64, 192, 384, 256, 256)
if layer_freeze == None:
os.mkdir(parent_folder + '/Simulation_' + str(simulation_counter + 1) + '/' + group_training + '/before_training')
saving_folder = parent_folder + '/Simulation_' + str(simulation_counter + 1) + '/' + group_training + '/before_training'
# The target stimuli
feature_sample_artiphysiology = np.zeros((num_sample_artiphysiology, 3), dtype = np.int64)
all_x_sample = np.zeros((num_sample_artiphysiology, 3, 224, 224), dtype = np.float32)
all_unit_activity_Conv2d_1 = np.zeros((num_sample_artiphysiology, 64, 55, 55), dtype = np.float32)
all_unit_activity_Conv2d_2 = np.zeros((num_sample_artiphysiology, 192, 27, 27), dtype = np.float32)
all_unit_activity_Conv2d_3 = np.zeros((num_sample_artiphysiology, 384, 13, 13), dtype = np.float32)
all_unit_activity_Conv2d_4 = np.zeros((num_sample_artiphysiology, 256, 13, 13), dtype = np.float32)
all_unit_activity_Conv2d_5 = np.zeros((num_sample_artiphysiology, 256, 13, 13), dtype = np.float32)
for i in range(num_sample_artiphysiology):
feature_sample_artiphysiology[i, :] = [SF_transfer[x_sample_artiphysiology_index[i, 0]], Ori_transfer[x_sample_artiphysiology_index[i, 1]], x_sample_artiphysiology_index[i, 2]]
index = torch.tensor(z_val_transfer[x_sample_artiphysiology_index[i, 0], x_sample_artiphysiology_index[i, 1], x_sample_artiphysiology_index[i, 2]], dtype = torch.long)
x_sample = torch.index_select(x_tensor_transfer, 0, index)
x_sample = x_sample.cuda(gpu)
unit_activity_layer_0 = model.features[0](x_sample)
unit_activity_layer_1 = model.features[1](unit_activity_layer_0)
unit_activity_layer_2 = model.features[2](unit_activity_layer_1)
unit_activity_layer_3 = model.features[3](unit_activity_layer_2)
unit_activity_layer_4 = model.features[4](unit_activity_layer_3)
unit_activity_layer_5 = model.features[5](unit_activity_layer_4)
unit_activity_layer_6 = model.features[6](unit_activity_layer_5)
unit_activity_layer_7 = model.features[7](unit_activity_layer_6)
unit_activity_layer_8 = model.features[8](unit_activity_layer_7)
unit_activity_layer_9 = model.features[9](unit_activity_layer_8)
unit_activity_layer_10 = model.features[10](unit_activity_layer_9)
unit_activity_layer_11 = model.features[11](unit_activity_layer_10)
unit_activity_layer_12 = model.features[12](unit_activity_layer_11)
all_x_sample[i, :] = x_sample.detach().cpu().clone().numpy()
all_unit_activity_Conv2d_1[i, :] = unit_activity_layer_0[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_2[i, :] = unit_activity_layer_3[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_3[i, :] = unit_activity_layer_6[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_4[i, :] = unit_activity_layer_8[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_5[i, :] = unit_activity_layer_10[0].detach().cpu().clone().numpy()
# Saving the properties of sample stimuli used for calculating intrinsic dimension
scipy.io.savemat(saving_folder + '/feature_sample_artiphysiology.mat', mdict = {'feature_sample_artiphysiology': feature_sample_artiphysiology})
### Calculating the intrinsic dimension
all_x_sample_ID[simulation_counter, group_counter] = estimate(squareform(pdist(all_x_sample.reshape(num_sample_artiphysiology, -1)), 'euclidean'), fraction = 1.0)[2]
all_simulation_all_ID[simulation_counter, group_counter, 0, layer_freeze_counter, 0] = estimate(squareform(pdist(all_unit_activity_Conv2d_1.reshape(num_sample_artiphysiology, -1)), 'euclidean'), fraction = 1.0)[2]
all_simulation_all_ID[simulation_counter, group_counter, 1, layer_freeze_counter, 0] = estimate(squareform(pdist(all_unit_activity_Conv2d_2.reshape(num_sample_artiphysiology, -1)), 'euclidean'), fraction = 1.0)[2]
all_simulation_all_ID[simulation_counter, group_counter, 2, layer_freeze_counter, 0] = estimate(squareform(pdist(all_unit_activity_Conv2d_3.reshape(num_sample_artiphysiology, -1)), 'euclidean'), fraction = 1.0)[2]
all_simulation_all_ID[simulation_counter, group_counter, 3, layer_freeze_counter, 0] = estimate(squareform(pdist(all_unit_activity_Conv2d_4.reshape(num_sample_artiphysiology, -1)), 'euclidean'), fraction = 1.0)[2]
all_simulation_all_ID[simulation_counter, group_counter, 4, layer_freeze_counter, 0] = estimate(squareform(pdist(all_unit_activity_Conv2d_5.reshape(num_sample_artiphysiology, -1)), 'euclidean'), fraction = 1.0)[2]
all_simulation_all_ID_permuted[simulation_counter, group_counter, 0, layer_freeze_counter, 0] = all_simulation_all_ID[simulation_counter, group_counter, 0, layer_freeze_counter, 0]
all_simulation_all_ID_permuted[simulation_counter, group_counter, 1, layer_freeze_counter, 0] = all_simulation_all_ID[simulation_counter, group_counter, 1, layer_freeze_counter, 0]
all_simulation_all_ID_permuted[simulation_counter, group_counter, 2, layer_freeze_counter, 0] = all_simulation_all_ID[simulation_counter, group_counter, 2, layer_freeze_counter, 0]
all_simulation_all_ID_permuted[simulation_counter, group_counter, 3, layer_freeze_counter, 0] = all_simulation_all_ID[simulation_counter, group_counter, 3, layer_freeze_counter, 0]
all_simulation_all_ID_permuted[simulation_counter, group_counter, 4, layer_freeze_counter, 0] = all_simulation_all_ID[simulation_counter, group_counter, 4, layer_freeze_counter, 0]
# Define the main learning parameters
lr = 0.00001
momentum = 0.9
weight_decay = 0.0001
# Define the loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(gpu)
optimizer = torch.optim.SGD(model.parameters(), lr, momentum = momentum, weight_decay = weight_decay)
# Save the initial weights of the convolutional layers of the model
Conv2d_1_0 = copy.deepcopy(model.features[0].weight)
Conv2d_2_0 = copy.deepcopy(model.features[3].weight)
Conv2d_3_0 = copy.deepcopy(model.features[6].weight)
Conv2d_4_0 = copy.deepcopy(model.features[8].weight)
Conv2d_5_0 = copy.deepcopy(model.features[10].weight)
# Define the main training parameters
start_session = 0
sessions = 1
z_val_shuffle = copy.deepcopy(z_val_training)
for i in range(len(SF_training)):
for j in range(len(Ori_training)):
random.shuffle(z_val_shuffle[i, j, :])
for session in range(start_session, sessions):
# Adjust the learning rate
adjust_learning_rate(optimizer, session, lr)
# Train on the training set
epochs = 180
ID_counter = 0
for epoch in range(epochs):
z_val_shuffle_1D = np.unique(z_val_shuffle[:, :, epoch])
indices = torch.tensor(z_val_shuffle_1D, dtype = torch.long)
x_train = torch.index_select(x_tensor_training, 0, indices)
y_train = torch.index_select(y_tensor_training, 0, indices)
y_train = y_train.squeeze(1)
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Accuracy', ':6.2f')
progress = ProgressMeter(epochs, [batch_time, losses, top1], prefix = ("Training >>> Session: " + str(session) + " Epoch: [{}]").format(epoch))
# Switch to training mode
model.train()
with torch.set_grad_enabled(True):
end = time.time()
x_ref = x_tensor_ref.cuda(gpu)
x_train = x_train.cuda(gpu)
y_train = y_train.cuda(gpu)
# Compute output
output = model(x_train, x_ref)
loss = criterion(output, y_train)
# Measure accuracy and record loss
acc1 = accuracy(output, y_train, topk = 1)
losses.update(loss.item(), x_train.size(0))
top1.update(acc1[0], x_train.size(0))
# Compute gradient and perform SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Save the validation accuracy for plotting
all_simulation_training_accuracy[simulation_counter, group_counter, layer_freeze_counter, epoch] = acc1[0].item()
# Measure elapsed time
batch_time.update(time.time() - end)
progress.display(epoch)
# Remember the best accuracy
is_best = all_simulation_training_accuracy[simulation_counter, group_counter, layer_freeze_counter, epoch] >= best_acc1
best_acc1 = max(all_simulation_training_accuracy[simulation_counter, group_counter, layer_freeze_counter, epoch], best_acc1)
all_simulation_weight_change_layer_1[simulation_counter, group_counter, layer_freeze_counter, epoch] = (torch.pow(torch.sum(torch.pow(model.features[0].weight - Conv2d_1_0, 2)), 0.5) / torch.pow(torch.sum(torch.pow(model.features[0].weight, 2)), 0.5)).item()
all_simulation_weight_change_layer_2[simulation_counter, group_counter, layer_freeze_counter, epoch] = (torch.pow(torch.sum(torch.pow(model.features[3].weight - Conv2d_2_0, 2)), 0.5) / torch.pow(torch.sum(torch.pow(model.features[3].weight, 2)), 0.5)).item()
all_simulation_weight_change_layer_3[simulation_counter, group_counter, layer_freeze_counter, epoch] = (torch.pow(torch.sum(torch.pow(model.features[6].weight - Conv2d_3_0, 2)), 0.5) / torch.pow(torch.sum(torch.pow(model.features[6].weight, 2)), 0.5)).item()
all_simulation_weight_change_layer_4[simulation_counter, group_counter, layer_freeze_counter, epoch] = (torch.pow(torch.sum(torch.pow(model.features[8].weight - Conv2d_4_0, 2)), 0.5) / torch.pow(torch.sum(torch.pow(model.features[8].weight, 2)), 0.5)).item()
all_simulation_weight_change_layer_5[simulation_counter, group_counter, layer_freeze_counter, epoch] = (torch.pow(torch.sum(torch.pow(model.features[10].weight - Conv2d_5_0, 2)), 0.5) / torch.pow(torch.sum(torch.pow(model.features[10].weight, 2)), 0.5)).item()
all_simulation_layer_rotation_layer_1[simulation_counter, group_counter, layer_freeze_counter, epoch] = layer_rotation(model.features[0].weight, Conv2d_1_0)
all_simulation_layer_rotation_layer_2[simulation_counter, group_counter, layer_freeze_counter, epoch] = layer_rotation(model.features[3].weight, Conv2d_2_0)
all_simulation_layer_rotation_layer_3[simulation_counter, group_counter, layer_freeze_counter, epoch] = layer_rotation(model.features[6].weight, Conv2d_3_0)
all_simulation_layer_rotation_layer_4[simulation_counter, group_counter, layer_freeze_counter, epoch] = layer_rotation(model.features[8].weight, Conv2d_4_0)
all_simulation_layer_rotation_layer_5[simulation_counter, group_counter, layer_freeze_counter, epoch] = layer_rotation(model.features[10].weight, Conv2d_5_0)
if (layer_freeze == None or layer_freeze == 0 or layer_freeze == 10) and epoch % 10 == 0:
ID_counter = ID_counter + 1
for i in range(num_sample_artiphysiology):
feature_sample_artiphysiology[i, :] = [SF_transfer[x_sample_artiphysiology_index[i, 0]], Ori_transfer[x_sample_artiphysiology_index[i, 1]], x_sample_artiphysiology_index[i, 2]]
index = torch.tensor(z_val_transfer[x_sample_artiphysiology_index[i, 0], x_sample_artiphysiology_index[i, 1], x_sample_artiphysiology_index[i, 2]], dtype = torch.long)
x_sample = torch.index_select(x_tensor_transfer, 0, index)
x_sample = x_sample.cuda(gpu)
unit_activity_layer_0 = model.features[0](x_sample)
unit_activity_layer_1 = model.features[1](unit_activity_layer_0)
unit_activity_layer_2 = model.features[2](unit_activity_layer_1)
unit_activity_layer_3 = model.features[3](unit_activity_layer_2)
unit_activity_layer_4 = model.features[4](unit_activity_layer_3)
unit_activity_layer_5 = model.features[5](unit_activity_layer_4)
unit_activity_layer_6 = model.features[6](unit_activity_layer_5)
unit_activity_layer_7 = model.features[7](unit_activity_layer_6)
unit_activity_layer_8 = model.features[8](unit_activity_layer_7)
unit_activity_layer_9 = model.features[9](unit_activity_layer_8)
unit_activity_layer_10 = model.features[10](unit_activity_layer_9)
unit_activity_layer_11 = model.features[11](unit_activity_layer_10)
unit_activity_layer_12 = model.features[12](unit_activity_layer_11)
all_unit_activity_Conv2d_1[i, :] = unit_activity_layer_0[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_2[i, :] = unit_activity_layer_3[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_3[i, :] = unit_activity_layer_6[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_4[i, :] = unit_activity_layer_8[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_5[i, :] = unit_activity_layer_10[0].detach().cpu().clone().numpy()
### Calculating the intrinsic dimension
all_simulation_all_ID[simulation_counter, group_counter, 0, layer_freeze_counter, ID_counter] = estimate(squareform(pdist(all_unit_activity_Conv2d_1.reshape(num_sample_artiphysiology, -1)), 'euclidean'), fraction = 1.0)[2]
all_simulation_all_ID[simulation_counter, group_counter, 1, layer_freeze_counter, ID_counter] = estimate(squareform(pdist(all_unit_activity_Conv2d_2.reshape(num_sample_artiphysiology, -1)), 'euclidean'), fraction = 1.0)[2]
all_simulation_all_ID[simulation_counter, group_counter, 2, layer_freeze_counter, ID_counter] = estimate(squareform(pdist(all_unit_activity_Conv2d_3.reshape(num_sample_artiphysiology, -1)), 'euclidean'), fraction = 1.0)[2]
all_simulation_all_ID[simulation_counter, group_counter, 3, layer_freeze_counter, ID_counter] = estimate(squareform(pdist(all_unit_activity_Conv2d_4.reshape(num_sample_artiphysiology, -1)), 'euclidean'), fraction = 1.0)[2]
all_simulation_all_ID[simulation_counter, group_counter, 4, layer_freeze_counter, ID_counter] = estimate(squareform(pdist(all_unit_activity_Conv2d_5.reshape(num_sample_artiphysiology, -1)), 'euclidean'), fraction = 1.0)[2]
# Save the checkpoint
save_checkpoint({
'session': session + 1,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer': optimizer.state_dict(),
}, is_best, group_training, 'DNNforVPL_' + group_training + '.pth.tar')
# Read the reference image
file_name_path_ref = glob.glob(os.path.dirname(os.path.abspath("./")) + '/data/stimuli/reference_stimulus.TIFF')
# Define the main reference variable
x_val_ref = np.zeros((224, 224, 3), dtype = np.float32)
x_tensor_ref = []
# Load image
img = Image.open(file_name_path_ref[0]).convert('RGB')
# Resize image
width, height = img.size
new_width = width * 256 // min(img.size)
new_height = height * 256 // min(img.size)
img = img.resize((new_width, new_height), Image.BILINEAR)
# Center crop image
width, height = img.size
startx = width // 2 - (224 // 2)
starty = height // 2 - (224 // 2)
img = np.asarray(img).reshape(height, width, 3)
img = img[starty:starty + 224, startx:startx + 224]
assert img.shape[0] == 224 and img.shape[1] == 224, (img.shape, height, width)
# Save image
x_val_ref[:, :, :] = img[:, :, :]
# Convert image to tensor, then normalize and copy it
x_temp = torch.from_numpy(np.transpose(x_val_ref[:, :, :], (2, 0, 1)))
normalize = transforms.Normalize(mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225])
for i in range(len(SF_transfer) * len(Ori_transfer)):
x_tensor_ref.append(normalize(x_temp))
x_tensor_ref = torch.stack(x_tensor_ref)
print(x_tensor_ref.shape)
# Select GPU
gpu = 0
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Use GPU: {} for transfer".format(gpu))
# Set all the parameters of the model to be trainable
for param in model.parameters():
param.requires_grad = False
# Send the model to GPU/CPU
model = model.to(device)
# Model summary
print(model)
cudnn.benchmark = True
# Define the main validation parameters
start_session = 0
sessions = 10
for session in range(start_session, sessions):
z_val_shuffle = copy.deepcopy(z_val_transfer)
for j in range(len(SF_transfer)):
for k in range(len(Ori_transfer)):
random.shuffle(z_val_shuffle[j, k, :])
# Evaluate on the validation set
z_val_shuffle_1D = np.unique(z_val_shuffle[:, :, session])
indices = torch.tensor(z_val_shuffle_1D, dtype = torch.long)
x_valid = torch.index_select(x_tensor_transfer, 0, indices)
y_valid = torch.index_select(y_tensor_transfer, 0, indices)
y_valid = y_valid.squeeze(1)
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Accuracy', ':6.2f')
progress = ProgressMeter(1, [batch_time, losses, top1], prefix = ("Transfer >>> Session: " + str(session) + " Epoch: [{}]").format(1))
# Switch to evaluating mode
model.eval()
with torch.no_grad():
end = time.time()
x_ref = x_tensor_ref.cuda(gpu)
x_valid = x_valid.cuda(gpu)
y_valid = y_valid.cuda(gpu)
# Compute output
output = model(x_valid, x_ref)
loss = criterion(output, y_valid)
# Measure accuracy and record loss
acc1 = accuracy(output, y_valid, topk = 1)
losses.update(loss.item(), x_valid.size(0))
top1.update(acc1[0], x_valid.size(0))
# Save the validation accuracy for plotting
all_simulation_transfer_accuracy[simulation_counter, group_counter, layer_freeze_counter, session - start_session] = acc1[0].item()
# Measure elapsed time
batch_time.update(time.time() - end)
progress.display(1)
# Remember the best accuracy
is_best = all_simulation_transfer_accuracy[simulation_counter, group_counter, layer_freeze_counter, session - start_session] >= best_acc1
best_acc1 = max(all_simulation_transfer_accuracy[simulation_counter, group_counter, layer_freeze_counter, session - start_session], best_acc1)
### Extracting the activations of convolutional layers of the network per transfer stimulus after training
# The indices of consecutive convolutional layers: (0, 3, 6, 8, 10)
# The sizes of consecutive convolutional layers: (55, 27, 13, 13, 13)
# The positions of central units of consecutive convolutional layers: (27, 13, 6, 6, 6)
# The number of channels of consecutive convolutional layers: (64, 192, 384, 256, 256)
os.mkdir(parent_folder + '/Simulation_' + str(simulation_counter + 1) + '/' + group_training + '/after_training_' + str(layer_freeze))
saving_folder = parent_folder + '/Simulation_' + str(simulation_counter + 1) + '/' + group_training + '/after_training_' + str(layer_freeze)
# The target stimuli
feature_sample_artiphysiology = np.zeros((num_sample_artiphysiology, 3), dtype = np.int64)
all_unit_activity_Conv2d_1 = np.zeros((num_sample_artiphysiology, 64, 55, 55), dtype = np.float32)
all_unit_activity_Conv2d_2 = np.zeros((num_sample_artiphysiology, 192, 27, 27), dtype = np.float32)
all_unit_activity_Conv2d_3 = np.zeros((num_sample_artiphysiology, 384, 13, 13), dtype = np.float32)
all_unit_activity_Conv2d_4 = np.zeros((num_sample_artiphysiology, 256, 13, 13), dtype = np.float32)
all_unit_activity_Conv2d_5 = np.zeros((num_sample_artiphysiology, 256, 13, 13), dtype = np.float32)
for i in range(num_sample_artiphysiology):
feature_sample_artiphysiology[i, :] = [SF_transfer[x_sample_artiphysiology_index[i, 0]], Ori_transfer[x_sample_artiphysiology_index[i, 1]], x_sample_artiphysiology_index[i, 2]]
index = torch.tensor(z_val_transfer[x_sample_artiphysiology_index[i, 0], x_sample_artiphysiology_index[i, 1], x_sample_artiphysiology_index[i, 2]], dtype = torch.long)
x_sample = torch.index_select(x_tensor_transfer, 0, index)
x_sample = x_sample.cuda(gpu)
unit_activity_layer_0 = model.features[0](x_sample)
unit_activity_layer_1 = model.features[1](unit_activity_layer_0)
unit_activity_layer_2 = model.features[2](unit_activity_layer_1)
unit_activity_layer_3 = model.features[3](unit_activity_layer_2)
unit_activity_layer_4 = model.features[4](unit_activity_layer_3)
unit_activity_layer_5 = model.features[5](unit_activity_layer_4)
unit_activity_layer_6 = model.features[6](unit_activity_layer_5)
unit_activity_layer_7 = model.features[7](unit_activity_layer_6)
unit_activity_layer_8 = model.features[8](unit_activity_layer_7)
unit_activity_layer_9 = model.features[9](unit_activity_layer_8)
unit_activity_layer_10 = model.features[10](unit_activity_layer_9)
unit_activity_layer_11 = model.features[11](unit_activity_layer_10)
unit_activity_layer_12 = model.features[12](unit_activity_layer_11)
all_unit_activity_Conv2d_1[i, :] = unit_activity_layer_0[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_2[i, :] = unit_activity_layer_3[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_3[i, :] = unit_activity_layer_6[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_4[i, :] = unit_activity_layer_8[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_5[i, :] = unit_activity_layer_10[0].detach().cpu().clone().numpy()
# Saving the properties of sample stimuli used for calculating intrinsic dimension
scipy.io.savemat(saving_folder + '/feature_sample_artiphysiology.mat', mdict = {'feature_sample_artiphysiology': feature_sample_artiphysiology})
### Calculating the variance explained by PCA
PCA_layer_1 = PCA(n_components = number_PCA_component).fit(all_unit_activity_Conv2d_1.reshape(num_sample_artiphysiology, -1))
PCA_layer_2 = PCA(n_components = number_PCA_component).fit(all_unit_activity_Conv2d_2.reshape(num_sample_artiphysiology, -1))
PCA_layer_3 = PCA(n_components = number_PCA_component).fit(all_unit_activity_Conv2d_3.reshape(num_sample_artiphysiology, -1))
PCA_layer_4 = PCA(n_components = number_PCA_component).fit(all_unit_activity_Conv2d_4.reshape(num_sample_artiphysiology, -1))
PCA_layer_5 = PCA(n_components = number_PCA_component).fit(all_unit_activity_Conv2d_5.reshape(num_sample_artiphysiology, -1))
all_PCA_explained_variance_layer_1[simulation_counter, group_counter, layer_freeze_counter, :] = PCA_layer_1.explained_variance_ratio_
all_PCA_explained_variance_layer_2[simulation_counter, group_counter, layer_freeze_counter, :] = PCA_layer_2.explained_variance_ratio_
all_PCA_explained_variance_layer_3[simulation_counter, group_counter, layer_freeze_counter, :] = PCA_layer_3.explained_variance_ratio_
all_PCA_explained_variance_layer_4[simulation_counter, group_counter, layer_freeze_counter, :] = PCA_layer_4.explained_variance_ratio_
all_PCA_explained_variance_layer_5[simulation_counter, group_counter, layer_freeze_counter, :] = PCA_layer_5.explained_variance_ratio_
### Calculating the mutual information of original and nuisance stimuli with layers' activities
# The indices of consecutive convolutional layers: (0, 3, 6, 8, 10)
# The sizes of consecutive convolutional layers: (55, 27, 13, 13, 13)
# The positions of central units of consecutive convolutional layers: (27, 13, 6, 6, 6)
# The number of channels of consecutive convolutional layers: (64, 192, 384, 256, 256)
phase_count = 20
counter = -1
x_tensor_training_original = np.zeros((len(SF_training) * len(Ori_training) * phase_count, 3, 224, 224), dtype = np.float32)
x_tensor_training_noise = np.zeros((len(SF_training) * len(Ori_training) * phase_count, 3, 224, 224), dtype = np.float32)
all_unit_activity_MI_Conv2d_1 = np.zeros((len(SF_training) * len(Ori_training) * phase_count, 64, 55, 55), dtype = np.float32)
all_unit_activity_MI_Conv2d_2 = np.zeros((len(SF_training) * len(Ori_training) * phase_count, 192, 27, 27), dtype = np.float32)
all_unit_activity_MI_Conv2d_3 = np.zeros((len(SF_training) * len(Ori_training) * phase_count, 384, 13, 13), dtype = np.float32)
all_unit_activity_MI_Conv2d_4 = np.zeros((len(SF_training) * len(Ori_training) * phase_count, 256, 13, 13), dtype = np.float32)
all_unit_activity_MI_Conv2d_5 = np.zeros((len(SF_training) * len(Ori_training) * phase_count, 256, 13, 13), dtype = np.float32)
for i in range(len(SF_training)):
for j in range(len(Ori_training)):
phase = np.random.permutation(180)[:phase_count]
for k in range(phase_count):
counter = counter + 1
indices_training_1 = torch.tensor(z_val_training[i, j, phase[k]], dtype = torch.long)
indices_training_2 = torch.tensor(z_val_training[int(len(SF_training) / 2 + 0.5) - 1, j, phase[k]], dtype = torch.long)
x_sample = torch.index_select(x_tensor_training, 0, indices_training_1)
x_sample = x_sample.cuda(gpu)
unit_activity_layer_0 = model.features[0](x_sample)
unit_activity_layer_1 = model.features[1](unit_activity_layer_0)
unit_activity_layer_2 = model.features[2](unit_activity_layer_1)
unit_activity_layer_3 = model.features[3](unit_activity_layer_2)
unit_activity_layer_4 = model.features[4](unit_activity_layer_3)
unit_activity_layer_5 = model.features[5](unit_activity_layer_4)
unit_activity_layer_6 = model.features[6](unit_activity_layer_5)
unit_activity_layer_7 = model.features[7](unit_activity_layer_6)
unit_activity_layer_8 = model.features[8](unit_activity_layer_7)
unit_activity_layer_9 = model.features[9](unit_activity_layer_8)
unit_activity_layer_10 = model.features[10](unit_activity_layer_9)
unit_activity_layer_11 = model.features[11](unit_activity_layer_10)
unit_activity_layer_12 = model.features[12](unit_activity_layer_11)
x_tensor_training_original[counter, :] = torch.index_select(x_tensor_training, 0, indices_training_1).detach().cpu().clone().numpy()
x_tensor_training_noise[counter, :] = (torch.index_select(x_tensor_training, 0, indices_training_1) - torch.index_select(x_tensor_training, 0, indices_training_2)).cuda(gpu)[0].detach().cpu().clone().numpy()
all_unit_activity_MI_Conv2d_1[counter, :] = unit_activity_layer_0[0].detach().cpu().clone().numpy()
all_unit_activity_MI_Conv2d_2[counter, :] = unit_activity_layer_3[0].detach().cpu().clone().numpy()
all_unit_activity_MI_Conv2d_3[counter, :] = unit_activity_layer_6[0].detach().cpu().clone().numpy()
all_unit_activity_MI_Conv2d_4[counter, :] = unit_activity_layer_8[0].detach().cpu().clone().numpy()
all_unit_activity_MI_Conv2d_5[counter, :] = unit_activity_layer_10[0].detach().cpu().clone().numpy()
### Calculating the mutual information between the original stimuli and layers activities
all_simulation_all_MI_original[simulation_counter, group_counter, 0, layer_freeze_counter] = EDGE(x_tensor_training_original.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1), all_unit_activity_MI_Conv2d_1.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1),
U = 10, gamma = [1, 1], epsilon_vector = 'range', eps_range_factor = 0.1, normalize_epsilon = False, ensemble_estimation = 'median', L_ensemble = 5, hashing = 'p-stable', stochastic = False)
all_simulation_all_MI_original[simulation_counter, group_counter, 1, layer_freeze_counter] = EDGE(x_tensor_training_original.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1), all_unit_activity_MI_Conv2d_2.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1),
U = 10, gamma = [1, 1], epsilon_vector = 'range', eps_range_factor = 0.1, normalize_epsilon = False, ensemble_estimation = 'median', L_ensemble = 5, hashing = 'p-stable', stochastic = False)
all_simulation_all_MI_original[simulation_counter, group_counter, 2, layer_freeze_counter] = EDGE(x_tensor_training_original.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1), all_unit_activity_MI_Conv2d_3.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1),
U = 10, gamma = [1, 1], epsilon_vector = 'range', eps_range_factor = 0.1, normalize_epsilon = False, ensemble_estimation = 'median', L_ensemble = 5, hashing = 'p-stable', stochastic = False)
all_simulation_all_MI_original[simulation_counter, group_counter, 3, layer_freeze_counter] = EDGE(x_tensor_training_original.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1), all_unit_activity_MI_Conv2d_4.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1),
U = 10, gamma = [1, 1], epsilon_vector = 'range', eps_range_factor = 0.1, normalize_epsilon = False, ensemble_estimation = 'median', L_ensemble = 5, hashing = 'p-stable', stochastic = False)
all_simulation_all_MI_original[simulation_counter, group_counter, 4, layer_freeze_counter] = EDGE(x_tensor_training_original.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1), all_unit_activity_MI_Conv2d_5.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1),
U = 10, gamma = [1, 1], epsilon_vector = 'range', eps_range_factor = 0.1, normalize_epsilon = False, ensemble_estimation = 'median', L_ensemble = 5, hashing = 'p-stable', stochastic = False)
### Calculating the mutual information between the nuisance stimuli and layers activities
all_simulation_all_MI_noise[simulation_counter, group_counter, 0, layer_freeze_counter] = EDGE(x_tensor_training_noise.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1), all_unit_activity_MI_Conv2d_1.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1),
U = 10, gamma = [1, 1], epsilon_vector = 'range', eps_range_factor = 0.1, normalize_epsilon = False, ensemble_estimation = 'median', L_ensemble = 5, hashing = 'p-stable', stochastic = False)
all_simulation_all_MI_noise[simulation_counter, group_counter, 1, layer_freeze_counter] = EDGE(x_tensor_training_noise.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1), all_unit_activity_MI_Conv2d_2.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1),
U = 10, gamma = [1, 1], epsilon_vector = 'range', eps_range_factor = 0.1, normalize_epsilon = False, ensemble_estimation = 'median', L_ensemble = 5, hashing = 'p-stable', stochastic = False)
all_simulation_all_MI_noise[simulation_counter, group_counter, 2, layer_freeze_counter] = EDGE(x_tensor_training_noise.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1), all_unit_activity_MI_Conv2d_3.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1),
U = 10, gamma = [1, 1], epsilon_vector = 'range', eps_range_factor = 0.1, normalize_epsilon = False, ensemble_estimation = 'median', L_ensemble = 5, hashing = 'p-stable', stochastic = False)
all_simulation_all_MI_noise[simulation_counter, group_counter, 3, layer_freeze_counter] = EDGE(x_tensor_training_noise.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1), all_unit_activity_MI_Conv2d_4.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1),
U = 10, gamma = [1, 1], epsilon_vector = 'range', eps_range_factor = 0.1, normalize_epsilon = False, ensemble_estimation = 'median', L_ensemble = 5, hashing = 'p-stable', stochastic = False)
all_simulation_all_MI_noise[simulation_counter, group_counter, 4, layer_freeze_counter] = EDGE(x_tensor_training_noise.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1), all_unit_activity_MI_Conv2d_5.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1),
U = 10, gamma = [1, 1], epsilon_vector = 'range', eps_range_factor = 0.1, normalize_epsilon = False, ensemble_estimation = 'median', L_ensemble = 5, hashing = 'p-stable', stochastic = False)
### Training with Permuted Labels
if layer_freeze == None or layer_freeze == 0 or layer_freeze == 10:
print('Training with Permuted Labels')
# Read the reference image
file_name_path_ref = glob.glob(os.path.dirname(os.path.abspath("./")) + '/data/stimuli/reference_stimulus.TIFF')
# Define the main reference variable
x_val_ref = np.zeros((224, 224, 3), dtype = np.float32)
x_tensor_ref = []
# Load image
img = Image.open(file_name_path_ref[0]).convert('RGB')
# Resize image
width, height = img.size
new_width = width * 256 // min(img.size)
new_height = height * 256 // min(img.size)
img = img.resize((new_width, new_height), Image.BILINEAR)
# Center crop image
width, height = img.size
startx = width // 2 - (224 // 2)
starty = height // 2 - (224 // 2)
img = np.asarray(img).reshape(height, width, 3)
img = img[starty:starty + 224, startx:startx + 224]
assert img.shape[0] == 224 and img.shape[1] == 224, (img.shape, height, width)
# Save image
x_val_ref[:, :, :] = img[:, :, :]
# Convert image to tensor, then normalize and copy it
x_temp = torch.from_numpy(np.transpose(x_val_ref[:, :, :], (2, 0, 1)))
normalize = transforms.Normalize(mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225])
for i in range(len(SF_training) * len(Ori_training)):
x_tensor_ref.append(normalize(x_temp))
x_tensor_ref = torch.stack(x_tensor_ref)
print(x_tensor_ref.shape)
# Load the PyTorch model
model = DNNforVPL()
model_dict = model.state_dict()
# Filter out unnecessary keys
pretrained_dict_model = {k : v for k, v in pretrained_dict.items() if k in model_dict}
# Overwrite entries in the existing state dict
model_dict.update(pretrained_dict_model)
# Load the new state dict
model.load_state_dict(model_dict)
# Initialize by zero the weights of the fully-connected layer of the model
nn.init.zeros_(model.classifier[0].weight)
nn.init.zeros_(model.classifier[0].bias)
# Set all the parameters of the model to be trainable
for param in model.parameters():
param.requires_grad = True
if layer_freeze != None:
model.features[layer_freeze].weight.requires_grad = False
model.features[layer_freeze].bias.requires_grad = False
# Send the model to GPU/CPU
model = model.to(device)
cudnn.benchmark = True
# Define the main learning parameters
lr = 0.00001
momentum = 0.9
weight_decay = 0.0001
# Define the loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(gpu)
optimizer = torch.optim.SGD(model.parameters(), lr, momentum = momentum, weight_decay = weight_decay)
# Define the main training parameters
start_session = 0
sessions = 1
# Random permutation of labels
y_tensor_training_permuted = copy.deepcopy(y_tensor_training)
idx = torch.randperm(y_tensor_training_permuted.nelement())
y_tensor_training_permuted = y_tensor_training_permuted.view(-1)[idx].view(y_tensor_training_permuted.size())
for session in range(start_session, sessions):
# Adjust the learning rate
adjust_learning_rate(optimizer, session, lr)
# Train on the training set
epochs = 180
ID_counter = 0
for epoch in range(epochs):
z_val_shuffle_1D = np.unique(z_val_shuffle[:, :, epoch])
indices = torch.tensor(z_val_shuffle_1D, dtype = torch.long)
x_train = torch.index_select(x_tensor_training, 0, indices)
y_train = torch.index_select(y_tensor_training_permuted, 0, indices)
y_train = y_train.squeeze(1)
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Accuracy', ':6.2f')
progress = ProgressMeter(epochs, [batch_time, losses, top1], prefix = ("Training >>> Session: " + str(session) + " Epoch: [{}]").format(epoch))
# Switch to training mode
model.train()
with torch.set_grad_enabled(True):
end = time.time()
x_ref = x_tensor_ref.cuda(gpu)
x_train = x_train.cuda(gpu)
y_train = y_train.cuda(gpu)
# Compute output
output = model(x_train, x_ref)
loss = criterion(output, y_train)
# Measure accuracy and record loss
acc1 = accuracy(output, y_train, topk = 1)
losses.update(loss.item(), x_train.size(0))
top1.update(acc1[0], x_train.size(0))
# Compute gradient and perform SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Save the validation accuracy for plotting
all_simulation_training_accuracy_permuted[simulation_counter, group_counter, layer_freeze_counter, epoch] = acc1[0].item()
# Measure elapsed time
batch_time.update(time.time() - end)
progress.display(epoch)
# Remember the best accuracy
is_best = all_simulation_training_accuracy_permuted[simulation_counter, group_counter, layer_freeze_counter, epoch] >= best_acc1
best_acc1 = max(all_simulation_training_accuracy_permuted[simulation_counter, group_counter, layer_freeze_counter, epoch], best_acc1)
if epoch % 10 == 0:
ID_counter = ID_counter + 1
for i in range(num_sample_artiphysiology):
feature_sample_artiphysiology[i, :] = [SF_transfer[x_sample_artiphysiology_index[i, 0]], Ori_transfer[x_sample_artiphysiology_index[i, 1]], x_sample_artiphysiology_index[i, 2]]
index = torch.tensor(z_val_transfer[x_sample_artiphysiology_index[i, 0], x_sample_artiphysiology_index[i, 1], x_sample_artiphysiology_index[i, 2]], dtype = torch.long)
x_sample = torch.index_select(x_tensor_transfer, 0, index)
x_sample = x_sample.cuda(gpu)
unit_activity_layer_0 = model.features[0](x_sample)
unit_activity_layer_1 = model.features[1](unit_activity_layer_0)
unit_activity_layer_2 = model.features[2](unit_activity_layer_1)
unit_activity_layer_3 = model.features[3](unit_activity_layer_2)
unit_activity_layer_4 = model.features[4](unit_activity_layer_3)
unit_activity_layer_5 = model.features[5](unit_activity_layer_4)
unit_activity_layer_6 = model.features[6](unit_activity_layer_5)
unit_activity_layer_7 = model.features[7](unit_activity_layer_6)
unit_activity_layer_8 = model.features[8](unit_activity_layer_7)
unit_activity_layer_9 = model.features[9](unit_activity_layer_8)
unit_activity_layer_10 = model.features[10](unit_activity_layer_9)
unit_activity_layer_11 = model.features[11](unit_activity_layer_10)
unit_activity_layer_12 = model.features[12](unit_activity_layer_11)
all_unit_activity_Conv2d_1[i, :] = unit_activity_layer_0[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_2[i, :] = unit_activity_layer_3[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_3[i, :] = unit_activity_layer_6[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_4[i, :] = unit_activity_layer_8[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_5[i, :] = unit_activity_layer_10[0].detach().cpu().clone().numpy()
### Calculating the intrinsic dimension
all_simulation_all_ID_permuted[simulation_counter, group_counter, 0, layer_freeze_counter, ID_counter] = estimate(squareform(pdist(all_unit_activity_Conv2d_1.reshape(num_sample_artiphysiology, -1)), 'euclidean'), fraction = 1.0)[2]
all_simulation_all_ID_permuted[simulation_counter, group_counter, 1, layer_freeze_counter, ID_counter] = estimate(squareform(pdist(all_unit_activity_Conv2d_2.reshape(num_sample_artiphysiology, -1)), 'euclidean'), fraction = 1.0)[2]
all_simulation_all_ID_permuted[simulation_counter, group_counter, 2, layer_freeze_counter, ID_counter] = estimate(squareform(pdist(all_unit_activity_Conv2d_3.reshape(num_sample_artiphysiology, -1)), 'euclidean'), fraction = 1.0)[2]
all_simulation_all_ID_permuted[simulation_counter, group_counter, 3, layer_freeze_counter, ID_counter] = estimate(squareform(pdist(all_unit_activity_Conv2d_4.reshape(num_sample_artiphysiology, -1)), 'euclidean'), fraction = 1.0)[2]
all_simulation_all_ID_permuted[simulation_counter, group_counter, 4, layer_freeze_counter, ID_counter] = estimate(squareform(pdist(all_unit_activity_Conv2d_5.reshape(num_sample_artiphysiology, -1)), 'euclidean'), fraction = 1.0)[2]
### Saving the main variables
scipy.io.savemat(parent_folder + '/all_simulation_training_accuracy.mat', mdict = {'all_simulation_training_accuracy': all_simulation_training_accuracy})
scipy.io.savemat(parent_folder + '/all_simulation_transfer_accuracy.mat', mdict = {'all_simulation_transfer_accuracy': all_simulation_transfer_accuracy})
scipy.io.savemat(parent_folder + '/all_simulation_all_MI_original.mat', mdict = {'all_simulation_all_MI_original': all_simulation_all_MI_original})
scipy.io.savemat(parent_folder + '/all_simulation_all_MI_noise.mat', mdict = {'all_simulation_all_MI_noise': all_simulation_all_MI_noise})
scipy.io.savemat(parent_folder + '/all_simulation_all_ID.mat', mdict = {'all_simulation_all_ID': all_simulation_all_ID})
scipy.io.savemat(parent_folder + '/all_x_sample_ID.mat', mdict = {'all_x_sample_ID': all_x_sample_ID})
scipy.io.savemat(parent_folder + '/all_simulation_training_accuracy_permuted.mat', mdict = {'all_simulation_training_accuracy_permuted': all_simulation_training_accuracy_permuted})
scipy.io.savemat(parent_folder + '/all_simulation_all_ID_permuted.mat', mdict = {'all_simulation_all_ID_permuted': all_simulation_all_ID_permuted})
scipy.io.savemat(parent_folder + '/all_PCA_explained_variance_layer_1.mat', mdict = {'all_PCA_explained_variance_layer_1': all_PCA_explained_variance_layer_1})
scipy.io.savemat(parent_folder + '/all_PCA_explained_variance_layer_2.mat', mdict = {'all_PCA_explained_variance_layer_2': all_PCA_explained_variance_layer_2})
scipy.io.savemat(parent_folder + '/all_PCA_explained_variance_layer_3.mat', mdict = {'all_PCA_explained_variance_layer_3': all_PCA_explained_variance_layer_3})
scipy.io.savemat(parent_folder + '/all_PCA_explained_variance_layer_4.mat', mdict = {'all_PCA_explained_variance_layer_4': all_PCA_explained_variance_layer_4})
scipy.io.savemat(parent_folder + '/all_PCA_explained_variance_layer_5.mat', mdict = {'all_PCA_explained_variance_layer_5': all_PCA_explained_variance_layer_5})
scipy.io.savemat(parent_folder + '/all_simulation_weight_change_layer_1.mat', mdict = {'all_simulation_weight_change_layer_1': all_simulation_weight_change_layer_1})
scipy.io.savemat(parent_folder + '/all_simulation_weight_change_layer_2.mat', mdict = {'all_simulation_weight_change_layer_2': all_simulation_weight_change_layer_2})
scipy.io.savemat(parent_folder + '/all_simulation_weight_change_layer_3.mat', mdict = {'all_simulation_weight_change_layer_3': all_simulation_weight_change_layer_3})
scipy.io.savemat(parent_folder + '/all_simulation_weight_change_layer_4.mat', mdict = {'all_simulation_weight_change_layer_4': all_simulation_weight_change_layer_4})
scipy.io.savemat(parent_folder + '/all_simulation_weight_change_layer_5.mat', mdict = {'all_simulation_weight_change_layer_5': all_simulation_weight_change_layer_5})
scipy.io.savemat(parent_folder + '/all_simulation_layer_rotation_layer_1.mat', mdict = {'all_simulation_layer_rotation_layer_1': all_simulation_layer_rotation_layer_1})
scipy.io.savemat(parent_folder + '/all_simulation_layer_rotation_layer_2.mat', mdict = {'all_simulation_layer_rotation_layer_2': all_simulation_layer_rotation_layer_2})
scipy.io.savemat(parent_folder + '/all_simulation_layer_rotation_layer_3.mat', mdict = {'all_simulation_layer_rotation_layer_3': all_simulation_layer_rotation_layer_3})
scipy.io.savemat(parent_folder + '/all_simulation_layer_rotation_layer_4.mat', mdict = {'all_simulation_layer_rotation_layer_4': all_simulation_layer_rotation_layer_4})
scipy.io.savemat(parent_folder + '/all_simulation_layer_rotation_layer_5.mat', mdict = {'all_simulation_layer_rotation_layer_5': all_simulation_layer_rotation_layer_5}) | [
"reading_stimuli.reading_stimuli",
"torch.nn.CrossEntropyLoss",
"torch.pow",
"torch.cuda.is_available",
"copy.deepcopy",
"torch.set_grad_enabled",
"sklearn.decomposition.PCA",
"numpy.asarray",
"torch.nn.init.zeros_",
"torch.hub.load_state_dict_from_url",
"os.mkdir",
"numpy.random.permutation",... | [((847, 894), 'torch.hub.load_state_dict_from_url', 'load_state_dict_from_url', (["model_urls['alexnet']"], {}), "(model_urls['alexnet'])\n", (871, 894), False, 'from torch.hub import load_state_dict_from_url\n'), ((3680, 3707), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (3690, 3707), False, 'import torch\n'), ((4171, 4227), 'numpy.zeros', 'np.zeros', (['(num_sample_artiphysiology, 3)'], {'dtype': 'np.int64'}), '((num_sample_artiphysiology, 3), dtype=np.int64)\n', (4179, 4227), True, 'import numpy as np\n'), ((4615, 4707), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer_freeze, 180)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer_freeze, 180), dtype\n =np.float32)\n', (4623, 4707), True, 'import numpy as np\n'), ((4745, 4836), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer_freeze, 10)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer_freeze, 10), dtype=\n np.float32)\n', (4753, 4836), True, 'import numpy as np\n'), ((4872, 4972), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer, number_layer_freeze)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer,\n number_layer_freeze), dtype=np.float32)\n', (4880, 4972), True, 'import numpy as np\n'), ((5006, 5106), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer, number_layer_freeze)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer,\n number_layer_freeze), dtype=np.float32)\n', (5014, 5106), True, 'import numpy as np\n'), ((5134, 5238), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer, number_layer_freeze, 19)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer,\n number_layer_freeze, 19), dtype=np.float32)\n', (5142, 5238), True, 'import numpy as np\n'), ((5260, 5321), 
'numpy.zeros', 'np.zeros', (['(number_simulation, number_group)'], {'dtype': 'np.float32'}), '((number_simulation, number_group), dtype=np.float32)\n', (5268, 5321), True, 'import numpy as np\n'), ((5379, 5471), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer_freeze, 180)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer_freeze, 180), dtype\n =np.float32)\n', (5387, 5471), True, 'import numpy as np\n'), ((5507, 5611), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer, number_layer_freeze, 19)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer,\n number_layer_freeze, 19), dtype=np.float32)\n', (5515, 5611), True, 'import numpy as np\n'), ((5662, 5770), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer_freeze, number_PCA_component)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer_freeze,\n number_PCA_component), dtype=np.float32)\n', (5670, 5770), True, 'import numpy as np\n'), ((5811, 5919), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer_freeze, number_PCA_component)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer_freeze,\n number_PCA_component), dtype=np.float32)\n', (5819, 5919), True, 'import numpy as np\n'), ((5960, 6068), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer_freeze, number_PCA_component)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer_freeze,\n number_PCA_component), dtype=np.float32)\n', (5968, 6068), True, 'import numpy as np\n'), ((6109, 6217), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer_freeze, number_PCA_component)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer_freeze,\n number_PCA_component), dtype=np.float32)\n', (6117, 6217), True, 'import numpy as np\n'), ((6258, 6366), 'numpy.zeros', 'np.zeros', 
(['(number_simulation, number_group, number_layer_freeze, number_PCA_component)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer_freeze,\n number_PCA_component), dtype=np.float32)\n', (6266, 6366), True, 'import numpy as np\n'), ((6415, 6507), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer_freeze, 180)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer_freeze, 180), dtype\n =np.float32)\n', (6423, 6507), True, 'import numpy as np\n'), ((6549, 6641), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer_freeze, 180)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer_freeze, 180), dtype\n =np.float32)\n', (6557, 6641), True, 'import numpy as np\n'), ((6683, 6775), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer_freeze, 180)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer_freeze, 180), dtype\n =np.float32)\n', (6691, 6775), True, 'import numpy as np\n'), ((6817, 6909), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer_freeze, 180)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer_freeze, 180), dtype\n =np.float32)\n', (6825, 6909), True, 'import numpy as np\n'), ((6951, 7043), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer_freeze, 180)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer_freeze, 180), dtype\n =np.float32)\n', (6959, 7043), True, 'import numpy as np\n'), ((7092, 7184), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer_freeze, 180)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer_freeze, 180), dtype\n =np.float32)\n', (7100, 7184), True, 'import numpy as np\n'), ((7227, 7319), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer_freeze, 180)'], {'dtype': 'np.float32'}), 
'((number_simulation, number_group, number_layer_freeze, 180), dtype\n =np.float32)\n', (7235, 7319), True, 'import numpy as np\n'), ((7362, 7454), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer_freeze, 180)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer_freeze, 180), dtype\n =np.float32)\n', (7370, 7454), True, 'import numpy as np\n'), ((7497, 7589), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer_freeze, 180)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer_freeze, 180), dtype\n =np.float32)\n', (7505, 7589), True, 'import numpy as np\n'), ((7632, 7724), 'numpy.zeros', 'np.zeros', (['(number_simulation, number_group, number_layer_freeze, 180)'], {'dtype': 'np.float32'}), '((number_simulation, number_group, number_layer_freeze, 180), dtype\n =np.float32)\n', (7640, 7724), True, 'import numpy as np\n'), ((7733, 7756), 'os.mkdir', 'os.mkdir', (['parent_folder'], {}), '(parent_folder)\n', (7741, 7756), False, 'import os\n'), ((2800, 2815), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2813, 2815), False, 'import torch\n'), ((3734, 3799), 'shutil.copyfile', 'shutil.copyfile', (['filename', "('DNNforVPL_best_' + group + '.pth.tar')"], {}), "(filename, 'DNNforVPL_best_' + group + '.pth.tar')\n", (3749, 3799), False, 'import shutil\n'), ((4334, 4353), 'random.randrange', 'random.randrange', (['(1)'], {}), '(1)\n', (4350, 4353), False, 'import random\n'), ((4401, 4421), 'random.randrange', 'random.randrange', (['(20)'], {}), '(20)\n', (4417, 4421), False, 'import random\n'), ((4469, 4490), 'random.randrange', 'random.randrange', (['(180)'], {}), '(180)\n', (4485, 4490), False, 'import random\n'), ((8109, 8121), 'gc.collect', 'gc.collect', ([], {}), '()\n', (8119, 8121), False, 'import gc\n'), ((10441, 10573), 'reading_stimuli.reading_stimuli', 'reading_stimuli', ([], {'file_names': 'file_names', 'file_name_paths': 'file_name_paths', 'orientation': 
'Ori_training', 'spatial_frequency': 'SF_training'}), '(file_names=file_names, file_name_paths=file_name_paths,\n orientation=Ori_training, spatial_frequency=SF_training)\n', (10456, 10573), False, 'from reading_stimuli import reading_stimuli\n'), ((10629, 10659), 'torch.stack', 'torch.stack', (['x_tensor_training'], {}), '(x_tensor_training)\n', (10640, 10659), False, 'import torch\n'), ((10693, 10723), 'torch.stack', 'torch.stack', (['y_tensor_training'], {}), '(y_tensor_training)\n', (10704, 10723), False, 'import torch\n'), ((12977, 13109), 'reading_stimuli.reading_stimuli', 'reading_stimuli', ([], {'file_names': 'file_names', 'file_name_paths': 'file_name_paths', 'orientation': 'Ori_transfer', 'spatial_frequency': 'SF_transfer'}), '(file_names=file_names, file_name_paths=file_name_paths,\n orientation=Ori_transfer, spatial_frequency=SF_transfer)\n', (12992, 13109), False, 'from reading_stimuli import reading_stimuli\n'), ((13165, 13195), 'torch.stack', 'torch.stack', (['x_tensor_transfer'], {}), '(x_tensor_transfer)\n', (13176, 13195), False, 'import torch\n'), ((13229, 13259), 'torch.stack', 'torch.stack', (['y_tensor_transfer'], {}), '(y_tensor_transfer)\n', (13240, 13259), False, 'import torch\n'), ((10281, 10300), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (10297, 10300), False, 'import os\n'), ((12817, 12836), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (12833, 12836), False, 'import os\n'), ((13886, 13927), 'numpy.zeros', 'np.zeros', (['(224, 224, 3)'], {'dtype': 'np.float32'}), '((224, 224, 3), dtype=np.float32)\n', (13894, 13927), True, 'import numpy as np\n'), ((15103, 15178), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (15123, 15178), True, 'import torchvision.transforms as transforms\n'), ((15386, 15411), 'torch.stack', 'torch.stack', (['x_tensor_ref'], {}), 
'(x_tensor_ref)\n', (15397, 15411), False, 'import torch\n'), ((15859, 15870), 'original_variant_AlexNet_model.DNNforVPL', 'DNNforVPL', ([], {}), '()\n', (15868, 15870), False, 'from original_variant_AlexNet_model import DNNforVPL\n'), ((16450, 16492), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['model.classifier[0].weight'], {}), '(model.classifier[0].weight)\n', (16464, 16492), True, 'import torch.nn as nn\n'), ((16510, 16550), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['model.classifier[0].bias'], {}), '(model.classifier[0].bias)\n', (16524, 16550), True, 'import torch.nn as nn\n'), ((25007, 25046), 'copy.deepcopy', 'copy.deepcopy', (['model.features[0].weight'], {}), '(model.features[0].weight)\n', (25020, 25046), False, 'import copy\n'), ((25077, 25116), 'copy.deepcopy', 'copy.deepcopy', (['model.features[3].weight'], {}), '(model.features[3].weight)\n', (25090, 25116), False, 'import copy\n'), ((25147, 25186), 'copy.deepcopy', 'copy.deepcopy', (['model.features[6].weight'], {}), '(model.features[6].weight)\n', (25160, 25186), False, 'import copy\n'), ((25217, 25256), 'copy.deepcopy', 'copy.deepcopy', (['model.features[8].weight'], {}), '(model.features[8].weight)\n', (25230, 25256), False, 'import copy\n'), ((25287, 25327), 'copy.deepcopy', 'copy.deepcopy', (['model.features[10].weight'], {}), '(model.features[10].weight)\n', (25300, 25327), False, 'import copy\n'), ((25520, 25549), 'copy.deepcopy', 'copy.deepcopy', (['z_val_training'], {}), '(z_val_training)\n', (25533, 25549), False, 'import copy\n'), ((36345, 36386), 'numpy.zeros', 'np.zeros', (['(224, 224, 3)'], {'dtype': 'np.float32'}), '((224, 224, 3), dtype=np.float32)\n', (36353, 36386), True, 'import numpy as np\n'), ((37562, 37637), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (37582, 37637), True, 'import torchvision.transforms as transforms\n'), 
((37845, 37870), 'torch.stack', 'torch.stack', (['x_tensor_ref'], {}), '(x_tensor_ref)\n', (37856, 37870), False, 'import torch\n'), ((42633, 42689), 'numpy.zeros', 'np.zeros', (['(num_sample_artiphysiology, 3)'], {'dtype': 'np.int64'}), '((num_sample_artiphysiology, 3), dtype=np.int64)\n', (42641, 42689), True, 'import numpy as np\n'), ((42756, 42823), 'numpy.zeros', 'np.zeros', (['(num_sample_artiphysiology, 64, 55, 55)'], {'dtype': 'np.float32'}), '((num_sample_artiphysiology, 64, 55, 55), dtype=np.float32)\n', (42764, 42823), True, 'import numpy as np\n'), ((42872, 42940), 'numpy.zeros', 'np.zeros', (['(num_sample_artiphysiology, 192, 27, 27)'], {'dtype': 'np.float32'}), '((num_sample_artiphysiology, 192, 27, 27), dtype=np.float32)\n', (42880, 42940), True, 'import numpy as np\n'), ((42989, 43057), 'numpy.zeros', 'np.zeros', (['(num_sample_artiphysiology, 384, 13, 13)'], {'dtype': 'np.float32'}), '((num_sample_artiphysiology, 384, 13, 13), dtype=np.float32)\n', (42997, 43057), True, 'import numpy as np\n'), ((43106, 43174), 'numpy.zeros', 'np.zeros', (['(num_sample_artiphysiology, 256, 13, 13)'], {'dtype': 'np.float32'}), '((num_sample_artiphysiology, 256, 13, 13), dtype=np.float32)\n', (43114, 43174), True, 'import numpy as np\n'), ((43223, 43291), 'numpy.zeros', 'np.zeros', (['(num_sample_artiphysiology, 256, 13, 13)'], {'dtype': 'np.float32'}), '((num_sample_artiphysiology, 256, 13, 13), dtype=np.float32)\n', (43231, 43291), True, 'import numpy as np\n'), ((15029, 15072), 'numpy.transpose', 'np.transpose', (['x_val_ref[:, :, :]', '(2, 0, 1)'], {}), '(x_val_ref[:, :, :], (2, 0, 1))\n', (15041, 15072), True, 'import numpy as np\n'), ((18218, 18274), 'numpy.zeros', 'np.zeros', (['(num_sample_artiphysiology, 3)'], {'dtype': 'np.int64'}), '((num_sample_artiphysiology, 3), dtype=np.int64)\n', (18226, 18274), True, 'import numpy as np\n'), ((18335, 18403), 'numpy.zeros', 'np.zeros', (['(num_sample_artiphysiology, 3, 224, 224)'], {'dtype': 'np.float32'}), 
'((num_sample_artiphysiology, 3, 224, 224), dtype=np.float32)\n', (18343, 18403), True, 'import numpy as np\n'), ((18478, 18545), 'numpy.zeros', 'np.zeros', (['(num_sample_artiphysiology, 64, 55, 55)'], {'dtype': 'np.float32'}), '((num_sample_artiphysiology, 64, 55, 55), dtype=np.float32)\n', (18486, 18545), True, 'import numpy as np\n'), ((18598, 18666), 'numpy.zeros', 'np.zeros', (['(num_sample_artiphysiology, 192, 27, 27)'], {'dtype': 'np.float32'}), '((num_sample_artiphysiology, 192, 27, 27), dtype=np.float32)\n', (18606, 18666), True, 'import numpy as np\n'), ((18719, 18787), 'numpy.zeros', 'np.zeros', (['(num_sample_artiphysiology, 384, 13, 13)'], {'dtype': 'np.float32'}), '((num_sample_artiphysiology, 384, 13, 13), dtype=np.float32)\n', (18727, 18787), True, 'import numpy as np\n'), ((18840, 18908), 'numpy.zeros', 'np.zeros', (['(num_sample_artiphysiology, 256, 13, 13)'], {'dtype': 'np.float32'}), '((num_sample_artiphysiology, 256, 13, 13), dtype=np.float32)\n', (18848, 18908), True, 'import numpy as np\n'), ((18961, 19029), 'numpy.zeros', 'np.zeros', (['(num_sample_artiphysiology, 256, 13, 13)'], {'dtype': 'np.float32'}), '((num_sample_artiphysiology, 256, 13, 13), dtype=np.float32)\n', (18969, 19029), True, 'import numpy as np\n'), ((37488, 37531), 'numpy.transpose', 'np.transpose', (['x_val_ref[:, :, :]', '(2, 0, 1)'], {}), '(x_val_ref[:, :, :], (2, 0, 1))\n', (37500, 37531), True, 'import numpy as np\n'), ((38917, 38946), 'copy.deepcopy', 'copy.deepcopy', (['z_val_transfer'], {}), '(z_val_transfer)\n', (38930, 38946), False, 'import copy\n'), ((39264, 39303), 'numpy.unique', 'np.unique', (['z_val_shuffle[:, :, session]'], {}), '(z_val_shuffle[:, :, session])\n', (39273, 39303), True, 'import numpy as np\n'), ((39335, 39383), 'torch.tensor', 'torch.tensor', (['z_val_shuffle_1D'], {'dtype': 'torch.long'}), '(z_val_shuffle_1D, dtype=torch.long)\n', (39347, 39383), False, 'import torch\n'), ((39417, 39466), 'torch.index_select', 'torch.index_select', 
(['x_tensor_transfer', '(0)', 'indices'], {}), '(x_tensor_transfer, 0, indices)\n', (39435, 39466), False, 'import torch\n'), ((39498, 39547), 'torch.index_select', 'torch.index_select', (['y_tensor_transfer', '(0)', 'indices'], {}), '(y_tensor_transfer, 0, indices)\n', (39516, 39547), False, 'import torch\n'), ((43649, 43815), 'torch.tensor', 'torch.tensor', (['z_val_transfer[x_sample_artiphysiology_index[i, 0],\n x_sample_artiphysiology_index[i, 1], x_sample_artiphysiology_index[i, 2]]'], {'dtype': 'torch.long'}), '(z_val_transfer[x_sample_artiphysiology_index[i, 0],\n x_sample_artiphysiology_index[i, 1], x_sample_artiphysiology_index[i, 2\n ]], dtype=torch.long)\n', (43661, 43815), False, 'import torch\n'), ((43841, 43888), 'torch.index_select', 'torch.index_select', (['x_tensor_transfer', '(0)', 'index'], {}), '(x_tensor_transfer, 0, index)\n', (43859, 43888), False, 'import torch\n'), ((59607, 59648), 'numpy.zeros', 'np.zeros', (['(224, 224, 3)'], {'dtype': 'np.float32'}), '((224, 224, 3), dtype=np.float32)\n', (59615, 59648), True, 'import numpy as np\n'), ((60924, 60999), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (60944, 60999), True, 'import torchvision.transforms as transforms\n'), ((61227, 61252), 'torch.stack', 'torch.stack', (['x_tensor_ref'], {}), '(x_tensor_ref)\n', (61238, 61252), False, 'import torch\n'), ((61397, 61408), 'original_variant_AlexNet_model.DNNforVPL', 'DNNforVPL', ([], {}), '()\n', (61406, 61408), False, 'from original_variant_AlexNet_model import DNNforVPL\n'), ((62036, 62078), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['model.classifier[0].weight'], {}), '(model.classifier[0].weight)\n', (62050, 62078), True, 'import torch.nn as nn\n'), ((62100, 62140), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['model.classifier[0].bias'], {}), '(model.classifier[0].bias)\n', (62114, 62140), True, 
'import torch.nn as nn\n'), ((63527, 63559), 'copy.deepcopy', 'copy.deepcopy', (['y_tensor_training'], {}), '(y_tensor_training)\n', (63540, 63559), False, 'import copy\n'), ((14036, 14069), 'PIL.Image.open', 'Image.open', (['file_name_path_ref[0]'], {}), '(file_name_path_ref[0])\n', (14046, 14069), False, 'from PIL import Image\n'), ((14591, 14606), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (14601, 14606), True, 'import numpy as np\n'), ((15673, 15698), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15696, 15698), False, 'import torch\n'), ((19407, 19573), 'torch.tensor', 'torch.tensor', (['z_val_transfer[x_sample_artiphysiology_index[i, 0],\n x_sample_artiphysiology_index[i, 1], x_sample_artiphysiology_index[i, 2]]'], {'dtype': 'torch.long'}), '(z_val_transfer[x_sample_artiphysiology_index[i, 0],\n x_sample_artiphysiology_index[i, 1], x_sample_artiphysiology_index[i, 2\n ]], dtype=torch.long)\n', (19419, 19573), False, 'import torch\n'), ((19603, 19650), 'torch.index_select', 'torch.index_select', (['x_tensor_transfer', '(0)', 'index'], {}), '(x_tensor_transfer, 0, index)\n', (19621, 19650), False, 'import torch\n'), ((24720, 24741), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (24739, 24741), True, 'import torch.nn as nn\n'), ((25704, 25742), 'random.shuffle', 'random.shuffle', (['z_val_shuffle[i, j, :]'], {}), '(z_val_shuffle[i, j, :])\n', (25718, 25742), False, 'import random\n'), ((26249, 26286), 'numpy.unique', 'np.unique', (['z_val_shuffle[:, :, epoch]'], {}), '(z_val_shuffle[:, :, epoch])\n', (26258, 26286), True, 'import numpy as np\n'), ((26322, 26370), 'torch.tensor', 'torch.tensor', (['z_val_shuffle_1D'], {'dtype': 'torch.long'}), '(z_val_shuffle_1D, dtype=torch.long)\n', (26334, 26370), False, 'import torch\n'), ((26408, 26457), 'torch.index_select', 'torch.index_select', (['x_tensor_training', '(0)', 'indices'], {}), '(x_tensor_training, 0, indices)\n', (26426, 26457), False, 'import 
torch\n'), ((26493, 26542), 'torch.index_select', 'torch.index_select', (['y_tensor_training', '(0)', 'indices'], {}), '(y_tensor_training, 0, indices)\n', (26511, 26542), False, 'import torch\n'), ((30599, 30651), 'layer_rotation.layer_rotation', 'layer_rotation', (['model.features[0].weight', 'Conv2d_1_0'], {}), '(model.features[0].weight, Conv2d_1_0)\n', (30613, 30651), False, 'from layer_rotation import layer_rotation\n'), ((30781, 30833), 'layer_rotation.layer_rotation', 'layer_rotation', (['model.features[3].weight', 'Conv2d_2_0'], {}), '(model.features[3].weight, Conv2d_2_0)\n', (30795, 30833), False, 'from layer_rotation import layer_rotation\n'), ((30963, 31015), 'layer_rotation.layer_rotation', 'layer_rotation', (['model.features[6].weight', 'Conv2d_3_0'], {}), '(model.features[6].weight, Conv2d_3_0)\n', (30977, 31015), False, 'from layer_rotation import layer_rotation\n'), ((31145, 31197), 'layer_rotation.layer_rotation', 'layer_rotation', (['model.features[8].weight', 'Conv2d_4_0'], {}), '(model.features[8].weight, Conv2d_4_0)\n', (31159, 31197), False, 'from layer_rotation import layer_rotation\n'), ((31327, 31380), 'layer_rotation.layer_rotation', 'layer_rotation', (['model.features[10].weight', 'Conv2d_5_0'], {}), '(model.features[10].weight, Conv2d_5_0)\n', (31341, 31380), False, 'from layer_rotation import layer_rotation\n'), ((36495, 36528), 'PIL.Image.open', 'Image.open', (['file_name_path_ref[0]'], {}), '(file_name_path_ref[0])\n', (36505, 36528), False, 'from PIL import Image\n'), ((37050, 37065), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (37060, 37065), True, 'import numpy as np\n'), ((38101, 38126), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (38124, 38126), False, 'import torch\n'), ((40117, 40132), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (40130, 40132), False, 'import torch\n'), ((40165, 40176), 'time.time', 'time.time', ([], {}), '()\n', (40174, 40176), False, 'import time\n'), 
((46104, 46142), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'number_PCA_component'}), '(n_components=number_PCA_component)\n', (46107, 46142), False, 'from sklearn.decomposition import PCA\n'), ((46247, 46285), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'number_PCA_component'}), '(n_components=number_PCA_component)\n', (46250, 46285), False, 'from sklearn.decomposition import PCA\n'), ((46390, 46428), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'number_PCA_component'}), '(n_components=number_PCA_component)\n', (46393, 46428), False, 'from sklearn.decomposition import PCA\n'), ((46533, 46571), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'number_PCA_component'}), '(n_components=number_PCA_component)\n', (46536, 46571), False, 'from sklearn.decomposition import PCA\n'), ((46676, 46714), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'number_PCA_component'}), '(n_components=number_PCA_component)\n', (46679, 46714), False, 'from sklearn.decomposition import PCA\n'), ((60846, 60889), 'numpy.transpose', 'np.transpose', (['x_val_ref[:, :, :]', '(2, 0, 1)'], {}), '(x_val_ref[:, :, :], (2, 0, 1))\n', (60858, 60889), True, 'import numpy as np\n'), ((9967, 9988), 'os.path.abspath', 'os.path.abspath', (['"""./"""'], {}), "('./')\n", (9982, 9988), False, 'import os\n'), ((12499, 12520), 'os.path.abspath', 'os.path.abspath', (['"""./"""'], {}), "('./')\n", (12514, 12520), False, 'import os\n'), ((13719, 13740), 'os.path.abspath', 'os.path.abspath', (['"""./"""'], {}), "('./')\n", (13734, 13740), False, 'import os\n'), ((27161, 27189), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (27183, 27189), False, 'import torch\n'), ((27226, 27237), 'time.time', 'time.time', ([], {}), '()\n', (27235, 27237), False, 'import time\n'), ((36170, 36191), 'os.path.abspath', 'os.path.abspath', (['"""./"""'], {}), "('./')\n", (36185, 36191), False, 'import os\n'), ((39113, 39151), 
'random.shuffle', 'random.shuffle', (['z_val_shuffle[j, k, :]'], {}), '(z_val_shuffle[j, k, :])\n', (39127, 39151), False, 'import random\n'), ((49396, 49422), 'numpy.random.permutation', 'np.random.permutation', (['(180)'], {}), '(180)\n', (49417, 49422), True, 'import numpy as np\n'), ((49648, 49710), 'torch.tensor', 'torch.tensor', (['z_val_training[i, j, phase[k]]'], {'dtype': 'torch.long'}), '(z_val_training[i, j, phase[k]], dtype=torch.long)\n', (49660, 49710), False, 'import torch\n'), ((49932, 49992), 'torch.index_select', 'torch.index_select', (['x_tensor_training', '(0)', 'indices_training_1'], {}), '(x_tensor_training, 0, indices_training_1)\n', (49950, 49992), False, 'import torch\n'), ((59773, 59806), 'PIL.Image.open', 'Image.open', (['file_name_path_ref[0]'], {}), '(file_name_path_ref[0])\n', (59783, 59806), False, 'from PIL import Image\n'), ((60376, 60391), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (60386, 60391), True, 'import numpy as np\n'), ((63090, 63111), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (63109, 63111), True, 'import torch.nn as nn\n'), ((64322, 64359), 'numpy.unique', 'np.unique', (['z_val_shuffle[:, :, epoch]'], {}), '(z_val_shuffle[:, :, epoch])\n', (64331, 64359), True, 'import numpy as np\n'), ((64399, 64447), 'torch.tensor', 'torch.tensor', (['z_val_shuffle_1D'], {'dtype': 'torch.long'}), '(z_val_shuffle_1D, dtype=torch.long)\n', (64411, 64447), False, 'import torch\n'), ((64489, 64538), 'torch.index_select', 'torch.index_select', (['x_tensor_training', '(0)', 'indices'], {}), '(x_tensor_training, 0, indices)\n', (64507, 64538), False, 'import torch\n'), ((64578, 64636), 'torch.index_select', 'torch.index_select', (['y_tensor_training_permuted', '(0)', 'indices'], {}), '(y_tensor_training_permuted, 0, indices)\n', (64596, 64636), False, 'import torch\n'), ((10172, 10193), 'os.path.abspath', 'os.path.abspath', (['"""./"""'], {}), "('./')\n", (10187, 10193), False, 'import os\n'), 
((12706, 12727), 'os.path.abspath', 'os.path.abspath', (['"""./"""'], {}), "('./')\n", (12721, 12727), False, 'import os\n'), ((31966, 32132), 'torch.tensor', 'torch.tensor', (['z_val_transfer[x_sample_artiphysiology_index[i, 0],\n x_sample_artiphysiology_index[i, 1], x_sample_artiphysiology_index[i, 2]]'], {'dtype': 'torch.long'}), '(z_val_transfer[x_sample_artiphysiology_index[i, 0],\n x_sample_artiphysiology_index[i, 1], x_sample_artiphysiology_index[i, 2\n ]], dtype=torch.long)\n', (31978, 32132), False, 'import torch\n'), ((32170, 32217), 'torch.index_select', 'torch.index_select', (['x_tensor_transfer', '(0)', 'index'], {}), '(x_tensor_transfer, 0, index)\n', (32188, 32217), False, 'import torch\n'), ((41167, 41178), 'time.time', 'time.time', ([], {}), '()\n', (41176, 41178), False, 'import time\n'), ((59428, 59449), 'os.path.abspath', 'os.path.abspath', (['"""./"""'], {}), "('./')\n", (59443, 59449), False, 'import os\n'), ((65299, 65327), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (65321, 65327), False, 'import torch\n'), ((65368, 65379), 'time.time', 'time.time', ([], {}), '()\n', (65377, 65379), False, 'import time\n'), ((28523, 28534), 'time.time', 'time.time', ([], {}), '()\n', (28532, 28534), False, 'import time\n'), ((67832, 67998), 'torch.tensor', 'torch.tensor', (['z_val_transfer[x_sample_artiphysiology_index[i, 0],\n x_sample_artiphysiology_index[i, 1], x_sample_artiphysiology_index[i, 2]]'], {'dtype': 'torch.long'}), '(z_val_transfer[x_sample_artiphysiology_index[i, 0],\n x_sample_artiphysiology_index[i, 1], x_sample_artiphysiology_index[i, 2\n ]], dtype=torch.long)\n', (67844, 67998), False, 'import torch\n'), ((68040, 68087), 'torch.index_select', 'torch.index_select', (['x_tensor_transfer', '(0)', 'index'], {}), '(x_tensor_transfer, 0, index)\n', (68058, 68087), False, 'import torch\n'), ((66770, 66781), 'time.time', 'time.time', ([], {}), '()\n', (66779, 66781), False, 'import time\n'), ((29171, 29222), 
'torch.pow', 'torch.pow', (['(model.features[0].weight - Conv2d_1_0)', '(2)'], {}), '(model.features[0].weight - Conv2d_1_0, 2)\n', (29180, 29222), False, 'import torch\n'), ((29252, 29290), 'torch.pow', 'torch.pow', (['model.features[0].weight', '(2)'], {}), '(model.features[0].weight, 2)\n', (29261, 29290), False, 'import torch\n'), ((29455, 29506), 'torch.pow', 'torch.pow', (['(model.features[3].weight - Conv2d_2_0)', '(2)'], {}), '(model.features[3].weight - Conv2d_2_0, 2)\n', (29464, 29506), False, 'import torch\n'), ((29536, 29574), 'torch.pow', 'torch.pow', (['model.features[3].weight', '(2)'], {}), '(model.features[3].weight, 2)\n', (29545, 29574), False, 'import torch\n'), ((29739, 29790), 'torch.pow', 'torch.pow', (['(model.features[6].weight - Conv2d_3_0)', '(2)'], {}), '(model.features[6].weight - Conv2d_3_0, 2)\n', (29748, 29790), False, 'import torch\n'), ((29820, 29858), 'torch.pow', 'torch.pow', (['model.features[6].weight', '(2)'], {}), '(model.features[6].weight, 2)\n', (29829, 29858), False, 'import torch\n'), ((30023, 30074), 'torch.pow', 'torch.pow', (['(model.features[8].weight - Conv2d_4_0)', '(2)'], {}), '(model.features[8].weight - Conv2d_4_0, 2)\n', (30032, 30074), False, 'import torch\n'), ((30104, 30142), 'torch.pow', 'torch.pow', (['model.features[8].weight', '(2)'], {}), '(model.features[8].weight, 2)\n', (30113, 30142), False, 'import torch\n'), ((30307, 30359), 'torch.pow', 'torch.pow', (['(model.features[10].weight - Conv2d_5_0)', '(2)'], {}), '(model.features[10].weight - Conv2d_5_0, 2)\n', (30316, 30359), False, 'import torch\n'), ((30389, 30428), 'torch.pow', 'torch.pow', (['model.features[10].weight', '(2)'], {}), '(model.features[10].weight, 2)\n', (30398, 30428), False, 'import torch\n'), ((51399, 51459), 'torch.index_select', 'torch.index_select', (['x_tensor_training', '(0)', 'indices_training_1'], {}), '(x_tensor_training, 0, indices_training_1)\n', (51417, 51459), False, 'import torch\n'), ((51559, 51619), 
'torch.index_select', 'torch.index_select', (['x_tensor_training', '(0)', 'indices_training_1'], {}), '(x_tensor_training, 0, indices_training_1)\n', (51577, 51619), False, 'import torch\n'), ((51622, 51682), 'torch.index_select', 'torch.index_select', (['x_tensor_training', '(0)', 'indices_training_2'], {}), '(x_tensor_training, 0, indices_training_2)\n', (51640, 51682), False, 'import torch\n')] |
import numpy as np
import random
from ccnlab.baselines.core import Model, ValueBasedModel
class RescorlaWagner(ValueBasedModel):
    """Rescorla-Wagner model: linear value prediction trained with the delta rule."""
    def __init__(self, cs_dim, ctx_dim, alpha=0.3):
        super().__init__()
        self.alpha = alpha  # Learning rate.
        self.cs_dim = cs_dim
        self.ctx_dim = ctx_dim
        # Total stimulus dimensionality: punctate (CS) plus contextual cues.
        self.D = self.cs_dim + self.ctx_dim
        self.reset()

    def reset(self):
        # Start every experiment from zero associative weights.
        self.w = np.zeros((self.D,))

    def _value(self, cs, ctx, us, t):
        stimulus = np.array(cs + ctx)
        value = self.w.dot(stimulus)  # Prediction made before the weights change.
        self._update(x=stimulus, r=us)
        return value

    def _update(self, x, r):
        delta = r - self.w.dot(x)  # Reward prediction error.
        self.w = self.w + self.alpha * delta * x  # Delta-rule weight update.
class TemporalDifference(ValueBasedModel):
    """TD(0) model over a complete serial compound (one cue slot per timestep)."""
    def __init__(self, cs_dim, ctx_dim, num_timesteps, alpha=0.3, gamma=0.98):
        super().__init__()
        self.alpha = alpha  # Learning rate.
        self.gamma = gamma  # Discount factor.
        self.cs_dim = cs_dim
        self.ctx_dim = ctx_dim
        self.T = num_timesteps
        # Stimulus dimensions per timestep: punctate and contextual cues.
        self.D = self.cs_dim + self.ctx_dim
        self.reset()

    def reset(self):
        size = self.D * self.T
        self.w = np.zeros((size,))
        self.last_x = np.zeros((size,))  # Input seen on the previous step.
        self.last_r = 0  # Reward seen on the previous step.

    def _value(self, cs, ctx, us, t):
        size = self.D * self.T
        if t == 0:
            # The first timestep of a trial has no predecessor input.
            self.last_x = np.zeros((size,))
        # Complete serial compound: the cue vector occupies the slot for timestep t.
        x = np.zeros((size,))
        x[t * self.D:(t + 1) * self.D] = cs + ctx
        v = self.w.dot(x)  # Value before update.
        self._update(x=x, r=us)
        if t + 1 == self.T:
            # End of trial: flush the pending update for the last seen input.
            self._update(x=np.zeros((size,)), r=0)
        return v

    def _update(self, x, r):
        # Update is for the *previous* input, because TD needs the successor's value.
        rpe = self.last_r + self.gamma * self.w.dot(x) - self.w.dot(self.last_x)
        self.w = self.w + self.alpha * rpe * self.last_x  # Weight update.
        self.last_x = x
        self.last_r = r
class KalmanFilter(ValueBasedModel):
    """Kalman-filter model of conditioning.

    Maintains a Gaussian posterior (mean ``w``, covariance ``S``) over the
    associative weights; the Kalman gain gives cue-specific learning rates
    that grow with weight uncertainty.
    """
    def __init__(self, cs_dim, ctx_dim, tau2=0.01, sigma_r2=1, sigma_w2=1):
        super().__init__()
        self.cs_dim = cs_dim
        self.ctx_dim = ctx_dim
        self.D = self.cs_dim + self.ctx_dim  # Stimulus dimensions: punctate and contextual cues.
        self.tau2 = tau2  # Diffusion/transition variance.
        self.sigma_r2 = sigma_r2  # Noise variance.
        self.sigma_w2 = sigma_w2  # Prior variance.
        self.Q = self.tau2 * np.identity(self.D)  # Transition covariance.
        self.reset()

    def reset(self):
        self.w = np.zeros((self.D,))  # Mean weights.
        self.S = self.sigma_w2 * np.identity(self.D)  # Weight covariance.

    def _value(self, cs, ctx, us, t):
        x = np.array(cs + ctx)
        v = self.w.dot(x)  # Value before update.
        self._update(x=x, r=us)
        return v

    def _update(self, x, r):
        rpe = r - self.w.dot(x)  # Reward prediction error.
        S = self.S + self.Q  # Prior covariance.
        R = x.dot(S).dot(x) + self.sigma_r2  # Residual covariance (scalar).
        k = S.dot(x) / R  # Kalman gain.
        self.w = self.w + k * rpe  # Weight update.
        # Posterior covariance: (I - k x^T) S. BUGFIX: the previous form
        # `k.dot(x) * S` multiplied S by the *scalar* k·x, uniformly shrinking
        # the whole covariance instead of applying the rank-one Kalman
        # correction along the observed cue direction.
        self.S = S - np.outer(k, x).dot(S)
class RandomModel(Model):
    """Produces response with probability that changes linearly with each US."""
    def __init__(self, start=0.2, delta=0.1, min_prob=0.1, max_prob=0.9):
        self.prob = start  # Current response probability.
        self.start = start
        self.delta = delta
        self.min_prob = min_prob
        self.max_prob = max_prob

    def reset(self):
        # Restore the response probability to its initial value.
        self.prob = self.start

    def act(self, cs, ctx, us, t):
        if us > 0:
            # A US always elicits a response and nudges the probability upward,
            # clamped to the [min_prob, max_prob] interval.
            bumped = self.prob + self.delta
            self.prob = max(min(bumped, self.max_prob), self.min_prob)
            return 1
        if len(cs) > 0:
            # Respond stochastically to a CS with the current probability.
            draw = random.choices([1, 0], weights=[self.prob, 1 - self.prob])
            return draw[0]
        return 0
| [
"numpy.identity",
"numpy.array",
"numpy.zeros",
"random.choices"
] | [((441, 460), 'numpy.zeros', 'np.zeros', (['(self.D,)'], {}), '((self.D,))\n', (449, 460), True, 'import numpy as np\n'), ((506, 524), 'numpy.array', 'np.array', (['(cs + ctx)'], {}), '(cs + ctx)\n', (514, 524), True, 'import numpy as np\n'), ((1209, 1237), 'numpy.zeros', 'np.zeros', (['(self.D * self.T,)'], {}), '((self.D * self.T,))\n', (1217, 1237), True, 'import numpy as np\n'), ((1256, 1284), 'numpy.zeros', 'np.zeros', (['(self.D * self.T,)'], {}), '((self.D * self.T,))\n', (1264, 1284), True, 'import numpy as np\n'), ((1495, 1523), 'numpy.zeros', 'np.zeros', (['(self.D * self.T,)'], {}), '((self.D * self.T,))\n', (1503, 1523), True, 'import numpy as np\n'), ((2690, 2709), 'numpy.zeros', 'np.zeros', (['(self.D,)'], {}), '((self.D,))\n', (2698, 2709), True, 'import numpy as np\n'), ((2843, 2861), 'numpy.array', 'np.array', (['(cs + ctx)'], {}), '(cs + ctx)\n', (2851, 2861), True, 'import numpy as np\n'), ((1416, 1444), 'numpy.zeros', 'np.zeros', (['(self.D * self.T,)'], {}), '((self.D * self.T,))\n', (1424, 1444), True, 'import numpy as np\n'), ((2594, 2613), 'numpy.identity', 'np.identity', (['self.D'], {}), '(self.D)\n', (2605, 2613), True, 'import numpy as np\n'), ((2756, 2775), 'numpy.identity', 'np.identity', (['self.D'], {}), '(self.D)\n', (2767, 2775), True, 'import numpy as np\n'), ((3812, 3870), 'random.choices', 'random.choices', (['[1, 0]'], {'weights': '[self.prob, 1 - self.prob]'}), '([1, 0], weights=[self.prob, 1 - self.prob])\n', (3826, 3870), False, 'import random\n'), ((1733, 1761), 'numpy.zeros', 'np.zeros', (['(self.D * self.T,)'], {}), '((self.D * self.T,))\n', (1741, 1761), True, 'import numpy as np\n')] |
"""
Run a PD-controller with the parameter from Quanser on the real device.
By default all controllers in this script run infinitely.
"""
import torch as to
import numpy as np
import pyrado
from pyrado.environments.pysim.quanser_qube import QQubeSim
from pyrado.environments.quanser.quanser_ball_balancer import QBallBalancerReal
from pyrado.environments.quanser.quanser_cartpole import QCartPoleSwingUpReal, QCartPoleStabReal
from pyrado.environments.quanser.quanser_qube import QQubeReal
from pyrado.policies.environment_specific import QBallBalancerPDCtrl, QCartPoleSwingUpAndBalanceCtrl,\
QQubeSwingUpAndBalanceCtrl
from pyrado.sampling.rollout import rollout, after_rollout_query
from pyrado.utils.argparser import get_argparser
from pyrado.utils.data_types import RenderMode, EnvSpec
from pyrado.utils.input_output import print_cbt
from pyrado.policies.base import Policy
class CartpoleStabilizerPolicy(Policy):
    """ Stabilizing (balancing) state-feedback controller for the Quanser Cart-Pole """

    # Learned feedback gains, used when no K is passed to the constructor.
    _K_DEFAULT = (1.2278416e+00, 4.5279346e+00, -1.2385756e-02, 6.0038762e+00, -4.1818547e+00)

    def __init__(self,
                 env_spec: EnvSpec,
                 K: np.ndarray = None,
                 u_max: float = 18.,
                 v_max: float = 12.):
        """
        Constructor

        :param env_spec: environment specification
        :param K: feedback gain vector; defaults to the learned gains `_K_DEFAULT`
        :param u_max: maximum energy gain
        :param v_max: maximum voltage the control signal will be clipped to
        """
        super().__init__(env_spec)

        # BUGFIX: avoid a mutable np.ndarray as default argument (shared across calls).
        if K is None:
            K = np.array(self._K_DEFAULT)

        # Store inputs
        self.u_max = u_max
        self.v_max = v_max

        self.K_pd = to.tensor(K)
        self._max_u = 3.0

    def init_param(self, init_values: to.Tensor = None, **kwargs):
        # This controller has fixed gains; nothing to (re-)initialize.
        pass

    def forward(self, obs: to.Tensor) -> to.Tensor:
        """
        Calculate the controller output.

        :param obs: observation from the environment
        :return act: controller output [V]
        """
        # Linear state feedback (the unused per-element unpacking was removed).
        act = self.K_pd.dot(obs)

        # Return the clipped action
        act = act.clamp(-self.v_max, self.v_max)

        # Denormalize action
        lb, ub = -self._max_u, self._max_u
        act = lb + (act + 1) * (ub - lb) / 2

        # Bound
        act = self._bound(act, -self._max_u, self._max_u)

        return act.view(1)  # such that when act is later converted to numpy it does not become a float

    @staticmethod
    def _bound(x, min_value, max_value):
        """
        Method used to bound state and action variables.
        Args:
            x: the variable to bound;
            min_value: the minimum value;
            max_value: the maximum value;
        Returns:
            The bounded variable.
        """
        return np.maximum(min_value, np.minimum(x, max_value))
# Usage example: python ... -env-name qcp-st
# Script entry point: set up the controller matching the requested environment
# and run it on the real device until the user stops.
if __name__ == '__main__':
    # Parse command line arguments
    args = get_argparser().parse_args()
    # Set up PD-controller
    # NOTE(review): this branch uses a substring test (`in`) while the branches
    # below use `==` -- confirm the substring match is intended
    if args.env_name in QBallBalancerReal.name:
        env = QBallBalancerReal(args.dt, args.max_steps)
        # Diagonal PD gains for the two plate axes
        policy = QBallBalancerPDCtrl(env.spec, kp=to.diag(to.tensor([3.45, 3.45])), kd=to.diag(to.tensor([2.11, 2.11])))
        print_cbt('Set up controller for the QBallBalancerReal environment.', 'c')
    elif args.env_name == QCartPoleStabReal.name:
        env = QCartPoleStabReal(args.dt, args.max_steps)
        policy = CartpoleStabilizerPolicy(
            env.spec,
            K=np.array([1.2278416e+00, 4.5279346e+00, -1.2385756e-02, 6.0038762e+00, -4.1818547e+00])
        )
        # policy = QCartPoleSwingUpAndBalanceCtrl(env.spec)
        print_cbt('Set up controller for the QCartPoleStabReal environment.', 'c')
    elif args.env_name == QCartPoleSwingUpReal.name:
        env = QCartPoleSwingUpReal(args.dt, args.max_steps)
        policy = QCartPoleSwingUpAndBalanceCtrl(env.spec)
        print_cbt('Set up controller for the QCartPoleSwingUpReal environment.', 'c')
    elif args.env_name == QQubeReal.name:
        env = QQubeReal(args.dt, args.max_steps)
        # policy = QQubeSwingUpAndBalanceCtrl(env.spec)
        # MVD - Learned for the paper
        policy = QQubeSwingUpAndBalanceCtrl(
            env.spec,
            ref_energy=np.exp(-2.9414043),
            energy_gain=np.exp(3.1400251),
            energy_th_gain=0.73774934, # for simulation and real system
            acc_max=5., # Quanser's value: 6
            alpha_max_pd_enable=10., # Quanser's value: 20
            pd_gains=to.tensor([-1.9773294, 35.084324, -1.1951622, 3.3797605]))
        print_cbt('Set up controller for the QQubeReal environment.', 'c')
    else:
        # Unknown environment name: report which names are accepted
        raise pyrado.ValueErr(given=args.env_name,
                              eq_constraint=f'{QBallBalancerReal.name}, {QCartPoleSwingUpReal.name}, '
                                            f'{QCartPoleStabReal.name}, or {QQubeReal.name}')
    # Run on device
    done = False
    # Each rollout runs until it ends, then the user is queried whether to continue
    while not done:
        print_cbt('Running predefined controller ...', 'c', bright=True)
        ro = rollout(env, policy, eval=True, render_mode=RenderMode(text=args.verbose))
        done, _, _ = after_rollout_query(env, policy, ro)
| [
"pyrado.utils.input_output.print_cbt",
"pyrado.environments.quanser.quanser_cartpole.QCartPoleStabReal",
"pyrado.environments.quanser.quanser_ball_balancer.QBallBalancerReal",
"pyrado.utils.data_types.RenderMode",
"numpy.minimum",
"pyrado.environments.quanser.quanser_qube.QQubeReal",
"pyrado.utils.argpa... | [((1091, 1160), 'numpy.array', 'np.array', (['[1.2278416, 4.5279346, -0.012385756, 6.0038762, -4.1818547]'], {}), '([1.2278416, 4.5279346, -0.012385756, 6.0038762, -4.1818547])\n', (1099, 1160), True, 'import numpy as np\n'), ((1674, 1686), 'torch.tensor', 'to.tensor', (['K'], {}), '(K)\n', (1683, 1686), True, 'import torch as to\n'), ((3098, 3140), 'pyrado.environments.quanser.quanser_ball_balancer.QBallBalancerReal', 'QBallBalancerReal', (['args.dt', 'args.max_steps'], {}), '(args.dt, args.max_steps)\n', (3115, 3140), False, 'from pyrado.environments.quanser.quanser_ball_balancer import QBallBalancerReal\n'), ((3270, 3344), 'pyrado.utils.input_output.print_cbt', 'print_cbt', (['"""Set up controller for the QBallBalancerReal environment."""', '"""c"""'], {}), "('Set up controller for the QBallBalancerReal environment.', 'c')\n", (3279, 3344), False, 'from pyrado.utils.input_output import print_cbt\n'), ((5032, 5096), 'pyrado.utils.input_output.print_cbt', 'print_cbt', (['"""Running predefined controller ..."""', '"""c"""'], {'bright': '(True)'}), "('Running predefined controller ...', 'c', bright=True)\n", (5041, 5096), False, 'from pyrado.utils.input_output import print_cbt\n'), ((5206, 5242), 'pyrado.sampling.rollout.after_rollout_query', 'after_rollout_query', (['env', 'policy', 'ro'], {}), '(env, policy, ro)\n', (5225, 5242), False, 'from pyrado.sampling.rollout import rollout, after_rollout_query\n'), ((2846, 2870), 'numpy.minimum', 'np.minimum', (['x', 'max_value'], {}), '(x, max_value)\n', (2856, 2870), True, 'import numpy as np\n'), ((2979, 2994), 'pyrado.utils.argparser.get_argparser', 'get_argparser', ([], {}), '()\n', (2992, 2994), False, 'from pyrado.utils.argparser import get_argparser\n'), ((3410, 3452), 'pyrado.environments.quanser.quanser_cartpole.QCartPoleStabReal', 'QCartPoleStabReal', (['args.dt', 'args.max_steps'], {}), '(args.dt, args.max_steps)\n', (3427, 3452), False, 'from 
pyrado.environments.quanser.quanser_cartpole import QCartPoleSwingUpReal, QCartPoleStabReal\n'), ((3698, 3772), 'pyrado.utils.input_output.print_cbt', 'print_cbt', (['"""Set up controller for the QCartPoleStabReal environment."""', '"""c"""'], {}), "('Set up controller for the QCartPoleStabReal environment.', 'c')\n", (3707, 3772), False, 'from pyrado.utils.input_output import print_cbt\n'), ((3841, 3886), 'pyrado.environments.quanser.quanser_cartpole.QCartPoleSwingUpReal', 'QCartPoleSwingUpReal', (['args.dt', 'args.max_steps'], {}), '(args.dt, args.max_steps)\n', (3861, 3886), False, 'from pyrado.environments.quanser.quanser_cartpole import QCartPoleSwingUpReal, QCartPoleStabReal\n'), ((3904, 3944), 'pyrado.policies.environment_specific.QCartPoleSwingUpAndBalanceCtrl', 'QCartPoleSwingUpAndBalanceCtrl', (['env.spec'], {}), '(env.spec)\n', (3934, 3944), False, 'from pyrado.policies.environment_specific import QBallBalancerPDCtrl, QCartPoleSwingUpAndBalanceCtrl, QQubeSwingUpAndBalanceCtrl\n'), ((3953, 4030), 'pyrado.utils.input_output.print_cbt', 'print_cbt', (['"""Set up controller for the QCartPoleSwingUpReal environment."""', '"""c"""'], {}), "('Set up controller for the QCartPoleSwingUpReal environment.', 'c')\n", (3962, 4030), False, 'from pyrado.utils.input_output import print_cbt\n'), ((5154, 5183), 'pyrado.utils.data_types.RenderMode', 'RenderMode', ([], {'text': 'args.verbose'}), '(text=args.verbose)\n', (5164, 5183), False, 'from pyrado.utils.data_types import RenderMode, EnvSpec\n'), ((3199, 3222), 'torch.tensor', 'to.tensor', (['[3.45, 3.45]'], {}), '([3.45, 3.45])\n', (3208, 3222), True, 'import torch as to\n'), ((3236, 3259), 'torch.tensor', 'to.tensor', (['[2.11, 2.11]'], {}), '([2.11, 2.11])\n', (3245, 3259), True, 'import torch as to\n'), ((3532, 3601), 'numpy.array', 'np.array', (['[1.2278416, 4.5279346, -0.012385756, 6.0038762, -4.1818547]'], {}), '([1.2278416, 4.5279346, -0.012385756, 6.0038762, -4.1818547])\n', (3540, 3601), True, 'import numpy 
as np\n'), ((4088, 4122), 'pyrado.environments.quanser.quanser_qube.QQubeReal', 'QQubeReal', (['args.dt', 'args.max_steps'], {}), '(args.dt, args.max_steps)\n', (4097, 4122), False, 'from pyrado.environments.quanser.quanser_qube import QQubeReal\n'), ((4639, 4705), 'pyrado.utils.input_output.print_cbt', 'print_cbt', (['"""Set up controller for the QQubeReal environment."""', '"""c"""'], {}), "('Set up controller for the QQubeReal environment.', 'c')\n", (4648, 4705), False, 'from pyrado.utils.input_output import print_cbt\n'), ((4732, 4897), 'pyrado.ValueErr', 'pyrado.ValueErr', ([], {'given': 'args.env_name', 'eq_constraint': 'f"""{QBallBalancerReal.name}, {QCartPoleSwingUpReal.name}, {QCartPoleStabReal.name}, or {QQubeReal.name}"""'}), "(given=args.env_name, eq_constraint=\n f'{QBallBalancerReal.name}, {QCartPoleSwingUpReal.name}, {QCartPoleStabReal.name}, or {QQubeReal.name}'\n )\n", (4747, 4897), False, 'import pyrado\n'), ((4308, 4326), 'numpy.exp', 'np.exp', (['(-2.9414043)'], {}), '(-2.9414043)\n', (4314, 4326), True, 'import numpy as np\n'), ((4352, 4369), 'numpy.exp', 'np.exp', (['(3.1400251)'], {}), '(3.1400251)\n', (4358, 4369), True, 'import numpy as np\n'), ((4571, 4628), 'torch.tensor', 'to.tensor', (['[-1.9773294, 35.084324, -1.1951622, 3.3797605]'], {}), '([-1.9773294, 35.084324, -1.1951622, 3.3797605])\n', (4580, 4628), True, 'import torch as to\n')] |
import numpy as np
from numpy import *
from astropy import units as u
from scipy.integrate import quad
import math as math
from math import sqrt, pi, sin, cos
import matplotlib.pyplot as plt
import scipy.optimize as opt
from matplotlib import rcParams as rcp
from matplotlib import colors
from matplotlib import rc
# Global matplotlib styling applied to every figure in this script.
# NOTE: rcp is matplotlib.rcParams (imported above), i.e. the same dict as plt.rcParams.
plt.rcParams['figure.figsize'] = [9, 6]
plt.rcParams['figure.dpi'] = 100
plt.rcParams['xtick.labelsize'] = 10
plt.rcParams['ytick.labelsize'] = 10
rcp['axes.formatter.useoffset'] = False
rcp['axes.linewidth'] = 1.5
rcp['axes.axisbelow'] = False
rcp['xtick.major.size'] = 8
rcp['xtick.minor.size'] = 4
# NOTE(review): overrides the xtick.labelsize = 10 set above
rcp['xtick.labelsize'] = 15
rcp['legend.fontsize'] = 15
rcp['xtick.direction'] = 'in'
rcp['ytick.major.width'] = 2
rcp['ytick.minor.width'] = 2
rcp['savefig.dpi'] = 300
rcp["figure.dpi"] = 100
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.coordinates.angles import Angle
import sympy as sp
# `c` (the speed of light) was referenced without ever being imported, which
# raises a NameError at runtime; the `.to('km/s')` call matches the astropy
# Quantity API, so pull the constant in from astropy.constants explicitly.
from astropy.constants import c
c_km = (c.to('km/s').value)  # speed of light in km/s
######## PANTHEON data ######################
# Import redshifts (cosmological + heliocentric), apparent magnitudes,
# errors of the apparent magnitudes and systematic errors.
# Get the data from the github repository Pantheon of Dan Scolnic #
# https://github.com/dscolnic/Pantheon/blob/master/lcparam_full_long_zhel.txt #
data = np.loadtxt('/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/Scolnic_data_updated.txt', usecols=[1,2,4,5])
### list with all the systematics as found in the PANTHEON catalog ###
# get the full systematics from the same repository #
# https://github.com/dscolnic/Pantheon/blob/master/sys_full_long.txt #
sys = np.genfromtxt('/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/systematics.txt', skip_header=1)
### The list sn_names contains all the supernovae names in the PANTHEON catalog ###
sn_names = np.genfromtxt('/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/Scolnic_data_updated.txt', usecols=[0],dtype='str')
z_cmb= np.array((data[:,0])) ## CMB redshift
z_hel = np.array(np.array(data[:,1])) ## heliocentric redshift
mb = np.array(data[:,2]) ## apparent magnitude
### We select the C11 Scattering Model.
names = np.genfromtxt('/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/Ancillary_C11.FITRES.txt',dtype='str', skip_header=67, usecols=[1])
########## SEPARATE THE Pantheon sample into 13 subsamples based on idsurvey ##########
# Survey identifier per supernova, read as string then cast to float
idsurvey1 = np.genfromtxt('/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/Ancillary_C11.FITRES.txt', skip_header=67, usecols=[3], dtype="str").astype(float)
### The idsurvey1= 61, 62, 63, 64, 65, 66 constitute the CfA
### The idsurvey1=61 constitutes the CfA1
### The idsurvey1=62 constitutes the CfA2
### The idsurvey1=65, 66 constitute the CfA4
### The idsurvey1=63, 64 constitute the CfA3
### The idsurvey1=15 constitutes the PS1
### The idsurvey1=4 constitutes the SNLS
### The idsurvey1=100, 101, 106 constitute the HST
### The idsurvey1=1 constitute the SDSS
### The idsurvey1=5 constitutes the CSP
# high-z subsample: everything that is not PS1, SDSS, SNLS, CSP or CfA
xx_high = z_cmb[(idsurvey1!=15) & (idsurvey1!=1) & (idsurvey1!=4) & (idsurvey1!=5) &
(idsurvey1!=61) & (idsurvey1!=62) & (idsurvey1!= 63) &(idsurvey1!=64) & (idsurvey1!=65) &
(idsurvey1!=66)]
# Print size and redshift statistics (count, min, max, median) for each subsample
print(len(xx_high))
print(np.min(xx_high))
print(np.max(xx_high))
print(np.median(xx_high))
# low-z subsample: not PS1/SDSS/SNLS/CSP and redshift below 0.7
xx_low = z_cmb[(idsurvey1!=15) & (idsurvey1!=1) & (idsurvey1!=4) & (idsurvey1!=5) & (z_cmb<0.7)]
print(len(xx_low))
print(np.min(xx_low))
print(np.max(xx_low))
print(np.median(xx_low))
# SDSS subsample
xx_SDSS = z_cmb[idsurvey1==1]
print(len(xx_SDSS))
print(np.min(xx_SDSS))
print(np.max(xx_SDSS))
print(np.median(xx_SDSS))
# SNLS subsample
xx_SNLS = z_cmb[idsurvey1==4]
print(len(xx_SNLS))
print(np.min(xx_SNLS))
print(np.max(xx_SNLS))
print(np.median(xx_SNLS))
# PS1 subsample
xx_PS1 = z_cmb[idsurvey1==15]
print(len(xx_PS1))
print(np.min(xx_PS1))
print(np.max(xx_PS1))
print(np.median(xx_PS1))
# CSP subsample
xx_csp = (z_cmb[idsurvey1==5])
print(len(xx_csp))
print(np.min(xx_csp))
print(np.max(xx_csp))
print(np.median(xx_csp))
############ END #############################################
### COORDINATES ###
### Import the full uncorrected data given in https : // github.com/dscolnic/Pantheon/tree/master/data_fitres.
### We select the C11 Scattering Model.
eqcoor0 = np.genfromtxt('/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/Ancillary_C11.FITRES.txt',dtype=None, skip_header=67, usecols=np.arange(2,45))
### Right-Ascension of the SN as retrieved from the file above.
### The last 18 values are all zero !
### The Ra and DEC of each SnIa are given in degrees.
ras = np.array([eqcoor0[i][32] for i in range(len(eqcoor0))])
### Declination of the SN as retrieved from the file above.
### The last 18 values are all equal to zero!
decs = np.array([eqcoor0[i][33] for i in range(len(eqcoor0))])
### Print the names of the last 18 SN in the list
print(names[1030:1048])
### Retrieve the 18 missing values of RA and DEC from https://sne.space/
ra18 = np.array([53.15630,189.28829,189.33646, 53.09299, 189.05763, 189.09550, 189.23633, 189.14522, 53.04175, 53.10558, 189.53734, 53.07563, 189.22552, 53.10326, 189.37083, 35.42527, 35.44368, 187.35689])
dec18 = np.array([-27.77956, 62.19116,62.22819, -27.74084, 62.20210, 62.30644, 62.21481, 62.263576, -27.83055, -27.75084, 62.31316, -27.73626, 62.13950, -27.77169, 62.19106, -3.36476, -3.38227, 1.84905 ] )
### Join the 18 last values of the right ascension and declination with the rest
# NOTE(review): this assumes exactly 18 zero entries appearing in the same
# order as ra18/dec18 -- confirm against the catalog
ras[ras==0] = ra18
print(ras[1030:1048])
decs[decs==0] = dec18
print(decs[1030:1048])
print(len(ras), len(decs))
### Transform the equatorial coordinates to galactic
skc = SkyCoord(ra = ras*u.degree, dec=decs*u.degree, frame = 'icrs')
# Galactic longitude and latitude in degrees
lg, b = skc.galactic.l.value, skc.galactic.b.value
############### Mollweide projection ###############################
### This corresponds to the Figure 1 of our paper
### The distribution of 1048 Pantheon SN in the galactic coordinate system
def plot_mwd(ra,dec, org = 0,label = 'label', color = 'black'):
    '''
    Scatter the given positions on a Mollweide (all-sky) projection,
    colour-coded by the module-level redshift array z_cmb.

    RA, Dec are arrays of the same length RA takes values in [0,360), Dec in [-90,90],
    which represent angles in degrees. org is the RA value placed at the centre.

    NOTE(review): the `color` parameter is accepted but never used, and the
    point colours come from the global z_cmb, so ra/dec must be in the same
    order as z_cmb -- confirm.
    '''
    from matplotlib import colors
    # Shift RA by the chosen origin and wrap into [0, 360)
    x=np.remainder(ra+360-org, 360)
    ind=x>180
    x[ind]-=360 # scale conversion to [-180, 180]
    #x=-x
    tick_labels=np.array(['210 \xb0'
, '240 \xb0', '270 \xb0', '300 \xb0' ,'330 \xb0', '0 \xb0', '30 \xb0', '60 \xb0', '90 \xb0', '120 \xb0',
 '150 \xb0',])
    #tick_labels=np.remainder(tick_labels+360,360)
    fig = plt.figure(figsize=(14,7))
    # NOTE(review): vmin and vmax are computed but never passed to the scatter/colorbar
    vmin = np.min(z_cmb)
    vmax = 2.27
    ax = fig.add_subplot(111,projection="mollweide")
    #cmap = plt.get_cmap('jet', 9)
    # Discrete colormap: one colour per redshift bin defined in `bounds`
    cmap = colors.ListedColormap(["blue", "aqua", "green", "greenyellow", "yellow", "gold", "orange", "red"])
    bounds = [0, 0.2, 0.4, 0.6, 0.8, 1, 2.3]
    norm = colors.BoundaryNorm(bounds, cmap.N)
    # Mollweide axes expect radians
    im = ax.scatter(np.radians(x),np.radians(dec), marker="o", label = label,c = z_cmb, cmap = cmap, norm = norm, s=6)
    cbar = fig.colorbar(im, ax=ax, orientation = "horizontal")
    cbar.ax.tick_params(labelsize=9)
    ax.set_xticklabels(tick_labels, fontsize=10)
    ax.title.set_fontsize(12)
    ax.set_xlabel("l")
    ax.set_ylabel("b")
    ax.yaxis.label.set_fontsize(10)
    ax.grid(True, which='both')
    plt.show()
# Figure 1: all-sky map of the Pantheon sample in galactic coordinates
plot_mwd(lg, b)
########### END of Mollweide #################################
##################################################################################
### Redshift distribution of the Pantheon sample wrt the different subsamples ####
##################################################################################
### This corresponds to Figure 2 of our paper ###
# NOTE(review): unlike xx_low above, this selection does not exclude CSP
# (idsurvey1 == 5) -- confirm this is intended
xx_lows = z_cmb[(idsurvey1!=15) & (idsurvey1!=1) & (idsurvey1!=4) & (z_cmb<0.7)]
fig, ax = plt.subplots()
#plt.figure(figsize=(8,6))
# Overlaid redshift histograms, one per subsample
plt.hist(xx_lows, bins=20, alpha=0.9, label="low-$z$")
plt.hist(xx_PS1, bins=20, alpha=0.5, label="$PS1$")
plt.hist(xx_SDSS, bins=20, alpha=0.5, label="$SDSS$")
plt.hist(xx_SNLS, bins=20, alpha=0.7, label="$SNLS$")
plt.hist(xx_high, bins=20, alpha=0.5, label="high-$z$")
plt.xlabel("redshift (z)", size=14)
plt.ylabel("Number of SnIa", size=14)
ax.set_xticks([0.2, 0.5, 1, 1.5, 2, 2.3])
plt.xlim(0.007, 2.29)
plt.legend(loc='upper right')
| [
"numpy.radians",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.loadtxt",
"numpy.genfromtxt",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"numpy.max",
"matplotlib.colors.ListedColormap",
"numpy.min",
"numpy.remainder",
"matplotlib.pyplot.xlim",
"matplotlib.pypl... | [((1447, 1562), 'numpy.loadtxt', 'np.loadtxt', (['"""/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/Scolnic_data_updated.txt"""'], {'usecols': '[1, 2, 4, 5]'}), "(\n '/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/Scolnic_data_updated.txt',\n usecols=[1, 2, 4, 5])\n", (1457, 1562), True, 'import numpy as np\n'), ((1754, 1851), 'numpy.genfromtxt', 'np.genfromtxt', (['"""/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/systematics.txt"""'], {'skip_header': '(1)'}), "('/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/systematics.txt',\n skip_header=1)\n", (1767, 1851), True, 'import numpy as np\n'), ((1944, 2066), 'numpy.genfromtxt', 'np.genfromtxt', (['"""/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/Scolnic_data_updated.txt"""'], {'usecols': '[0]', 'dtype': '"""str"""'}), "(\n '/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/Scolnic_data_updated.txt',\n usecols=[0], dtype='str')\n", (1957, 2066), True, 'import numpy as np\n'), ((2065, 2085), 'numpy.array', 'np.array', (['data[:, 0]'], {}), '(data[:, 0])\n', (2073, 2085), True, 'import numpy as np\n'), ((2171, 2191), 'numpy.array', 'np.array', (['data[:, 2]'], {}), '(data[:, 2])\n', (2179, 2191), True, 'import numpy as np\n'), ((2263, 2401), 'numpy.genfromtxt', 'np.genfromtxt', (['"""/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/Ancillary_C11.FITRES.txt"""'], {'dtype': '"""str"""', 'skip_header': '(67)', 'usecols': '[1]'}), "(\n '/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/Ancillary_C11.FITRES.txt',\n dtype='str', skip_header=67, usecols=[1])\n", (2276, 2401), True, 'import numpy as np\n'), ((4976, 5184), 'numpy.array', 'np.array', (['[53.1563, 189.28829, 189.33646, 53.09299, 189.05763, 189.0955, 189.23633, \n 189.14522, 53.04175, 53.10558, 189.53734, 53.07563, 189.22552, 53.10326,\n 189.37083, 35.42527, 35.44368, 187.35689]'], {}), '([53.1563, 189.28829, 189.33646, 53.09299, 189.05763, 189.0955, \n 189.23633, 189.14522, 53.04175, 53.10558, 189.53734, 53.07563, \n 189.22552, 53.10326, 189.37083, 
35.42527, 35.44368, 187.35689])\n', (4984, 5184), True, 'import numpy as np\n'), ((5183, 5386), 'numpy.array', 'np.array', (['[-27.77956, 62.19116, 62.22819, -27.74084, 62.2021, 62.30644, 62.21481, \n 62.263576, -27.83055, -27.75084, 62.31316, -27.73626, 62.1395, -\n 27.77169, 62.19106, -3.36476, -3.38227, 1.84905]'], {}), '([-27.77956, 62.19116, 62.22819, -27.74084, 62.2021, 62.30644, \n 62.21481, 62.263576, -27.83055, -27.75084, 62.31316, -27.73626, 62.1395,\n -27.77169, 62.19106, -3.36476, -3.38227, 1.84905])\n', (5191, 5386), True, 'import numpy as np\n'), ((5638, 5700), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': '(ras * u.degree)', 'dec': '(decs * u.degree)', 'frame': '"""icrs"""'}), "(ra=ras * u.degree, dec=decs * u.degree, frame='icrs')\n", (5646, 5700), False, 'from astropy.coordinates import SkyCoord\n'), ((7780, 7794), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7792, 7794), True, 'import matplotlib.pyplot as plt\n'), ((7822, 7876), 'matplotlib.pyplot.hist', 'plt.hist', (['xx_lows'], {'bins': '(20)', 'alpha': '(0.9)', 'label': '"""low-$z$"""'}), "(xx_lows, bins=20, alpha=0.9, label='low-$z$')\n", (7830, 7876), True, 'import matplotlib.pyplot as plt\n'), ((7877, 7928), 'matplotlib.pyplot.hist', 'plt.hist', (['xx_PS1'], {'bins': '(20)', 'alpha': '(0.5)', 'label': '"""$PS1$"""'}), "(xx_PS1, bins=20, alpha=0.5, label='$PS1$')\n", (7885, 7928), True, 'import matplotlib.pyplot as plt\n'), ((7929, 7982), 'matplotlib.pyplot.hist', 'plt.hist', (['xx_SDSS'], {'bins': '(20)', 'alpha': '(0.5)', 'label': '"""$SDSS$"""'}), "(xx_SDSS, bins=20, alpha=0.5, label='$SDSS$')\n", (7937, 7982), True, 'import matplotlib.pyplot as plt\n'), ((7983, 8036), 'matplotlib.pyplot.hist', 'plt.hist', (['xx_SNLS'], {'bins': '(20)', 'alpha': '(0.7)', 'label': '"""$SNLS$"""'}), "(xx_SNLS, bins=20, alpha=0.7, label='$SNLS$')\n", (7991, 8036), True, 'import matplotlib.pyplot as plt\n'), ((8037, 8092), 'matplotlib.pyplot.hist', 'plt.hist', (['xx_high'], 
{'bins': '(20)', 'alpha': '(0.5)', 'label': '"""high-$z$"""'}), "(xx_high, bins=20, alpha=0.5, label='high-$z$')\n", (8045, 8092), True, 'import matplotlib.pyplot as plt\n'), ((8093, 8128), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""redshift (z)"""'], {'size': '(14)'}), "('redshift (z)', size=14)\n", (8103, 8128), True, 'import matplotlib.pyplot as plt\n'), ((8129, 8166), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of SnIa"""'], {'size': '(14)'}), "('Number of SnIa', size=14)\n", (8139, 8166), True, 'import matplotlib.pyplot as plt\n'), ((8209, 8230), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.007)', '(2.29)'], {}), '(0.007, 2.29)\n', (8217, 8230), True, 'import matplotlib.pyplot as plt\n'), ((8231, 8260), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (8241, 8260), True, 'import matplotlib.pyplot as plt\n'), ((2120, 2140), 'numpy.array', 'np.array', (['data[:, 1]'], {}), '(data[:, 1])\n', (2128, 2140), True, 'import numpy as np\n'), ((3309, 3324), 'numpy.min', 'np.min', (['xx_high'], {}), '(xx_high)\n', (3315, 3324), True, 'import numpy as np\n'), ((3332, 3347), 'numpy.max', 'np.max', (['xx_high'], {}), '(xx_high)\n', (3338, 3347), True, 'import numpy as np\n'), ((3355, 3373), 'numpy.median', 'np.median', (['xx_high'], {}), '(xx_high)\n', (3364, 3373), True, 'import numpy as np\n'), ((3498, 3512), 'numpy.min', 'np.min', (['xx_low'], {}), '(xx_low)\n', (3504, 3512), True, 'import numpy as np\n'), ((3520, 3534), 'numpy.max', 'np.max', (['xx_low'], {}), '(xx_low)\n', (3526, 3534), True, 'import numpy as np\n'), ((3542, 3559), 'numpy.median', 'np.median', (['xx_low'], {}), '(xx_low)\n', (3551, 3559), True, 'import numpy as np\n'), ((3618, 3633), 'numpy.min', 'np.min', (['xx_SDSS'], {}), '(xx_SDSS)\n', (3624, 3633), True, 'import numpy as np\n'), ((3641, 3656), 'numpy.max', 'np.max', (['xx_SDSS'], {}), '(xx_SDSS)\n', (3647, 3656), True, 'import numpy as np\n'), ((3664, 3682), 'numpy.median', 
'np.median', (['xx_SDSS'], {}), '(xx_SDSS)\n', (3673, 3682), True, 'import numpy as np\n'), ((3741, 3756), 'numpy.min', 'np.min', (['xx_SNLS'], {}), '(xx_SNLS)\n', (3747, 3756), True, 'import numpy as np\n'), ((3764, 3779), 'numpy.max', 'np.max', (['xx_SNLS'], {}), '(xx_SNLS)\n', (3770, 3779), True, 'import numpy as np\n'), ((3787, 3805), 'numpy.median', 'np.median', (['xx_SNLS'], {}), '(xx_SNLS)\n', (3796, 3805), True, 'import numpy as np\n'), ((3863, 3877), 'numpy.min', 'np.min', (['xx_PS1'], {}), '(xx_PS1)\n', (3869, 3877), True, 'import numpy as np\n'), ((3885, 3899), 'numpy.max', 'np.max', (['xx_PS1'], {}), '(xx_PS1)\n', (3891, 3899), True, 'import numpy as np\n'), ((3907, 3924), 'numpy.median', 'np.median', (['xx_PS1'], {}), '(xx_PS1)\n', (3916, 3924), True, 'import numpy as np\n'), ((3983, 3997), 'numpy.min', 'np.min', (['xx_csp'], {}), '(xx_csp)\n', (3989, 3997), True, 'import numpy as np\n'), ((4005, 4019), 'numpy.max', 'np.max', (['xx_csp'], {}), '(xx_csp)\n', (4011, 4019), True, 'import numpy as np\n'), ((4027, 4044), 'numpy.median', 'np.median', (['xx_csp'], {}), '(xx_csp)\n', (4036, 4044), True, 'import numpy as np\n'), ((6195, 6228), 'numpy.remainder', 'np.remainder', (['(ra + 360 - org)', '(360)'], {}), '(ra + 360 - org, 360)\n', (6207, 6228), True, 'import numpy as np\n'), ((6316, 6424), 'numpy.array', 'np.array', (["['210 °', '240 °', '270 °', '300 °', '330 °', '0 °', '30 °', '60 °', '90 °',\n '120 °', '150 °']"], {}), "(['210 °', '240 °', '270 °', '300 °', '330 °', '0 °', '30 °',\n '60 °', '90 °', '120 °', '150 °'])\n", (6324, 6424), True, 'import numpy as np\n'), ((6518, 6545), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 7)'}), '(figsize=(14, 7))\n', (6528, 6545), True, 'import matplotlib.pyplot as plt\n'), ((6556, 6569), 'numpy.min', 'np.min', (['z_cmb'], {}), '(z_cmb)\n', (6562, 6569), True, 'import numpy as np\n'), ((6685, 6787), 'matplotlib.colors.ListedColormap', 'colors.ListedColormap', (["['blue', 'aqua', 'green', 
'greenyellow', 'yellow', 'gold', 'orange', 'red']"], {}), "(['blue', 'aqua', 'green', 'greenyellow', 'yellow',\n 'gold', 'orange', 'red'])\n", (6706, 6787), False, 'from matplotlib import colors\n'), ((6840, 6875), 'matplotlib.colors.BoundaryNorm', 'colors.BoundaryNorm', (['bounds', 'cmap.N'], {}), '(bounds, cmap.N)\n', (6859, 6875), False, 'from matplotlib import colors\n'), ((7296, 7306), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7304, 7306), True, 'import matplotlib.pyplot as plt\n'), ((2493, 2631), 'numpy.genfromtxt', 'np.genfromtxt', (['"""/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/Ancillary_C11.FITRES.txt"""'], {'skip_header': '(67)', 'usecols': '[3]', 'dtype': '"""str"""'}), "(\n '/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/Ancillary_C11.FITRES.txt',\n skip_header=67, usecols=[3], dtype='str')\n", (2506, 2631), True, 'import numpy as np\n'), ((4414, 4430), 'numpy.arange', 'np.arange', (['(2)', '(45)'], {}), '(2, 45)\n', (4423, 4430), True, 'import numpy as np\n'), ((6896, 6909), 'numpy.radians', 'np.radians', (['x'], {}), '(x)\n', (6906, 6909), True, 'import numpy as np\n'), ((6910, 6925), 'numpy.radians', 'np.radians', (['dec'], {}), '(dec)\n', (6920, 6925), True, 'import numpy as np\n')] |
import numpy as np
from os import path
def __to_float_array(x):
    """Coerce *x* (list, tuple or ndarray of real numbers) to an ndarray.

    :param x: array_like sequence of numeric values
    :return: *x* as a ``numpy.ndarray``
    :raises TypeError: if *x* is not array_like or its values are not numeric
    """
    if isinstance(x, (list, tuple)):
        x = np.asarray(x)
    elif not isinstance(x, np.ndarray):
        raise TypeError('x is not an array_like object')
    # Accept any real floating/integer dtype (float32, int32, ... included);
    # the previous equality check against 'float'/'int' only matched the
    # platform-default float64/int64 dtypes.
    if not (np.issubdtype(x.dtype, np.floating) or np.issubdtype(x.dtype, np.integer)):
        raise TypeError('The values of x must be convertable to float')
    return x
def to_units(x, y, dy=None, dx=None, x_unit=1, y_unit=1):
    """Scale data (and any given uncertainties) by the chosen units.

    Returns ``(x, y)`` plus ``dy`` and/or ``dx`` (in that order) when they
    are provided, each divided by the corresponding unit.
    """
    scaled_x = x / x_unit
    scaled_y = y / y_unit
    if dy is None and dx is None:
        return scaled_x, scaled_y
    if dx is None:
        return scaled_x, scaled_y, dy / y_unit
    if dy is None:
        return scaled_x, scaled_y, dx / x_unit
    return scaled_x, scaled_y, dy / y_unit, dx / x_unit
def linreg(x, y, dy, dx=None):
    """Weighted least-squares fit of the line ``y = s*x + b``.

    :param x: abscissa values (array_like)
    :param y: ordinate values (array_like)
    :param dy: uncertainties of y (array_like)
    :param dx: optional uncertainties of x; if given, they are folded into the
               effective y-uncertainty and the fit iterates until the slope converges
    :return: tuple ``(s, ds, b, db)`` -- slope, slope error, intercept, intercept error
    """
    change_max = 0.00001      # relative slope change that stops the iteration
    zero_replacement = 1e-80  # stand-in for zero uncertainties to avoid division by zero

    # Regression iteration, for dx is None only one iteration is needed
    def linreg_iter(x, y, dy):
        # Work on a copy: the previous version replaced zero uncertainties in
        # place and thereby mutated the caller's dy array.
        dy = dy.copy()
        dy[dy == 0] = zero_replacement
        s0 = np.sum(1 / dy**2)
        s1 = np.sum(x / dy**2)
        s2 = np.sum(y / dy**2)
        s3 = np.sum(x**2 / dy**2)
        s4 = np.sum(x * y / dy**2)
        eta = s0 * s3 - s1**2
        s = (s0 * s4 - s1 * s2) / eta
        ds = np.sqrt(s0 / eta)
        b = (s3 * s2 - s1 * s4) / eta
        db = np.sqrt(s3 / eta)
        return s, ds, b, db

    x = __to_float_array(x)
    y = __to_float_array(y)
    dy = __to_float_array(dy)

    # Compute slope and axis intercept for dx not specified
    if dx is None:
        return linreg_iter(x, y, dy)

    dx = __to_float_array(dx)

    # dx specified: iterate, folding the slope-scaled x-uncertainty into dy
    dy_ = dy.copy()
    s, ds, b, db = linreg_iter(x, y, dy_)
    while True:
        s_old = s
        dy_ = np.sqrt((s * dx)**2 + dy_**2)
        s, *_ = linreg_iter(x, y, dy_)
        if abs(1 - s_old / s) < change_max:
            break
    return linreg_iter(x, y, dy_)
def x_fit_like(a, num=1000):
    """Return *num* evenly spaced points spanning the value range of *a*."""
    lo, hi = np.min(a), np.max(a)
    return np.linspace(lo, hi, num)
def linreg_lines(x, s, ds, b, db, align='flat'):
    """Evaluate the best-fit line and one uncertainty line at *x*.

    ``align='flat'`` picks the flatter of the two uncertainty lines,
    ``align='steep'`` the steeper one.

    :return: tuple ``(y_line, y_uline)``
    :raises ValueError: for an unknown *align* option
    """
    if align not in ('flat', 'steep'):
        raise ValueError('Unknown option for align')
    best_fit = s * x + b
    # The (s + ds) line is the flatter one when its slope has smaller magnitude
    plus_is_flatter = abs(s - ds) > abs(s + ds)
    use_plus = plus_is_flatter if align == 'flat' else not plus_is_flatter
    if use_plus:
        uncertainty_line = (s + ds) * x + (b - db)
    else:
        uncertainty_line = (s - ds) * x + (b + db)
    return best_fit, uncertainty_line
def get_fig_paths(folder_path, fignums, format='pdf'):
    """Build one ``figN.<format>`` path inside *folder_path* per figure number."""
    return [path.join(folder_path, 'fig%i.%s' % (num, format)) for num in fignums]
| [
"numpy.sqrt",
"numpy.asarray",
"os.path.join",
"numpy.max",
"numpy.sum",
"numpy.min"
] | [((115, 128), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (125, 128), True, 'import numpy as np\n'), ((894, 913), 'numpy.sum', 'np.sum', (['(1 / dy ** 2)'], {}), '(1 / dy ** 2)\n', (900, 913), True, 'import numpy as np\n'), ((921, 940), 'numpy.sum', 'np.sum', (['(x / dy ** 2)'], {}), '(x / dy ** 2)\n', (927, 940), True, 'import numpy as np\n'), ((948, 967), 'numpy.sum', 'np.sum', (['(y / dy ** 2)'], {}), '(y / dy ** 2)\n', (954, 967), True, 'import numpy as np\n'), ((975, 999), 'numpy.sum', 'np.sum', (['(x ** 2 / dy ** 2)'], {}), '(x ** 2 / dy ** 2)\n', (981, 999), True, 'import numpy as np\n'), ((1005, 1028), 'numpy.sum', 'np.sum', (['(x * y / dy ** 2)'], {}), '(x * y / dy ** 2)\n', (1011, 1028), True, 'import numpy as np\n'), ((1097, 1114), 'numpy.sqrt', 'np.sqrt', (['(s0 / eta)'], {}), '(s0 / eta)\n', (1104, 1114), True, 'import numpy as np\n'), ((1158, 1175), 'numpy.sqrt', 'np.sqrt', (['(s3 / eta)'], {}), '(s3 / eta)\n', (1165, 1175), True, 'import numpy as np\n'), ((1570, 1603), 'numpy.sqrt', 'np.sqrt', (['((s * dx) ** 2 + dy_ ** 2)'], {}), '((s * dx) ** 2 + dy_ ** 2)\n', (1577, 1603), True, 'import numpy as np\n'), ((1770, 1779), 'numpy.min', 'np.min', (['a'], {}), '(a)\n', (1776, 1779), True, 'import numpy as np\n'), ((1781, 1790), 'numpy.max', 'np.max', (['a'], {}), '(a)\n', (1787, 1790), True, 'import numpy as np\n'), ((2471, 2504), 'os.path.join', 'path.join', (['folder_path', 'file_name'], {}), '(folder_path, file_name)\n', (2480, 2504), False, 'from os import path\n')] |
#!/usr/bin/env python
# coding: utf-8
import random
import os
import sys
import cv2
import csv
import glob
import numpy as np
import time
import psutil
from sklearn.utils import shuffle
from keras.layers import Input, Conv2D, MaxPooling2D, BatchNormalization, LeakyReLU, Flatten, Dense
from keras.activations import linear
from keras.models import Model, model_from_json
import numpy as np
from keras.callbacks import Callback, LearningRateScheduler
from keras.optimizers import Adam
from keras import backend as K
import tensorflow as tf
import os
from sklearn.metrics import roc_auc_score
from sklearn.utils import shuffle
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.models import model_from_json
from sklearn.metrics import mean_squared_error
def load_model(model_path):
    """Reconstruct the autoencoder from its JSON architecture and weight files.

    :param model_path: directory (with trailing separator) containing
                       'auto_model.json' and 'auto_model.h5'
    :return: the loaded Keras model
    """
    with open(model_path + 'auto_model.json', 'r') as arch_file:
        autoencoder = model_from_json(arch_file.read())
    autoencoder.load_weights(model_path + 'auto_model.h5')
    return autoencoder
#Load complete input images without shuffling
def load_images(paths):
    """Read, resize to 224x224 and normalise every PNG under the given folders.

    :param paths: folder paths, each ending with a path separator
    :return: list of images with pixel values scaled to [0, 1]
    """
    total = 0
    images = []
    for folder in paths:
        total += len(glob.glob1(folder, '*.png'))
        for file_name in glob.glob(folder + '*.png'):
            frame = cv2.imread(file_name)
            frame = cv2.resize(frame, (224, 224))
            images.append(frame / 255.)
    print("Total number of images:%d" % (total))
    return images
def createFolderPaths(train_data_path, train_folders):
    """Join the base data directory with every folder name, keeping a trailing slash."""
    return [train_data_path + folder_name + '/' for folder_name in train_folders]
def load_training_images(train_data_path, train_folders):
    """Load every PNG image found under ``train_data_path/<folder>/`` for the given folders."""
    return load_images(createFolderPaths(train_data_path, train_folders))
def mse(imageA, imageB):
    """'Mean Squared Error' between two images of identical shape.

    The lower the value, the more similar the two images are.
    NOTE(review): the squared-difference sum is divided by
    shape[0]*shape[1] only, so for inputs with more than two dimensions
    this is not a per-pixel mean -- presumably the anomaly threshold was
    tuned to this; confirm before changing.
    """
    diff = imageA.astype("float") - imageB.astype("float")
    total_sq_error = np.sum(diff ** 2)
    return total_sq_error / float(imageA.shape[0] * imageA.shape[1])
# Load calibration input images in file order (no shuffling)
def load_training_images1(train_data):
    """Load the images listed in ``<train_data>/calibration.csv``.

    Each CSV row's first column is an image file name relative to *train_data*.
    Images are resized to 224x224 and scaled to [0, 1].

    :param train_data: data directory (with trailing separator)
    :return: list of normalised images
    """
    # The unused `comp_inp` accumulator from the original version was removed.
    inputs = []
    with open(train_data + 'calibration.csv', 'rt') as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            img = cv2.imread(train_data + row[0])
            img = cv2.resize(img, (224, 224))
            img = img / 255.
            inputs.append(img)
    return inputs
def vae_prediction(model_vae,test_data_path,test_folders):
    """Run the autoencoder over every test image and flag reconstruction anomalies.

    For each image the reconstruction error (mse between input and model
    output) is compared against a fixed threshold; the per-image anomaly flag
    is appended to a CSV file and summary statistics are printed at the end.

    :param model_vae: trained autoencoder model (see load_model)
    :param test_data_path: base directory containing the test image folders
    :param test_folders: list of sub-folder names to evaluate
    """
    print("==============PREDICTING THE LABELS ==============================")
    #X_validate = load_training_images(test_data_path, test_folders)
    X_validate = load_training_images(test_data_path,test_folders)
    X_validate = np.array(X_validate)
    # Reshape to (num_images, height, width, channels)
    X_validate = np.reshape(X_validate, [-1, X_validate.shape[1],X_validate.shape[2],X_validate.shape[3]])
    anomaly=0      # running count of images flagged as anomalous
    tval = []      # per-image prediction wall-clock times
    dist_val=[]    # per-image reconstruction errors
    for i in range(0,len(X_validate)):
        val=[]
        anomaly_val = 0
        t1 = time.time()
        # Add a leading batch axis for model.predict
        img = np.array(X_validate[i])[np.newaxis]
        predicted_reps = model_vae.predict(img)
        dist = mse(predicted_reps, img)
        # NOTE(review): cpu and mem below are measured but never written anywhere
        cpu = psutil.cpu_percent()
        t2 = time.time()-t1
        print(dist)
        dist_val.append(dist)
        # gives an object with many fields
        #mem = psutil.virtual_memory().total / (1024.0 ** 3)#virtual memory stats
        process = psutil.Process(os.getpid())
        mem = process.memory_info().rss/ (1000.0 ** 3)
        #dist = np.sum(((predicted_reps.any() - img.any()) ** 2), axis=1)
        if(dist > 7.5): # anomaly threshold on the reconstruction error
            anomaly_val = 1
            anomaly+=1
        tval.append(t2)
        val.append(anomaly_val)
        #val.append(cpu)
        #val.append(frame_time)
        # Append this image's anomaly flag to the evaluation CSV
        # NOTE(review): the file is re-opened on every loop iteration
        with open('/home/scope/Carla/CARLA_0.9.6/PythonAPI/SVDD/SVDD/tcps-evaluation/vae-illumination-change-light.csv', 'a') as file:
            writer = csv.writer(file)
            writer.writerow(val)
    # Summary: anomaly count, mean prediction time, largest reconstruction error
    print(anomaly)
    print(sum(tval)/len(X_validate))
    print(max(dist_val))
if __name__ == '__main__':
    # Hard-coded experiment paths; adjust for the local CARLA installation.
    test_data_path = "/home/scope/Carla/CARLA_0.9.6/PythonAPI/new/dataset/" #SVDD/data-generator/" #"/home/scope/Carla/CARLA_0.9.6/PythonAPI/CarlaData/"
    test_folders = ["new-road"]
    model_path = "/home/scope/Carla/CARLA_0.9.6/PythonAPI/SVDD/VAE/train-illumination/" # directory holding the trained VAE architecture + weights
    model_vae=load_model(model_path)
    vae_prediction(model_vae,test_data_path,test_folders)
| [
"numpy.reshape",
"psutil.cpu_percent",
"glob.glob1",
"csv.writer",
"numpy.array",
"csv.reader",
"os.getpid",
"cv2.resize",
"time.time",
"glob.glob",
"cv2.imread"
] | [((3013, 3033), 'numpy.array', 'np.array', (['X_validate'], {}), '(X_validate)\n', (3021, 3033), True, 'import numpy as np\n'), ((3051, 3146), 'numpy.reshape', 'np.reshape', (['X_validate', '[-1, X_validate.shape[1], X_validate.shape[2], X_validate.shape[3]]'], {}), '(X_validate, [-1, X_validate.shape[1], X_validate.shape[2],\n X_validate.shape[3]])\n', (3061, 3146), True, 'import numpy as np\n'), ((1250, 1275), 'glob.glob', 'glob.glob', (["(path + '*.png')"], {}), "(path + '*.png')\n", (1259, 1275), False, 'import glob\n'), ((2470, 2489), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (2480, 2489), False, 'import csv\n'), ((3276, 3287), 'time.time', 'time.time', ([], {}), '()\n', (3285, 3287), False, 'import time\n'), ((3440, 3460), 'psutil.cpu_percent', 'psutil.cpu_percent', ([], {}), '()\n', (3458, 3460), False, 'import psutil\n'), ((1175, 1200), 'glob.glob1', 'glob.glob1', (['path', '"""*.png"""'], {}), "(path, '*.png')\n", (1185, 1200), False, 'import glob\n'), ((1293, 1308), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (1303, 1308), False, 'import cv2\n'), ((1327, 1354), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (1337, 1354), False, 'import cv2\n'), ((2543, 2574), 'cv2.imread', 'cv2.imread', (['(train_data + row[0])'], {}), '(train_data + row[0])\n', (2553, 2574), False, 'import cv2\n'), ((2597, 2624), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (2607, 2624), False, 'import cv2\n'), ((3302, 3325), 'numpy.array', 'np.array', (['X_validate[i]'], {}), '(X_validate[i])\n', (3310, 3325), True, 'import numpy as np\n'), ((3474, 3485), 'time.time', 'time.time', ([], {}), '()\n', (3483, 3485), False, 'import time\n'), ((3697, 3708), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3706, 3708), False, 'import os\n'), ((4215, 4231), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (4225, 4231), False, 'import csv\n')] |
from __future__ import print_function
import os
import sys
import time
import json
import argparse
import random
random.seed(49999)
import numpy
numpy.random.seed(49999)
import tensorflow
tensorflow.set_random_seed(49999)
from collections import OrderedDict
from zoo.pipeline.api.autograd import *
from zoo.pipeline.api.keras.layers import *
from zoo.pipeline.api.keras.models import *
from bigdl.keras.converter import WeightsConverter
from bigdl.optim.optimizer import Adam
from zoo.common.nncontext import *
from zoo.pipeline.api.keras.engine.topology import *
import numpy as np
import keras.backend as KK
from keras.engine.training import _standardize_input_data
import keras.layers as klayers
from utils import *
import inputs
import metrics
# Fix the numpy RNG so embedding initialization is reproducible across runs.
np.random.seed(1330)
config = tensorflow.ConfigProto()
config.gpu_options.allow_growth = True  # allocate GPU memory on demand, not all at once
sess = tensorflow.Session(config = config)
def load_zoo_model(config):
    """Build the Analytics Zoo model described by config['zmodel'].

    Merges the shared input settings into the model settings, prepends the
    model's source directory to sys.path so import_object can resolve
    config['zmodel']['model_py'], then builds and returns the model.
    """
    # Fix: dropped the unused 'global_conf'/'model_type' locals from the original.
    model_config = config['zmodel']['setting']
    model_config.update(config['inputs']['share'])
    sys.path.insert(0, config['zmodel']['model_path'])
    model = import_object(config['zmodel']['model_py'], model_config)
    mo = model.build()
    return mo
def load_keras2_model(config):
    """Build the reference Keras model described by config['kmodel'].

    Mirrors load_zoo_model: merges shared input settings into the model
    settings, extends sys.path with the model directory, and builds the
    model via import_object.
    """
    # Fix: dropped the unused 'global_conf'/'model_type' locals from the original.
    model_config = config['kmodel']['setting']
    model_config.update(config['inputs']['share'])
    sys.path.insert(0, config['kmodel']['model_path'])
    model = import_object(config['kmodel']['model_py'], model_config)
    mo = model.build()
    return mo
def zloss(**kwargs):
    """Return a pairwise hinge-loss function for the Zoo model.

    Predictions arrive as [batch, 2, 1] with the positive sample at index 0
    and the negative at index 1 along axis 1; the returned function computes
    max(neg - pos + margin, 0) with margin 1.0.

    Raises:
        ValueError: if the required 'batch' keyword argument is missing.
    """
    if 'batch' not in kwargs:
        # Fix: the original silently returned None here (the isinstance check
        # on kwargs was always true, but a missing 'batch' fell through),
        # which would crash later inside model.compile. Fail fast instead.
        raise ValueError("zloss requires a 'batch' keyword argument")
    def _zloss(y_true, y_pred):
        # No-op arithmetic keeps y_true in the autograd graph.
        y_pred = y_pred + y_true - y_true
        margin = 1.0
        pos = y_pred.index_select(1, 0)  # positive-sample scores
        neg = y_pred.index_select(1, 1)  # negative-sample scores
        loss = maximum(neg - pos + margin, 0.)
        return loss
    return _zloss
def kloss(y_true, y_pred):
    """Keras pairwise hinge loss over interleaved rows: even rows are
    positive samples, odd rows their paired negatives."""
    margin = 1.0
    positives = klayers.Lambda(lambda a: a[::2, :], output_shape=(1,))(y_pred)
    negatives = klayers.Lambda(lambda a: a[1::2, :], output_shape=(1,))(y_pred)
    hinge = KK.maximum(0., margin + negatives - positives)
    return KK.mean(hinge)
def pair(query, doc):
    """Group consecutive (query, doc) rows into positive/negative pairs.

    Rows 2k and 2k+1 are stacked along a new leading axis, then the query
    and doc halves are concatenated feature-wise, producing one array of
    shape (2, q_feat + d_feat) per pair.
    """
    return [
        np.concatenate(
            [np.stack((query[idx], query[idx + 1]), axis=0),
             np.stack((doc[idx], doc[idx + 1]), axis=0)],
            axis=1)
        for idx in range(0, query.shape[0], 2)
    ]
def preprocess(input_data):
    """Pair up every (query, doc) batch and join all batches into one array.

    Each element of per_batch is a list of (2, feat) pair arrays; numpy
    promotes each list to a 3D array before joining along axis 0, so the
    result has shape (total_pairs, 2, feat).
    """
    per_batch = [pair(batch[0], batch[1]) for batch in input_data]
    return np.concatenate(per_batch, axis=0)
def eval(eval_gen, eval_metrics, zmodel):
    """Run every evaluation generator through the Zoo model and print metrics.

    NOTE(review): this function shadows the builtin eval(); callers elsewhere
    in this file use the name, so it is kept for compatibility.

    Args:
        eval_gen: OrderedDict of tag -> data generator.
        eval_metrics: OrderedDict of metric name -> metric function.
        zmodel: Zoo model evaluated via forward().
    """
    for tag, generator in eval_gen.items():
        genfun = generator.get_batch_generator()
        print('[%s]\t[Eval:%s] ' % (time.strftime('%m-%d-%Y %H:%M:%S', time.localtime(time.time())), tag), end='')
        res = dict([[k,0.] for k in eval_metrics.keys()])
        num_valid = 0
        for input_data, y_true in genfun:
            # Shapes are fixed by the WikiQA config: query length 10, doc length 40.
            names = ['query', 'doc']
            shapes = [(None, 10), (None, 40)]
            list_input_data = _standardize_input_data(input_data, names, shapes, check_batch_axis=False)
            # The Zoo model takes query and doc concatenated along the feature axis.
            preprocessed_input_data = np.concatenate((list_input_data[0], list_input_data[1]), axis=1)
            y_pred = zmodel.forward(preprocessed_input_data)
            if issubclass(type(generator), inputs.list_generator.ListBasicGenerator):
                # list_counts holds the per-query slice boundaries; score each
                # query's candidate list separately.
                list_counts = input_data['list_counts']
                for k, eval_func in eval_metrics.items():
                    for lc_idx in range(len(list_counts)-1):
                        pre = list_counts[lc_idx]
                        suf = list_counts[lc_idx+1]
                        res[k] += eval_func(y_true = y_true[pre:suf], y_pred = y_pred[pre:suf])
                num_valid += len(list_counts) - 1
            else:
                for k, eval_func in eval_metrics.items():
                    res[k] += eval_func(y_true = y_true, y_pred = y_pred)
                num_valid += 1
            generator.reset()
        i_e = 0
        print('Iter:%d\t%s' % (i_e, '\t'.join(['%s=%f'%(k,v/num_valid) for k, v in res.items()])), end='\n')
        sys.stdout.flush()
# Return List(batch_input, batch_input, .....)
# there are totally 8995 pair in the dataset, and each time we would take a batch(100) samples
# roughly, set batch_num=100 would take the entire pairs.
def generate_training_data(train_gen, batch_num):
    """Drain up to batch_num batches from the training generators.

    Returns:
        (zoo_input_data, zoo_label): parallel lists of standardized
        [query, doc] input batches and their labels (labels get an extra
        trailing axis for the Zoo model).
    """
    zoo_input_data = []
    zoo_label = []
    count = 0
    # Loop over the generators repeatedly until batch_num batches are collected;
    # the return inside the innermost loop is the only exit.
    while True:
        for tag, generator in train_gen.items():
            genfun = generator.get_batch_generator()
            for input_data, y_true_value in genfun:
                count += 1
                if count > batch_num:
                    return (zoo_input_data, zoo_label)
                names = ['query', 'doc']
                shapes = [(None, 10), (None, 40)]
                list_input_data = _standardize_input_data(input_data, names, shapes,
                                                            check_batch_axis=False)
                zoo_input_data.append(list_input_data)
                y_true_value = np.expand_dims(y_true_value, 1)
                zoo_label.append(y_true_value)
def train(config):
    """Full training pipeline: load embeddings and datasets, build the data
    generators, wrap the Zoo model in a pairwise TimeDistributed ranker,
    and train it with SGD, evaluating after each epoch.

    Args:
        config: parsed MatchZoo JSON config (global/inputs/metrics sections).
    """
    print(json.dumps(config, indent=2), end='\n')
    # read basic config
    global_conf = config["global"]
    weights_file = str(global_conf['weights_file']) + '.%d'
    display_interval = int(global_conf['display_interval'])
    num_iters = int(global_conf['num_iters'])
    save_weights_iters = int(global_conf['save_weights_iters'])
    # read input config
    input_conf = config['inputs']
    share_input_conf = input_conf['share']
    # collect embedding: pretrained vectors if a path is given, else random init
    if 'embed_path' in share_input_conf:
        embed_dict = read_embedding(filename=share_input_conf['embed_path'])
        # the last vocabulary id is reserved as a zero-vector padding token
        _PAD_ = share_input_conf['vocab_size'] - 1
        embed_dict[_PAD_] = np.zeros((share_input_conf['embed_size'], ), dtype=np.float32)
        embed = np.float32(np.random.uniform(-0.2, 0.2, [share_input_conf['vocab_size'], share_input_conf['embed_size']]))
        share_input_conf['embed'] = convert_embed_2_numpy(embed_dict, embed = embed)
    else:
        embed = np.float32(np.random.uniform(-0.2, 0.2, [share_input_conf['vocab_size'], share_input_conf['embed_size']]))
        share_input_conf['embed'] = embed
    print('[Embedding] Embedding Load Done.', end='\n')
    # list all input tags and construct tags config
    input_train_conf = OrderedDict()
    input_eval_conf = OrderedDict()
    for tag in input_conf.keys():
        if 'phase' not in input_conf[tag]:
            continue
        if input_conf[tag]['phase'] == 'TRAIN':
            input_train_conf[tag] = {}
            input_train_conf[tag].update(share_input_conf)
            input_train_conf[tag].update(input_conf[tag])
        elif input_conf[tag]['phase'] == 'EVAL':
            input_eval_conf[tag] = {}
            input_eval_conf[tag].update(share_input_conf)
            input_eval_conf[tag].update(input_conf[tag])
    print('[Input] Process Input Tags. %s in TRAIN, %s in EVAL.' % (input_train_conf.keys(), input_eval_conf.keys()), end='\n')
    # collect dataset identification (each corpus file is read at most once)
    dataset = {}
    for tag in input_conf:
        if tag != 'share' and input_conf[tag]['phase'] == 'PREDICT':
            continue
        if 'text1_corpus' in input_conf[tag]:
            datapath = input_conf[tag]['text1_corpus']
            if datapath not in dataset:
                dataset[datapath], _ = read_data(datapath)
        if 'text2_corpus' in input_conf[tag]:
            datapath = input_conf[tag]['text2_corpus']
            if datapath not in dataset:
                dataset[datapath], _ = read_data(datapath)
    print('[Dataset] %s Dataset Load Done.' % len(dataset), end='\n')
    # initial data generator
    train_gen = OrderedDict()
    eval_gen = OrderedDict()
    for tag, conf in input_train_conf.items():
        print(conf, end='\n')
        conf['data1'] = dataset[conf['text1_corpus']]
        conf['data2'] = dataset[conf['text2_corpus']]
        generator = inputs.get(conf['input_type'])
        train_gen[tag] = generator( config = conf )
    for tag, conf in input_eval_conf.items():
        print(conf, end='\n')
        conf['data1'] = dataset[conf['text1_corpus']]
        conf['data2'] = dataset[conf['text2_corpus']]
        generator = inputs.get(conf['input_type'])
        eval_gen[tag] = generator( config = conf )
    ######### Load Model #########
    zmodel, kmodel = load_model(config)
    # NOTE(review): 'input' shadows the builtin; kept for compatibility.
    # The TimeDistributed wrapper scores the (positive, negative) pair along axis 1.
    input = Input(name='input', shape=(2, 50))
    timeDistributed = TimeDistributed(layer = zmodel, input_shape=(2, 50))(input)
    z_knrm_model = Model(input=input, output=timeDistributed)
    eval_metrics = OrderedDict()
    for mobj in config['metrics']:
        mobj = mobj.lower()
        if '@' in mobj:
            # e.g. 'ndcg@10' -> metrics.get('ndcg')(10)
            mt_key, mt_val = mobj.split('@', 1)
            eval_metrics[mobj] = metrics.get(mt_key)(int(mt_val))
        else:
            eval_metrics[mobj] = metrics.get(mobj)
    epoch_num = 400
    batch_size = 200 # take a look at the config
    batch_num_per_epoch = 10
    #train_as_whole(z_knrm_model, zmodel, train_gen, eval_gen, eval_metrics)
    z_knrm_model.set_tensorboard("/tmp/matchzoo", "knrm-sgd-1e4")
    # train_per_epoch(z_knrm_model, zmodel, train_gen, eval_gen, eval_metrics, optimMethod=SGD(1e-4))
    train_per_epoch(z_knrm_model, zmodel, train_gen, eval_gen, eval_metrics, optimMethod=SGD(1e-4, leaningrate_schedule=Poly(0.5, 50 * 400)))
    #train_per_epoch(z_knrm_model, zmodel, train_gen, eval_gen, eval_metrics, optimMethod="adam")
def train_per_epoch(z_knrm_model, zmodel, train_gen, eval_gen, eval_metrics, epoch_num=400, batch_size=200, optimMethod=SGD(1e-4)):
    """Train the pairwise ranking model one epoch at a time, evaluating after
    each epoch.

    NOTE(review): the SGD(1e-4) default is constructed once at function
    definition time and shared across calls; pass an explicit optimMethod
    per run (the caller in train() does).
    """
    z_knrm_model.compile(optimizer=optimMethod, loss=zloss(batch=10))
    print('[Model] Model Compile Done.', end='\n')
    total_batches = 100
    total_samples = total_batches * batch_size
    # The generating logic would try to take 100 batch from gen and each batch has 100 samples.
    # 100 batch is enough, as there are only around 10000 pairs in the dataset.
    # Each time the training process take 200 samples to assemble a batch for training,
    # so there are 50 iteration for each epoch.
    (zoo_input_data, zoo_label) = generate_training_data(train_gen, batch_num=total_batches)
    new_zinput = preprocess(zoo_input_data)
    # Pairwise labels are constant placeholders; the hinge loss only uses predictions.
    zoo_label = np.ones([int(total_samples/2), 2, 1])
    for i in range(0, epoch_num):
        z_knrm_model.fit(new_zinput, zoo_label, batch_size=200, nb_epoch=1, distributed=False)
        # z_knrm_model.saveModel('new_model_Adam.model', over_write=True)
        # zmodel.saveModel('zmodel.model', over_write=True)
        eval(eval_gen, eval_metrics, zmodel)
def train_as_whole(z_knrm_model, zmodel, train_gen, eval_gen, eval_metrics):
    """Alternative training path: pre-generate all epochs' data, run one big
    fit() call, save both models, then evaluate once at the end."""
    z_knrm_model.compile(optimizer='adam', loss=zloss(batch=10))
    print('[Model] Model Compile Done.', end='\n')
    epoch_num = 400
    batch_num_per_epoch = 10
    batch_size = 200 # take a look at the config
    total_batches = epoch_num * batch_num_per_epoch
    total_samples = total_batches * batch_size
    (zoo_input_data, zoo_label) = generate_training_data(train_gen, batch_num=total_batches)
    new_zinput = preprocess(zoo_input_data)
    # Pairwise labels are constant placeholders; the hinge loss only uses predictions.
    zoo_label = np.ones([int(total_samples/2), 2, 1])
    z_knrm_model.fit(new_zinput, zoo_label, batch_size=200, nb_epoch=1, distributed=False)
    z_knrm_model.saveModel('new_model_Adam.model', over_write=True)
    zmodel.saveModel('zmodel.model', over_write=True)
    eval(eval_gen, eval_metrics, zmodel)
def set_weights_per_layer(kmodel, zmodel, layer_name):
    """Copy one named layer's weights from the Keras model into the Zoo model."""
    keras_layer = kmodel.get_layer(layer_name)
    bigdl_weights = WeightsConverter.to_bigdl_weights(keras_layer, keras_layer.get_weights())
    # Exactly one Zoo layer is expected to carry this name.
    matching = [layer for layer in zmodel.layers if layer.name() == layer_name]
    matching[0].set_weights(bigdl_weights)
def load_model(config):
    """Build both models and copy the embedding and dense layer weights
    from the Keras model into the Zoo model so their outputs match."""
    zmodel = load_zoo_model(config)
    kmodel = load_keras2_model(config)
    for layer_name in ("embedding", "dense"):
        set_weights_per_layer(kmodel, zmodel, layer_name)
    return zmodel, kmodel
def predict(config):
    """Prediction pipeline: rebuild the models, run every PREDICT generator
    through both the Keras and Zoo models (checking they agree), accumulate
    ranking metrics, and optionally dump per-query scores in TREC or
    TEXTNET format.

    Args:
        config: parsed MatchZoo JSON config (global/inputs/outputs/metrics).
    """
    ######## Read input config ########
    print(json.dumps(config, indent=2), end='\n')
    input_conf = config['inputs']
    share_input_conf = input_conf['share']
    # collect embedding: pretrained vectors if a path is given, else random init
    if 'embed_path' in share_input_conf:
        embed_dict = read_embedding(filename=share_input_conf['embed_path'])
        # the last vocabulary id is reserved as a zero-vector padding token
        _PAD_ = share_input_conf['vocab_size'] - 1
        embed_dict[_PAD_] = np.zeros((share_input_conf['embed_size'], ), dtype=np.float32)
        embed = np.float32(np.random.uniform(-0.02, 0.02, [share_input_conf['vocab_size'], share_input_conf['embed_size']]))
        share_input_conf['embed'] = convert_embed_2_numpy(embed_dict, embed = embed)
    else:
        embed = np.float32(np.random.uniform(-0.2, 0.2, [share_input_conf['vocab_size'], share_input_conf['embed_size']]))
        share_input_conf['embed'] = embed
    print('[Embedding] Embedding Load Done.', end='\n')
    # list all input tags and construct tags config
    input_predict_conf = OrderedDict()
    for tag in input_conf.keys():
        if 'phase' not in input_conf[tag]:
            continue
        if input_conf[tag]['phase'] == 'PREDICT':
            input_predict_conf[tag] = {}
            input_predict_conf[tag].update(share_input_conf)
            input_predict_conf[tag].update(input_conf[tag])
    print('[Input] Process Input Tags. %s in PREDICT.' % (input_predict_conf.keys()), end='\n')
    # collect dataset identification (each corpus file is read at most once)
    dataset = {}
    for tag in input_conf:
        if tag == 'share' or input_conf[tag]['phase'] == 'PREDICT':
            if 'text1_corpus' in input_conf[tag]:
                datapath = input_conf[tag]['text1_corpus']
                if datapath not in dataset:
                    dataset[datapath], _ = read_data(datapath)
            if 'text2_corpus' in input_conf[tag]:
                datapath = input_conf[tag]['text2_corpus']
                if datapath not in dataset:
                    dataset[datapath], _ = read_data(datapath)
    print('[Dataset] %s Dataset Load Done.' % len(dataset), end='\n')
    # initial data generator
    predict_gen = OrderedDict()
    for tag, conf in input_predict_conf.items():
        print(conf, end='\n')
        conf['data1'] = dataset[conf['text1_corpus']]
        conf['data2'] = dataset[conf['text2_corpus']]
        generator = inputs.get(conf['input_type'])
        predict_gen[tag] = generator(
                            #data1 = dataset[conf['text1_corpus']],
                            #data2 = dataset[conf['text2_corpus']],
                            config = conf )
    ######## Read output config ########
    output_conf = config['outputs']
    ######## Load Model ########
    global_conf = config["global"]
    weights_file = str(global_conf['weights_file']) + '.' + str(global_conf['test_weights_iters'])
    zmodel, kmodel = load_model(config)
    # test y_pred from zoo model and keras model
    # keras2_y_pred = kmodel.predict(input_data, batch_size=batch_size)
    # y_pred = model.forward(input_data)
    # # y_pred = model.predict(input_data, distributed=False)
    # equal = np.allclose(y_pred, keras2_y_pred, rtol=1e-5, atol=1e-5)
    # print(equal)
    # return y_pred
    eval_metrics = OrderedDict()
    for mobj in config['metrics']:
        mobj = mobj.lower()
        if '@' in mobj:
            # e.g. 'ndcg@10' -> metrics.get('ndcg')(10)
            mt_key, mt_val = mobj.split('@', 1)
            eval_metrics[mobj] = metrics.get(mt_key)(int(mt_val))
        else:
            eval_metrics[mobj] = metrics.get(mobj)
    res = dict([[k,0.] for k in eval_metrics.keys()])
    # batch_size = 20
    # query_data = np.random.randint(0, 10000, [batch_size, 10])
    # doc_data = np.random.randint(0, 10000, [batch_size, 40])
    # input_data = [query_data, doc_data]
    # keras2_y_pred = keras2_model.predict(input_data, batch_size=batch_size)
    # y_pred = model.predict(input_data, distributed=False)
    # equal = np.allclose(y_pred, keras2_y_pred, rtol=1e-5, atol=1e-5)
    for tag, generator in predict_gen.items():
        genfun = generator.get_batch_generator()
        print('[%s]\t[Predict] @ %s ' % (time.strftime('%m-%d-%Y %H:%M:%S', time.localtime(time.time())), tag), end='')
        num_valid = 0
        res_scores = {}
        for input_data, y_true in genfun:
            ky_pred = kmodel.predict(input_data, batch_size=len(y_true))
            # Shapes are fixed by the WikiQA config: query length 10, doc length 40.
            names = ['query', 'doc']
            shapes = [(None, 10), (None, 40)]
            list_input_data = _standardize_input_data(input_data, names, shapes, check_batch_axis=False)
            # list_input_data = [data[0:2, :] for data in list_input_data]
            # y_pred = zmodel.predict(list_input_data, distributed=False)
            y_pred = zmodel.forward(list_input_data)
            # Sanity check: the Zoo and Keras models should produce matching scores.
            equal = np.allclose(y_pred, ky_pred, rtol=1e-5, atol=1e-5)
            print(equal)
            if issubclass(type(generator), inputs.list_generator.ListBasicGenerator):
                # list_counts holds per-query slice boundaries into the batch.
                list_counts = input_data['list_counts']
                for k, eval_func in eval_metrics.items():
                    for lc_idx in range(len(list_counts)-1):
                        pre = list_counts[lc_idx]
                        suf = list_counts[lc_idx+1]
                        res[k] += eval_func(y_true = y_true[pre:suf], y_pred = y_pred[pre:suf])
                y_pred = np.squeeze(y_pred)
                # Record (score, label) per (query id, doc id) for the output dump.
                for lc_idx in range(len(list_counts)-1):
                    pre = list_counts[lc_idx]
                    suf = list_counts[lc_idx+1]
                    for p, y, t in zip(input_data['ID'][pre:suf], y_pred[pre:suf], y_true[pre:suf]):
                        if p[0] not in res_scores:
                            res_scores[p[0]] = {}
                        res_scores[p[0]][p[1]] = (y, t)
                num_valid += len(list_counts) - 1
            else:
                for k, eval_func in eval_metrics.items():
                    res[k] += eval_func(y_true = y_true, y_pred = y_pred)
                for p, y, t in zip(input_data['ID'], y_pred, y_true):
                    if p[0] not in res_scores:
                        res_scores[p[0]] = {}
                    res_scores[p[0]][p[1]] = (y[1], t[1])
                num_valid += 1
            generator.reset()
        if tag in output_conf:
            if output_conf[tag]['save_format'] == 'TREC':
                # Standard TREC run format: qid Q0 docid rank score run-tag (+ label).
                with open(output_conf[tag]['save_path'], 'w') as f:
                    for qid, dinfo in res_scores.items():
                        dinfo = sorted(dinfo.items(), key=lambda d:d[1][0], reverse=True)
                        for inum,(did, (score, gt)) in enumerate(dinfo):
                            f.write('%s\tQ0\t%s\t%d\t%f\t%s\t%s\n'%(qid, did, inum, score, config['net_name'], gt))
            elif output_conf[tag]['save_format'] == 'TEXTNET':
                with open(output_conf[tag]['save_path'], 'w') as f:
                    for qid, dinfo in res_scores.items():
                        dinfo = sorted(dinfo.items(), key=lambda d:d[1][0], reverse=True)
                        for inum,(did, (score, gt)) in enumerate(dinfo):
                            f.write('%s %s %s %s\n'%(gt, qid, did, score))
        print('[Predict] results: ', '\t'.join(['%s=%f'%(k,v/num_valid) for k, v in res.items()]), end='\n')
        sys.stdout.flush()
def main(argv):
    """Entry point: parse CLI flags, load the JSON model config, and dispatch
    to train() or predict().

    Args:
        argv: command-line arguments (unused; argparse reads sys.argv itself).
    """
    init_nncontext()
    parser = argparse.ArgumentParser()
    parser.add_argument('--phase', default='train', help='Phase: Can be train or predict, the default value is train.')
    parser.add_argument('--model_file', default='./models/knrm_wikiqa.config', help='Model_file: MatchZoo model file for the chosen model.')
    args = parser.parse_args()
    # Fix: dropped the unused 'phase'/'model_file' aliases from the original.
    with open(args.model_file, 'r') as f:
        config = json.load(f)
    if args.phase == 'train':
        train(config)
    elif args.phase == 'predict':
        predict(config)
    else:
        print('Phase Error.', end='\n')
    return
if __name__=='__main__':
    main(sys.argv)
| [
"sys.path.insert",
"inputs.get",
"keras.backend.maximum",
"tensorflow.set_random_seed",
"argparse.ArgumentParser",
"tensorflow.Session",
"json.dumps",
"numpy.stack",
"numpy.random.seed",
"numpy.concatenate",
"tensorflow.ConfigProto",
"sys.stdout.flush",
"collections.OrderedDict",
"numpy.al... | [((113, 131), 'random.seed', 'random.seed', (['(49999)'], {}), '(49999)\n', (124, 131), False, 'import random\n'), ((145, 169), 'numpy.random.seed', 'numpy.random.seed', (['(49999)'], {}), '(49999)\n', (162, 169), False, 'import numpy\n'), ((188, 221), 'tensorflow.set_random_seed', 'tensorflow.set_random_seed', (['(49999)'], {}), '(49999)\n', (214, 221), False, 'import tensorflow\n'), ((752, 772), 'numpy.random.seed', 'np.random.seed', (['(1330)'], {}), '(1330)\n', (766, 772), True, 'import numpy as np\n'), ((783, 807), 'tensorflow.ConfigProto', 'tensorflow.ConfigProto', ([], {}), '()\n', (805, 807), False, 'import tensorflow\n'), ((854, 887), 'tensorflow.Session', 'tensorflow.Session', ([], {'config': 'config'}), '(config=config)\n', (872, 887), False, 'import tensorflow\n'), ((1099, 1149), 'sys.path.insert', 'sys.path.insert', (['(0)', "config['zmodel']['model_path']"], {}), "(0, config['zmodel']['model_path'])\n", (1114, 1149), False, 'import sys\n'), ((1470, 1520), 'sys.path.insert', 'sys.path.insert', (['(0)', "config['kmodel']['model_path']"], {}), "(0, config['kmodel']['model_path'])\n", (1485, 1520), False, 'import sys\n'), ((2227, 2266), 'keras.backend.maximum', 'KK.maximum', (['(0.0)', '(margin + y_neg - y_pos)'], {}), '(0.0, margin + y_neg - y_pos)\n', (2237, 2266), True, 'import keras.backend as KK\n'), ((2277, 2290), 'keras.backend.mean', 'KK.mean', (['loss'], {}), '(loss)\n', (2284, 2290), True, 'import keras.backend as KK\n'), ((2707, 2737), 'numpy.concatenate', 'np.concatenate', (['result'], {'axis': '(0)'}), '(result, axis=0)\n', (2721, 2737), True, 'import numpy as np\n'), ((6639, 6652), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6650, 6652), False, 'from collections import OrderedDict\n'), ((6675, 6688), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6686, 6688), False, 'from collections import OrderedDict\n'), ((8009, 8022), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8020, 8022), 
False, 'from collections import OrderedDict\n'), ((8038, 8051), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8049, 8051), False, 'from collections import OrderedDict\n'), ((8916, 8929), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8927, 8929), False, 'from collections import OrderedDict\n'), ((11917, 11974), 'bigdl.keras.converter.WeightsConverter.to_bigdl_weights', 'WeightsConverter.to_bigdl_weights', (['klayer', 'klayer_weights'], {}), '(klayer, klayer_weights)\n', (11950, 11974), False, 'from bigdl.keras.converter import WeightsConverter\n'), ((13424, 13437), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13435, 13437), False, 'from collections import OrderedDict\n'), ((14544, 14557), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (14555, 14557), False, 'from collections import OrderedDict\n'), ((15682, 15695), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (15693, 15695), False, 'from collections import OrderedDict\n'), ((19783, 19808), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (19806, 19808), False, 'import argparse\n'), ((2077, 2131), 'keras.layers.Lambda', 'klayers.Lambda', (['(lambda a: a[::2, :])'], {'output_shape': '(1,)'}), '(lambda a: a[::2, :], output_shape=(1,))\n', (2091, 2131), True, 'import keras.layers as klayers\n'), ((2152, 2207), 'keras.layers.Lambda', 'klayers.Lambda', (['(lambda a: a[1::2, :])'], {'output_shape': '(1,)'}), '(lambda a: a[1::2, :], output_shape=(1,))\n', (2166, 2207), True, 'import keras.layers as klayers\n'), ((2386, 2428), 'numpy.stack', 'np.stack', (['(query[i], query[i + 1])'], {'axis': '(0)'}), '((query[i], query[i + 1]), axis=0)\n', (2394, 2428), True, 'import numpy as np\n'), ((2442, 2480), 'numpy.stack', 'np.stack', (['(doc[i], doc[i + 1])'], {'axis': '(0)'}), '((doc[i], doc[i + 1]), axis=0)\n', (2450, 2480), True, 'import numpy as np\n'), ((2493, 2525), 'numpy.concatenate', 'np.concatenate', (['[t1, t2]'], {'axis': 
'(1)'}), '([t1, t2], axis=1)\n', (2507, 2525), True, 'import numpy as np\n'), ((4335, 4353), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4351, 4353), False, 'import sys\n'), ((5407, 5435), 'json.dumps', 'json.dumps', (['config'], {'indent': '(2)'}), '(config, indent=2)\n', (5417, 5435), False, 'import json\n'), ((6061, 6122), 'numpy.zeros', 'np.zeros', (["(share_input_conf['embed_size'],)"], {'dtype': 'np.float32'}), "((share_input_conf['embed_size'],), dtype=np.float32)\n", (6069, 6122), True, 'import numpy as np\n'), ((8258, 8288), 'inputs.get', 'inputs.get', (["conf['input_type']"], {}), "(conf['input_type'])\n", (8268, 8288), False, 'import inputs\n'), ((8546, 8576), 'inputs.get', 'inputs.get', (["conf['input_type']"], {}), "(conf['input_type'])\n", (8556, 8576), False, 'import inputs\n'), ((12503, 12531), 'json.dumps', 'json.dumps', (['config'], {'indent': '(2)'}), '(config, indent=2)\n', (12513, 12531), False, 'import json\n'), ((12842, 12903), 'numpy.zeros', 'np.zeros', (["(share_input_conf['embed_size'],)"], {'dtype': 'np.float32'}), "((share_input_conf['embed_size'],), dtype=np.float32)\n", (12850, 12903), True, 'import numpy as np\n'), ((14766, 14796), 'inputs.get', 'inputs.get', (["conf['input_type']"], {}), "(conf['input_type'])\n", (14776, 14796), False, 'import inputs\n'), ((19713, 19731), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (19729, 19731), False, 'import sys\n'), ((20189, 20201), 'json.load', 'json.load', (['f'], {}), '(f)\n', (20198, 20201), False, 'import json\n'), ((3245, 3319), 'keras.engine.training._standardize_input_data', '_standardize_input_data', (['input_data', 'names', 'shapes'], {'check_batch_axis': '(False)'}), '(input_data, names, shapes, check_batch_axis=False)\n', (3268, 3319), False, 'from keras.engine.training import _standardize_input_data\n'), ((3359, 3423), 'numpy.concatenate', 'np.concatenate', (['(list_input_data[0], list_input_data[1])'], {'axis': '(1)'}), '((list_input_data[0], 
list_input_data[1]), axis=1)\n', (3373, 3423), True, 'import numpy as np\n'), ((6151, 6249), 'numpy.random.uniform', 'np.random.uniform', (['(-0.2)', '(0.2)', "[share_input_conf['vocab_size'], share_input_conf['embed_size']]"], {}), "(-0.2, 0.2, [share_input_conf['vocab_size'],\n share_input_conf['embed_size']])\n", (6168, 6249), True, 'import numpy as np\n'), ((6369, 6467), 'numpy.random.uniform', 'np.random.uniform', (['(-0.2)', '(0.2)', "[share_input_conf['vocab_size'], share_input_conf['embed_size']]"], {}), "(-0.2, 0.2, [share_input_conf['vocab_size'],\n share_input_conf['embed_size']])\n", (6386, 6467), True, 'import numpy as np\n'), ((9178, 9195), 'metrics.get', 'metrics.get', (['mobj'], {}), '(mobj)\n', (9189, 9195), False, 'import metrics\n'), ((12932, 13032), 'numpy.random.uniform', 'np.random.uniform', (['(-0.02)', '(0.02)', "[share_input_conf['vocab_size'], share_input_conf['embed_size']]"], {}), "(-0.02, 0.02, [share_input_conf['vocab_size'],\n share_input_conf['embed_size']])\n", (12949, 13032), True, 'import numpy as np\n'), ((13152, 13250), 'numpy.random.uniform', 'np.random.uniform', (['(-0.2)', '(0.2)', "[share_input_conf['vocab_size'], share_input_conf['embed_size']]"], {}), "(-0.2, 0.2, [share_input_conf['vocab_size'],\n share_input_conf['embed_size']])\n", (13169, 13250), True, 'import numpy as np\n'), ((15944, 15961), 'metrics.get', 'metrics.get', (['mobj'], {}), '(mobj)\n', (15955, 15961), False, 'import metrics\n'), ((16908, 16982), 'keras.engine.training._standardize_input_data', '_standardize_input_data', (['input_data', 'names', 'shapes'], {'check_batch_axis': '(False)'}), '(input_data, names, shapes, check_batch_axis=False)\n', (16931, 16982), False, 'from keras.engine.training import _standardize_input_data\n'), ((17204, 17256), 'numpy.allclose', 'np.allclose', (['y_pred', 'ky_pred'], {'rtol': '(1e-05)', 'atol': '(1e-05)'}), '(y_pred, ky_pred, rtol=1e-05, atol=1e-05)\n', (17215, 17256), True, 'import numpy as np\n'), ((5078, 5152), 
'keras.engine.training._standardize_input_data', '_standardize_input_data', (['input_data', 'names', 'shapes'], {'check_batch_axis': '(False)'}), '(input_data, names, shapes, check_batch_axis=False)\n', (5101, 5152), False, 'from keras.engine.training import _standardize_input_data\n'), ((5297, 5328), 'numpy.expand_dims', 'np.expand_dims', (['y_true_value', '(1)'], {}), '(y_true_value, 1)\n', (5311, 5328), True, 'import numpy as np\n'), ((9098, 9117), 'metrics.get', 'metrics.get', (['mt_key'], {}), '(mt_key)\n', (9109, 9117), False, 'import metrics\n'), ((15864, 15883), 'metrics.get', 'metrics.get', (['mt_key'], {}), '(mt_key)\n', (15875, 15883), False, 'import metrics\n'), ((17766, 17784), 'numpy.squeeze', 'np.squeeze', (['y_pred'], {}), '(y_pred)\n', (17776, 17784), True, 'import numpy as np\n'), ((2981, 2992), 'time.time', 'time.time', ([], {}), '()\n', (2990, 2992), False, 'import time\n'), ((16605, 16616), 'time.time', 'time.time', ([], {}), '()\n', (16614, 16616), False, 'import time\n')] |
import numpy as np
import pandas as pd
import streamlit as st
import matplotlib.pyplot as plt
import datetime as dt
plt.style.use('classic')  # matplotlib look for any charts rendered below
# --- Streamlit page header and project links ---
st.title('MaxiMiser')
st.write('')
st.write('Simple bonus optimization strategy of opening new checking or saving accounts')
st.markdown('[Project Slides](https://docs.google.com/presentation/d/e/2PACX-1vQG7KDThs3jOf9UwIpNeecrSf3KeJOFONO7UD6K0Eert46p6hyDwPmi1LgHxhtopPe9D5l68MMOZaBq/pub?start=false&loop=false&delayms=3000)')
st.markdown('[Project Github](https://github.com/mjiang5/maximiser)')
# Load the scraped bank-bonus offers and derive the month window during which
# each offer's monthly direct-deposit requirement must be met.
scraped_df = pd.read_csv('data/real_time_bonuses.csv')
scraped_df = scraped_df.iloc[:, 1:]  # drop the index column written by to_csv
scraped_df = scraped_df.drop(columns=['post_link'])
# NOTE(review): the scraped column 'expiration_date' is treated as the account
# opening date everywhere below -- confirm against the scraper's output.
scraped_df.rename(columns={'expiration_date': 'open_date'}, inplace=True)
scraped_df['month'] = scraped_df['open_date'].apply(lambda x: pd.to_datetime(x).month)
scraped_df['keep_time'] = scraped_df['keep_time'].astype('int')
scraped_df['ini_month'] = scraped_df['month']+1  # deposits start the month after opening
scraped_df['end_month'] = scraped_df['ini_month'] + scraped_df['keep_time']-1
bonus_df = scraped_df
# Sidebar controls: the user's monthly direct-deposit budget and the maximum
# number of accounts they are willing to open.
st.sidebar.subheader('Your requirements:')
budget = st.sidebar.number_input('Maximum monthly direct deposit:', min_value=100.0, step=500.0)
num_account = st.sidebar.number_input('Maximum number of accounts:', min_value=1.0, step=1.0, max_value=6.0)
######## build the optimal strategy ###########
# Binary integer program: choose a subset of offers maximizing total bonus,
# subject to (a) the combined direct deposits due in any month staying within
# budget and (b) a cap on the number of accounts opened.
# NOTE(review): wildcard import mid-file is kept as-is for this script's style.
from pulp import *
problem = LpProblem("Bonus Portfolio", LpMaximize)
bonus_title = list(bonus_df['title'])
months = list(range(2, 14))  # months 2..13, i.e. the year following the offers
bonus_amount = dict(zip(bonus_title, bonus_df['bonus']))
monthly_dd_amount = dict(zip(bonus_title, bonus_df['monthly_dd']))
account_keeping_time = dict(zip(bonus_title, bonus_df['keep_time']))
ini_month = dict(zip(bonus_title, bonus_df['ini_month']))
end_month = dict(zip(bonus_title, bonus_df['end_month']))
open_date = dict(zip(bonus_title, bonus_df['open_date']))
# vector of variables: one 0/1 decision per offer
holdings = LpVariable.dicts("offer", bonus_title, cat='Binary')
# Budget constraint for each month: only offers whose deposit window covers m count.
for m in months:
    problem += lpSum([monthly_dd_amount[i]*holdings[i] for i in bonus_title if (m<=end_month[i] and m>=ini_month[i])]) <= budget
problem += lpSum([holdings[i] for i in bonus_title]) <= num_account
# objective function: maximize total bonus earned
problem += lpSum([bonus_amount[i]*holdings[i] for i in bonus_title])
problem.solve()
status = LpStatus[problem.status]
total_return = value(problem.objective)
# Collect the chosen offers (decision variable == 1) into a display table.
optimal_holding = pd.DataFrame(columns=['Offer', 'Monthly Direct Deposit', 'Bonus', 'Open Date', 'Keeping Time (Month)'])
i = 0
for v in holdings:
    if holdings[v].varValue == 1:
        optimal_holding.loc[i, 'Offer'] = v
        optimal_holding.loc[i, 'Monthly Direct Deposit'] = monthly_dd_amount[v]
        optimal_holding.loc[i, 'Bonus'] = bonus_amount[v]
        optimal_holding.loc[i, 'Open Date'] = open_date[v]
        optimal_holding.loc[i, 'Keeping Time (Month)'] = account_keeping_time[v] #int()?
        i += 1
# Reference day for converting dates to day offsets used on the x-axis.
base_day = dt.datetime(2020, 1, 1)
# Close date = open date + keep_time months (approximated as 31 days/month).
optimal_holding['Close Date'] = optimal_holding['Keeping Time (Month)'].apply(lambda x: dt.timedelta(31*x))
optimal_holding['Close Date'] += optimal_holding['Open Date'].apply(pd.to_datetime)
optimal_holding['Close Date'] = optimal_holding['Close Date'].dt.strftime('%m-%d-%Y')
optimal_holding['Open Date'] = pd.to_datetime(optimal_holding['Open Date'])
optimal_holding['Close Date'] = pd.to_datetime(optimal_holding['Close Date'])
# 'open'/'close' hold timedeltas from base_day for plotting; the display
# columns are then re-formatted as strings.
optimal_holding['open'] = optimal_holding['Open Date']-base_day
optimal_holding['close'] = optimal_holding['Close Date']-base_day
optimal_holding['Open Date'] = optimal_holding['Open Date'].dt.strftime('%m-%d-%Y')
optimal_holding['Close Date'] = optimal_holding['Close Date'].dt.strftime('%m-%d-%Y')
# NOTE: these list names shadow the per-offer open_date dict defined above;
# the dict is no longer needed past this point, but the reuse is fragile.
open_date = [optimal_holding.loc[i, 'open'].days for i in optimal_holding.index]
close_date = [optimal_holding.loc[i, 'close'].days for i in optimal_holding.index]
offers = ['Bonus {}'.format(i) for i in optimal_holding.index]
# --- Gantt-style timeline of the chosen accounts ---------------------------
from matplotlib.ticker import MultipleLocator
fig1 = plt.figure(figsize=(18, 5))
ax = fig1.add_subplot(111)
for i in range(len(open_date)):
    plt.plot([open_date[i], close_date[i]], [i+0.5, i+0.5], lw=55, c='blue') #########
plt.xlim(0, 365*2)
plt.ylim(0, len(open_date))
ax.set_aspect(40)
plt.tick_params(axis='x', which='both', bottom=False, top=False)
plt.tick_params(axis='y', which='both', left=False, right=False)
# Quarterly tick positions (day offsets from base_day) and labels.
xlist = [0, 90, 181, 273, 365, 455, 546, 638, 730]
xlabels=['2019-12', '2020-03', '2020-06', '2020-9', '2020-12', '2021-03', '2021-06', '2021-09', '2021-12']
plt.xticks(xlist, xlabels)
plt.yticks(np.arange(0.5, len(open_date)+0.5), offers)
ax.xaxis.grid(True, which='major', color='w', lw=1, linestyle='solid')
ax.yaxis.grid(True, which='minor', color='w', lw=1, linestyle='solid')
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.yaxis.set_minor_locator(MultipleLocator(1))
# hide axis spines
for spine in ax.spines.values():
    spine.set_visible(False)
ax.set_facecolor('lavender')
fig1.patch.set_alpha(0.)
plt.tight_layout()
st.sidebar.subheader('')
st.sidebar.subheader('')
# --- Render the optimal strategy table, return, and timeline ---------------
holding_cols = ['Offer', 'Open Date', 'Keeping Time (Month)', 'Close Date', 'Monthly Direct Deposit', 'Bonus']
st.subheader('Optimal Strategy:')
st.table(optimal_holding[holding_cols])
st.subheader('Optimal Return:')
st.write(total_return)
st.pyplot(fig1, dpi=200)
######## build the lazy strategy ###########
# Baseline "lazy" strategy for comparison: within the same monthly
# direct-deposit budget, open only the single offer with the largest bonus.
# (lazy_num_account documents that this baseline is fixed at one account.)
lazy_df = bonus_df
lazy_budget = budget
lazy_num_account = 1
lazy_holding = pd.DataFrame()
# Keep only offers whose monthly direct-deposit requirement fits the budget,
# then rank them by bonus size, largest first.
lazy_df = lazy_df[lazy_df['monthly_dd'] <= lazy_budget]
lazy_df = lazy_df.sort_values(by=['bonus'], ascending=False)
# Take only the top offer. (Removed a dead `lazy = lazy_df.iloc[0]`
# assignment that was never read. DataFrame.append is deprecated in modern
# pandas; kept for compatibility with the pandas version this app targets.)
lazy_holding = lazy_holding.append(lazy_df.iloc[0])
lazy_total_return = np.sum(lazy_holding['bonus'])
lazy_holding.reset_index(drop=True, inplace=True)
# Rename scraped column names to the display names used in the UI table.
cols = ['title', 'monthly_dd', 'bonus', 'open_date', 'keep_time']
lazy_holding = lazy_holding[cols].rename(columns={'title': 'Offer',
                                                  'monthly_dd': 'Monthly Direct Deposit',
                                                  'bonus': 'Bonus',
                                                  'open_date': 'Open Date',
                                                  'keep_time': 'Keeping Time (Month)'})
# lazy_holding
# Derive the close date (open date + keep_time months, ~31 days/month).
# NOTE(review): the intermediate strftime uses '%Y-%m-%d' here but
# '%m-%d-%Y' in the optimal-strategy section; harmless since the value is
# re-parsed by pd.to_datetime below, but worth unifying.
lazy_holding['Close Date'] = lazy_holding['Keeping Time (Month)'].apply(lambda x: dt.timedelta(31*x))
lazy_holding['Close Date'] += pd.to_datetime(lazy_holding['Open Date'])
lazy_holding['Close Date'] = lazy_holding['Close Date'].dt.strftime('%Y-%m-%d')
lazy_holding['Open Date'] = pd.to_datetime(lazy_holding['Open Date'])
lazy_holding['Close Date'] = pd.to_datetime(lazy_holding['Close Date'])
# Day offsets from base_day for plotting; display columns become strings.
lazy_holding['open'] = lazy_holding['Open Date']-base_day
lazy_holding['close'] = lazy_holding['Close Date']-base_day
lazy_holding['Open Date'] = lazy_holding['Open Date'].dt.strftime('%m-%d-%Y')
lazy_holding['Close Date'] = lazy_holding['Close Date'].dt.strftime('%m-%d-%Y')
lazy_open_date = [lazy_holding.loc[i, 'open'].days for i in lazy_holding.index]
lazy_close_date = [lazy_holding.loc[i, 'close'].days for i in lazy_holding.index]
lazy_offers = ['Bonus {}'.format(i) for i in lazy_holding.index]
# --- Gantt-style timeline for the lazy strategy ----------------------------
fig2 = plt.figure(figsize=(18, 5))
ax = fig2.add_subplot(111)
for i in range(len(lazy_open_date)):
    plt.plot([lazy_open_date[i], lazy_close_date[i]], [i+0.5, i+0.5], lw=55, c='blue')
plt.xlim(0, 365*2)
plt.ylim(0, len(lazy_open_date))
ax.set_aspect(40)
plt.tick_params(axis='x', which='both', bottom=False, top=False)
plt.tick_params(axis='y', which='both', left=False, right=False)
# Quarterly tick positions (day offsets from base_day) and labels.
xlist = [0, 90, 181, 273, 365, 455, 546, 638, 730]
xlabels=['2019-12', '2020-03', '2020-06', '2020-9', '2020-12', '2021-03', '2021-06', '2021-09', '2021-12']
plt.xticks(xlist, xlabels)
plt.yticks(np.arange(0.5, len(lazy_open_date)+0.5), lazy_offers)
ax.xaxis.grid(True, which='major', color='w', lw=1, linestyle='solid')
ax.yaxis.grid(True, which='minor', color='w', lw=1, linestyle='solid')
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.yaxis.set_minor_locator(MultipleLocator(1))
# hide axis spines
for spine in ax.spines.values():
    spine.set_visible(False)
ax.set_facecolor('lavender')
fig2.patch.set_alpha(0.)
plt.tight_layout()
# --- Side-by-side comparison of the two strategies in the sidebar ----------
st.sidebar.subheader('Validation')
st.sidebar.markdown('Optimal Strategy Return: '+str(total_return))
st.sidebar.markdown('vs')
st.sidebar.markdown('Lazy Strategy Return: '+str(lazy_total_return))
st.sidebar.subheader('')
st.sidebar.subheader('')
st.subheader('Lazy Strategy:')
st.table(lazy_holding[holding_cols])
st.subheader('Lazy Strategy Return:')
st.write(lazy_total_return)
st.pyplot(fig2, dpi=200)
| [
"streamlit.table",
"pandas.read_csv",
"streamlit.sidebar.number_input",
"datetime.timedelta",
"pandas.to_datetime",
"streamlit.title",
"datetime.datetime",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"streamlit.sidebar.markdown",
"pandas.DataFrame",
"streamlit.markdown",
"matplo... | [((116, 140), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""classic"""'], {}), "('classic')\n", (129, 140), True, 'import matplotlib.pyplot as plt\n'), ((142, 163), 'streamlit.title', 'st.title', (['"""MaxiMiser"""'], {}), "('MaxiMiser')\n", (150, 163), True, 'import streamlit as st\n'), ((164, 176), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (172, 176), True, 'import streamlit as st\n'), ((177, 276), 'streamlit.write', 'st.write', (['"""Simple bonus optimization strategy of opening new checking or saving accounts"""'], {}), "(\n 'Simple bonus optimization strategy of opening new checking or saving accounts'\n )\n", (185, 276), True, 'import streamlit as st\n'), ((267, 477), 'streamlit.markdown', 'st.markdown', (['"""[Project Slides](https://docs.google.com/presentation/d/e/2PACX-1vQG7KDThs3jOf9UwIpNeecrSf3KeJOFONO7UD6K0Eert46p6hyDwPmi1LgHxhtopPe9D5l68MMOZaBq/pub?start=false&loop=false&delayms=3000)"""'], {}), "(\n '[Project Slides](https://docs.google.com/presentation/d/e/2PACX-1vQG7KDThs3jOf9UwIpNeecrSf3KeJOFONO7UD6K0Eert46p6hyDwPmi1LgHxhtopPe9D5l68MMOZaBq/pub?start=false&loop=false&delayms=3000)'\n )\n", (278, 477), True, 'import streamlit as st\n'), ((468, 537), 'streamlit.markdown', 'st.markdown', (['"""[Project Github](https://github.com/mjiang5/maximiser)"""'], {}), "('[Project Github](https://github.com/mjiang5/maximiser)')\n", (479, 537), True, 'import streamlit as st\n'), ((553, 594), 'pandas.read_csv', 'pd.read_csv', (['"""data/real_time_bonuses.csv"""'], {}), "('data/real_time_bonuses.csv')\n", (564, 594), True, 'import pandas as pd\n'), ((1060, 1102), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['"""Your requirements:"""'], {}), "('Your requirements:')\n", (1080, 1102), True, 'import streamlit as st\n'), ((1112, 1203), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', (['"""Maximum monthly direct deposit:"""'], {'min_value': '(100.0)', 'step': '(500.0)'}), "('Maximum monthly direct deposit:', 
min_value=100.0,\n step=500.0)\n", (1135, 1203), True, 'import streamlit as st\n'), ((1214, 1313), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', (['"""Maximum number of accounts:"""'], {'min_value': '(1.0)', 'step': '(1.0)', 'max_value': '(6.0)'}), "('Maximum number of accounts:', min_value=1.0, step=\n 1.0, max_value=6.0)\n", (1237, 1313), True, 'import streamlit as st\n'), ((2384, 2491), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Offer', 'Monthly Direct Deposit', 'Bonus', 'Open Date',\n 'Keeping Time (Month)']"}), "(columns=['Offer', 'Monthly Direct Deposit', 'Bonus',\n 'Open Date', 'Keeping Time (Month)'])\n", (2396, 2491), True, 'import pandas as pd\n'), ((2907, 2930), 'datetime.datetime', 'dt.datetime', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (2918, 2930), True, 'import datetime as dt\n'), ((3242, 3286), 'pandas.to_datetime', 'pd.to_datetime', (["optimal_holding['Open Date']"], {}), "(optimal_holding['Open Date'])\n", (3256, 3286), True, 'import pandas as pd\n'), ((3319, 3364), 'pandas.to_datetime', 'pd.to_datetime', (["optimal_holding['Close Date']"], {}), "(optimal_holding['Close Date'])\n", (3333, 3364), True, 'import pandas as pd\n'), ((3986, 4013), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 5)'}), '(figsize=(18, 5))\n', (3996, 4013), True, 'import matplotlib.pyplot as plt\n'), ((4164, 4184), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(365 * 2)'], {}), '(0, 365 * 2)\n', (4172, 4184), True, 'import matplotlib.pyplot as plt\n'), ((4229, 4293), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""x"""', 'which': '"""both"""', 'bottom': '(False)', 'top': '(False)'}), "(axis='x', which='both', bottom=False, top=False)\n", (4244, 4293), True, 'import matplotlib.pyplot as plt\n'), ((4294, 4358), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""y"""', 'which': '"""both"""', 'left': '(False)', 'right': '(False)'}), "(axis='y', which='both', left=False, 
right=False)\n", (4309, 4358), True, 'import matplotlib.pyplot as plt\n'), ((4517, 4543), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xlist', 'xlabels'], {}), '(xlist, xlabels)\n', (4527, 4543), True, 'import matplotlib.pyplot as plt\n'), ((4977, 4995), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4993, 4995), True, 'import matplotlib.pyplot as plt\n'), ((4997, 5021), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['""""""'], {}), "('')\n", (5017, 5021), True, 'import streamlit as st\n'), ((5022, 5046), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['""""""'], {}), "('')\n", (5042, 5046), True, 'import streamlit as st\n'), ((5160, 5193), 'streamlit.subheader', 'st.subheader', (['"""Optimal Strategy:"""'], {}), "('Optimal Strategy:')\n", (5172, 5193), True, 'import streamlit as st\n'), ((5194, 5233), 'streamlit.table', 'st.table', (['optimal_holding[holding_cols]'], {}), '(optimal_holding[holding_cols])\n', (5202, 5233), True, 'import streamlit as st\n'), ((5234, 5265), 'streamlit.subheader', 'st.subheader', (['"""Optimal Return:"""'], {}), "('Optimal Return:')\n", (5246, 5265), True, 'import streamlit as st\n'), ((5266, 5288), 'streamlit.write', 'st.write', (['total_return'], {}), '(total_return)\n', (5274, 5288), True, 'import streamlit as st\n'), ((5289, 5313), 'streamlit.pyplot', 'st.pyplot', (['fig1'], {'dpi': '(200)'}), '(fig1, dpi=200)\n', (5298, 5313), True, 'import streamlit as st\n'), ((5446, 5460), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5458, 5460), True, 'import pandas as pd\n'), ((5675, 5704), 'numpy.sum', 'np.sum', (["lazy_holding['bonus']"], {}), "(lazy_holding['bonus'])\n", (5681, 5704), True, 'import numpy as np\n'), ((6362, 6403), 'pandas.to_datetime', 'pd.to_datetime', (["lazy_holding['Open Date']"], {}), "(lazy_holding['Open Date'])\n", (6376, 6403), True, 'import pandas as pd\n'), ((6513, 6554), 'pandas.to_datetime', 'pd.to_datetime', (["lazy_holding['Open Date']"], {}), 
"(lazy_holding['Open Date'])\n", (6527, 6554), True, 'import pandas as pd\n'), ((6584, 6626), 'pandas.to_datetime', 'pd.to_datetime', (["lazy_holding['Close Date']"], {}), "(lazy_holding['Close Date'])\n", (6598, 6626), True, 'import pandas as pd\n'), ((7175, 7202), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 5)'}), '(figsize=(18, 5))\n', (7185, 7202), True, 'import matplotlib.pyplot as plt\n'), ((7361, 7381), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(365 * 2)'], {}), '(0, 365 * 2)\n', (7369, 7381), True, 'import matplotlib.pyplot as plt\n'), ((7431, 7495), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""x"""', 'which': '"""both"""', 'bottom': '(False)', 'top': '(False)'}), "(axis='x', which='both', bottom=False, top=False)\n", (7446, 7495), True, 'import matplotlib.pyplot as plt\n'), ((7496, 7560), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""y"""', 'which': '"""both"""', 'left': '(False)', 'right': '(False)'}), "(axis='y', which='both', left=False, right=False)\n", (7511, 7560), True, 'import matplotlib.pyplot as plt\n'), ((7719, 7745), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xlist', 'xlabels'], {}), '(xlist, xlabels)\n', (7729, 7745), True, 'import matplotlib.pyplot as plt\n'), ((8189, 8207), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8205, 8207), True, 'import matplotlib.pyplot as plt\n'), ((8209, 8243), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['"""Validation"""'], {}), "('Validation')\n", (8229, 8243), True, 'import streamlit as st\n'), ((8311, 8336), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""vs"""'], {}), "('vs')\n", (8330, 8336), True, 'import streamlit as st\n'), ((8407, 8431), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['""""""'], {}), "('')\n", (8427, 8431), True, 'import streamlit as st\n'), ((8432, 8456), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['""""""'], {}), "('')\n", (8452, 8456), 
True, 'import streamlit as st\n'), ((8458, 8488), 'streamlit.subheader', 'st.subheader', (['"""Lazy Strategy:"""'], {}), "('Lazy Strategy:')\n", (8470, 8488), True, 'import streamlit as st\n'), ((8489, 8525), 'streamlit.table', 'st.table', (['lazy_holding[holding_cols]'], {}), '(lazy_holding[holding_cols])\n', (8497, 8525), True, 'import streamlit as st\n'), ((8526, 8563), 'streamlit.subheader', 'st.subheader', (['"""Lazy Strategy Return:"""'], {}), "('Lazy Strategy Return:')\n", (8538, 8563), True, 'import streamlit as st\n'), ((8564, 8591), 'streamlit.write', 'st.write', (['lazy_total_return'], {}), '(lazy_total_return)\n', (8572, 8591), True, 'import streamlit as st\n'), ((8592, 8616), 'streamlit.pyplot', 'st.pyplot', (['fig2'], {'dpi': '(200)'}), '(fig2, dpi=200)\n', (8601, 8616), True, 'import streamlit as st\n'), ((4078, 4154), 'matplotlib.pyplot.plot', 'plt.plot', (['[open_date[i], close_date[i]]', '[i + 0.5, i + 0.5]'], {'lw': '(55)', 'c': '"""blue"""'}), "([open_date[i], close_date[i]], [i + 0.5, i + 0.5], lw=55, c='blue')\n", (4086, 4154), True, 'import matplotlib.pyplot as plt\n'), ((4769, 4787), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(1)'], {}), '(1)\n', (4784, 4787), False, 'from matplotlib.ticker import MultipleLocator\n'), ((4816, 4834), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(1)'], {}), '(1)\n', (4831, 4834), False, 'from matplotlib.ticker import MultipleLocator\n'), ((7273, 7363), 'matplotlib.pyplot.plot', 'plt.plot', (['[lazy_open_date[i], lazy_close_date[i]]', '[i + 0.5, i + 0.5]'], {'lw': '(55)', 'c': '"""blue"""'}), "([lazy_open_date[i], lazy_close_date[i]], [i + 0.5, i + 0.5], lw=55,\n c='blue')\n", (7281, 7363), True, 'import matplotlib.pyplot as plt\n'), ((7981, 7999), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(1)'], {}), '(1)\n', (7996, 7999), False, 'from matplotlib.ticker import MultipleLocator\n'), ((8028, 8046), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(1)'], 
{}), '(1)\n', (8043, 8046), False, 'from matplotlib.ticker import MultipleLocator\n'), ((3020, 3040), 'datetime.timedelta', 'dt.timedelta', (['(31 * x)'], {}), '(31 * x)\n', (3032, 3040), True, 'import datetime as dt\n'), ((6312, 6332), 'datetime.timedelta', 'dt.timedelta', (['(31 * x)'], {}), '(31 * x)\n', (6324, 6332), True, 'import datetime as dt\n'), ((821, 838), 'pandas.to_datetime', 'pd.to_datetime', (['x'], {}), '(x)\n', (835, 838), True, 'import pandas as pd\n')] |
#!/usr/bin/env
"""
CTDvPrawler_plot.py
Plot data from cruises
Currently
---------
ctd plots
plots prawler data as secondary
Input - CruiseID
Compatibility:
==============
python >=3.6
python 2.7
"""
from __future__ import absolute_import, division, print_function
import argparse
import datetime
import os
import matplotlib as mpl
import numpy as np
from netCDF4 import Dataset
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from plots.profile_plot import CTDProfilePlot
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(1, parent_dir)
from calc.EPIC2Datetime import EPIC2Datetime, get_UDUNITS
from calc.haversine import distance
from io_utils import ConfigParserLocal
from io_utils.EcoFOCI_netCDF_read import EcoFOCI_netCDF
# Module metadata (authorship placeholders were scrubbed upstream).
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__created__ = datetime.datetime(2016, 8, 22)
__modified__ = datetime.datetime(2016, 8, 24)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'CTD', 'Plots', 'Cruise', 'QC'
"""--------------------------------Plot Routines---------------------------------------"""
def twovar_minmax_plotbounds(var1,var2):
    """Return (min, max) plot bounds spanning two arrays.

    Missing values are expected to be np.nan and are ignored.  When both
    arrays are entirely NaN, the fallback bounds (-1, 1) are returned so a
    plot axis can still be drawn.
    """
    var1_all_nan = np.isnan(var1).all()
    var2_all_nan = np.isnan(var2).all()
    if var1_all_nan and var2_all_nan:
        # Nothing finite to plot; use symmetric dummy bounds.
        return (-1, 1)
    if var1_all_nan:
        finite = var2[~np.isnan(var2)]
        return (finite.min(), finite.max())
    if var2_all_nan:
        finite = var1[~np.isnan(var1)]
        return (finite.min(), finite.max())
    # Both arrays carry finite data: take the joint envelope.
    finite1 = var1[~np.isnan(var1)]
    finite2 = var2[~np.isnan(var2)]
    min_bound = np.min((finite1.min(), finite2.min()))
    max_bound = np.max((finite1.max(), finite2.max()))
    return (min_bound, max_bound)
"""------------------------------------- Main -----------------------------------------"""
# Command-line interface: positional input paths plus one boolean flag per
# supported plot type (each flag selects a variable combination vs depth).
parser = argparse.ArgumentParser(description='CTD plots')
parser.add_argument('DataPath', metavar='DataPath', type=str,help='full path to directory of processed nc files')
parser.add_argument('PrawlerDataPath', metavar='PrawlerDataPath', type=str,help='full path to directory of processed prawler nc files')
parser.add_argument('PrawlerProfileID', metavar='PrawlerProfileID', type=int,help='sequential prawler profile id')
parser.add_argument('-TSvD','--TSvD', action="store_true",
        help='Temperature, Salinity, SigmaT vs depth')
parser.add_argument('-OxyFluor','--OxyFluor', action="store_true",
        help='Temperature, Oxygen, Fluorometer vs depth')
parser.add_argument('-OxyConcFluor','--OxyConcFluor', action="store_true",
        help='Temperature, Oxygen Concentration, Fluorometer vs depth')
parser.add_argument('-ParTurbFluor','--ParTurbFluor', action="store_true",
        help='PAR, Turbidity, Fluorometer vs depth')
parser.add_argument('-ParFluor','--ParFluor', action="store_true",
        help='PAR, Fluorometer vs depth')
parser.add_argument('-TurbFluor','--TurbFluor', action="store_true",
        help='Turbidity, Fluorometer vs depth (common for only Eco')
parser.add_argument('-ParTransFluor','--ParTransFluor', action="store_true",
        help='Transmissometer, Turbidity, Fluorometer vs depth (common package for EMA)')
parser.add_argument('-TransTurbFluor','--TransTurbFluor', action="store_true",
        help='Transmissometer, Turbidity, Fluorometer vs depth (common package for EMA)')
parser.add_argument('-TransFluor','--TransFluor', action="store_true",
        help='Transmissometer, Fluorometer vs depth (common package for EMA)')
args = parser.parse_args()
# --- Read the CTD cast file and the prawler file ---------------------------
# BUGFIX: the original wrote ``print("...{file} ").format(file=...)``, which
# calls .format on print()'s None return value and raises AttributeError.
# Format the message first, then print it.
ncfile=args.DataPath
print("Working on file {file}".format(file=ncfile))
nc = EcoFOCI_netCDF(ncfile)
ncdata = nc.ncreadfile_dic()
g_atts = nc.get_global_atts()
nc.close()
# EPIC2Datetime returns a list of datetimes; the cast time is its first entry.
cast_time = EPIC2Datetime(ncdata['time'],ncdata['time2'])[0]
ncfilep=args.PrawlerDataPath
print("Working on file {file}".format(file=ncfilep))
nc = EcoFOCI_netCDF(ncfilep)
ncdatap = nc.ncreadfile_dic()
g_attsp = nc.get_global_atts()
nc.close()
# Depth axis: EPIC files may store depth as 1-D or as (time, dep, lat, lon).
if np.ndim(ncdata['dep']) == 1:
    ydata = ncdata['dep'][:]
else:
    ydata = ncdata['dep'][0,:,0,0]
# Replace the EPIC missing-value sentinel (>= 1e30) with NaN in all data
# variables (coordinate/time variables are left untouched for the CTD file).
for dkey in ncdata.keys():
    if not dkey in ['lat','lon','depth','dep','time','time2']:
        ncdata[dkey][0,ncdata[dkey][0,:,0,0] >= 1e30,0,0] = np.nan
for dkey in ncdatap.keys():
    ncdatap[dkey][ncdatap[dkey] >= 1e30] = np.nan
# --- Ensure all image output directories exist -----------------------------
# Subdirectory names must match the paths used by plt.savefig below.
# BUGFIX: 'PARFluor' was missing from the original list, so the -ParFluor
# option crashed on savefig because images/<CRUISE>/PARFluor/ never existed.
image_base = 'images/' + g_atts['CRUISE']
if not os.path.exists(image_base):
    os.makedirs(image_base)
for subdir in ['TSSigma', 'TO2F', 'TO2concF', 'PARTurbFluor', 'PARFluor',
               'TurbFluor', 'ParTransFluor', 'TransTurbFluor', 'TransFluor']:
    subpath = image_base + '/' + subdir + '/'
    if not os.path.exists(subpath):
        os.makedirs(subpath)
# Default the station name when the global attribute is absent
# (narrowed from a bare except to the KeyError actually expected).
try:
    g_atts['STATION_NAME'] = g_atts['STATION_NAME']
except KeyError:
    g_atts['STATION_NAME'] = 'NA'
# --- Temperature / Salinity / SigmaT vs depth (CTD + prawler overlay) ------
if args.TSvD:
    CTDplot = CTDProfilePlot()
    (plt, fig) = CTDplot.plot3var2y(epic_key=['T_28','T2_35','S_41','S_42','ST_70','ST_2070'],
                     xdata=[ncdata['T_28'][0,:,0,0],ncdatap['Temperature'][args.PrawlerProfileID],
                     ncdata['S_41'][0,:,0,0],ncdatap['Salinity'][args.PrawlerProfileID],
                     ncdata['ST_70'][0,:,0,0],ncdatap['SigmaT'][args.PrawlerProfileID]],
                     ydata=ydata,
                     ydata2=ncdatap['Depth'][args.PrawlerProfileID],
                     xlabel=['Temperature (C)','Salinity (PSU)','SigmaT (kg/m^3)'],
                     secondary=True)
    ptitle = CTDplot.add_title(cruiseid=g_atts['CRUISE'],
                      fileid=ncfile.split('/')[-1],
                      castid=g_atts['CAST'],
                      stationid=g_atts['STATION_NAME'],
                      castdate=cast_time,
                      lat=ncdata['lat'][0],
                      lon=ncdata['lon'][0])
    t = fig.suptitle(ptitle)
    t.set_y(1.06)
    # Double the default figure height for the stacked profile panels.
    DefaultSize = fig.get_size_inches()
    fig.set_size_inches( (DefaultSize[0], DefaultSize[1]*2) )
    plt.savefig('images/' + g_atts['CRUISE'] + '/TSSigma/' + ncfile.split('/')[-1].split('.')[0] + '_plot_2TSSigma.png', bbox_inches='tight', dpi = (300))
    plt.close()
# --- Temperature / Oxygen %sat / Fluorometer vs depth ----------------------
if args.OxyFluor:
    CTDplot = CTDProfilePlot()
    # Pick whichever fluorometer variable this file carries (last match wins).
    # NOTE(review): fluor_key stays undefined if none of these keys exist —
    # the plot call below would then raise NameError; confirm inputs.
    fluor_key_list = ['F_903', 'Fch_906', 'fWS_973', 'Chl_933']
    for fkey in fluor_key_list:
        if fkey in ncdata.keys():
            fluor_key = fkey
    (plt, fig) = CTDplot.plot3var2y(epic_key=['T_28','T2_35','OST_62','CTDOST_4220',fluor_key,'Prawler_Fluor'],
                     xdata=[ncdata['T_28'][0,:,0,0],ncdatap['Temperature'][args.PrawlerProfileID],
                     ncdata['OST_62'][0,:,0,0],ncdatap['Oxy_Sat'][args.PrawlerProfileID],
                     ncdata[fluor_key][0,:,0,0],ncdatap['Chlorophyll'][args.PrawlerProfileID]],
                     ydata=ydata,
                     ydata2=ncdatap['Depth'][args.PrawlerProfileID],
                     xlabel=['Temperature (C)','Oxygen % Sat.','Chlor-A mg/m^3'],
                     secondary=True)
    ptitle = CTDplot.add_title(cruiseid=g_atts['CRUISE'],
                      fileid=ncfile.split('/')[-1],
                      castid=g_atts['CAST'],
                      stationid=g_atts['STATION_NAME'],
                      castdate=cast_time,
                      lat=ncdata['lat'][0],
                      lon=ncdata['lon'][0])
    t = fig.suptitle(ptitle)
    t.set_y(1.06)
    DefaultSize = fig.get_size_inches()
    fig.set_size_inches( (DefaultSize[0], DefaultSize[1]*2) )
    plt.savefig('images/' + g_atts['CRUISE'] + '/TO2F/' + ncfile.split('/')[-1].split('.')[0] + '_plot_TO2F.png', bbox_inches='tight', dpi = (300))
    plt.close()
# --- Temperature / Oxygen concentration / Fluorometer vs depth -------------
# BUGFIX: this block was guarded by ``if args.OxyFluor:`` (a copy-paste of
# the %sat block above) even though it plots O_65/Oxy_Conc and saves to
# TO2concF/, leaving the -OxyConcFluor flag dead.  Guard on OxyConcFluor.
if args.OxyConcFluor:
    CTDplot = CTDProfilePlot()
    # Pick whichever fluorometer variable this file carries (last match wins).
    fluor_key_list = ['F_903', 'Fch_906', 'fWS_973', 'Chl_933']
    for fkey in fluor_key_list:
        if fkey in ncdata.keys():
            fluor_key = fkey
    (plt, fig) = CTDplot.plot3var2y(epic_key=['T_28','T2_35','O_65','CTDOXY_4221',fluor_key,'Prawler_Fluor'],
                     xdata=[ncdata['T_28'][0,:,0,0],ncdatap['Temperature'][args.PrawlerProfileID],
                     ncdata['O_65'][0,:,0,0],ncdatap['Oxy_Conc'][args.PrawlerProfileID],
                     ncdata[fluor_key][0,:,0,0],ncdatap['Chlorophyll'][args.PrawlerProfileID]],
                     ydata=ydata,
                     ydata2=ncdatap['Depth'][args.PrawlerProfileID],
                     xlabel=['Temperature (C)','Oxygen Conc.','Chlor-A mg/m^3'],
                     secondary=True)
    ptitle = CTDplot.add_title(cruiseid=g_atts['CRUISE'],
                      fileid=ncfile.split('/')[-1],
                      castid=g_atts['CAST'],
                      stationid=g_atts['STATION_NAME'],
                      castdate=cast_time,
                      lat=ncdata['lat'][0],
                      lon=ncdata['lon'][0])
    t = fig.suptitle(ptitle)
    t.set_y(1.06)
    # Double the default figure height for the stacked profile panels.
    DefaultSize = fig.get_size_inches()
    fig.set_size_inches( (DefaultSize[0], DefaultSize[1]*2) )
    plt.savefig('images/' + g_atts['CRUISE'] + '/TO2concF/' + ncfile.split('/')[-1].split('.')[0] + '_plot_TO2concF.png', bbox_inches='tight', dpi = (300))
    plt.close()
# --- PAR / Fluorometer vs depth (CTD only; prawler slots left empty) -------
if args.ParFluor:
    CTDplot = CTDProfilePlot()
    # Pick whichever fluorometer variable this file carries (last match wins).
    fluor_key_list = ['F_903', 'Fch_906', 'fWS_973', 'Chl_933']
    for fkey in fluor_key_list:
        if fkey in ncdata.keys():
            fluor_key = fkey
    (plt, fig) = CTDplot.plot2var(epic_key=['PAR_905','',fluor_key,''],
                     xdata=[ncdata['PAR_905'][0,:,0,0],np.array([]),
                     ncdata[fluor_key][0,:,0,0],np.array([])],
                     ydata=ydata,
                     xlabel=['PAR','Chlor-A mg/m^3'],
                     secondary=True)
    ptitle = CTDplot.add_title(cruiseid=g_atts['CRUISE'],
                      fileid=ncfile.split('/')[-1],
                      castid=g_atts['CAST'],
                      stationid=g_atts['STATION_NAME'],
                      castdate=cast_time,
                      lat=ncdata['lat'][0],
                      lon=ncdata['lon'][0])
    t = fig.suptitle(ptitle)
    t.set_y(1.06)
    DefaultSize = fig.get_size_inches()
    fig.set_size_inches( (DefaultSize[0], DefaultSize[1]*2) )
    # NOTE(review): confirm images/<CRUISE>/PARFluor/ is created with the
    # other output directories above before running with -ParFluor.
    plt.savefig('images/' + g_atts['CRUISE'] + '/PARFluor/' + ncfile.split('/')[-1].split('.')[0] + '_plot_PARFluor.png', bbox_inches='tight', dpi = (300))
    plt.close()
# --- Turbidity / Fluorometer vs depth --------------------------------------
if args.TurbFluor:
    CTDplot = CTDProfilePlot()
    fluor_key_list = ['F_903', 'Fch_906', 'fWS_973', 'Chl_933']
    for fkey in fluor_key_list:
        if fkey in ncdata.keys():
            fluor_key = fkey
    (plt, fig) = CTDplot.plot2var(epic_key=['Trb_980','',fluor_key,''],
                     xdata=[ncdata['Trb_980'][0,:,0,0],np.array([]),
                     ncdata[fluor_key][0,:,0,0],np.array([])],
                     ydata=ydata,
                     xlabel=['Turbidity','Chlor-A mg/m^3'],
                     secondary=True)
    ptitle = CTDplot.add_title(cruiseid=g_atts['CRUISE'],
                      fileid=ncfile.split('/')[-1],
                      castid=g_atts['CAST'],
                      stationid=g_atts['STATION_NAME'],
                      castdate=cast_time,
                      lat=ncdata['lat'][0],
                      lon=ncdata['lon'][0])
    t = fig.suptitle(ptitle)
    t.set_y(1.06)
    DefaultSize = fig.get_size_inches()
    fig.set_size_inches( (DefaultSize[0], DefaultSize[1]*2) )
    plt.savefig('images/' + g_atts['CRUISE'] + '/TurbFluor/' + ncfile.split('/')[-1].split('.')[0] + '_plot_TurbFluor.png', bbox_inches='tight', dpi = (300))
    plt.close()
# --- PAR / Turbidity / Fluorometer vs depth --------------------------------
if args.ParTurbFluor:
    CTDplot = CTDProfilePlot()
    fluor_key_list = ['F_903', 'Fch_906', 'fWS_973', 'Chl_933']
    for fkey in fluor_key_list:
        if fkey in ncdata.keys():
            fluor_key = fkey
    (plt, fig) = CTDplot.plot3var(epic_key=['PAR_905','','Trb_980','',fluor_key,''],
                     xdata=[ncdata['PAR_905'][0,:,0,0],np.array([]),
                     ncdata['Trb_980'][0,:,0,0],np.array([]),
                     ncdata[fluor_key][0,:,0,0],np.array([])],
                     ydata=ydata,
                     xlabel=['PAR','Turbidity','Chlor-A mg/m^3'],
                     secondary=True)
    ptitle = CTDplot.add_title(cruiseid=g_atts['CRUISE'],
                      fileid=ncfile.split('/')[-1],
                      castid=g_atts['CAST'],
                      stationid=g_atts['STATION_NAME'],
                      castdate=cast_time,
                      lat=ncdata['lat'][0],
                      lon=ncdata['lon'][0])
    t = fig.suptitle(ptitle)
    t.set_y(1.06)
    DefaultSize = fig.get_size_inches()
    fig.set_size_inches( (DefaultSize[0], DefaultSize[1]*2) )
    plt.savefig('images/' + g_atts['CRUISE'] + '/PARTurbFluor/' + ncfile.split('/')[-1].split('.')[0] + '_plot_PARTurbFluor.png', bbox_inches='tight', dpi = (300))
    plt.close()
# --- PAR / Transmissometer / Fluorometer vs depth --------------------------
if args.ParTransFluor:
    CTDplot = CTDProfilePlot()
    fluor_key_list = ['F_903', 'Fch_906', 'fWS_973', 'Chl_933']
    for fkey in fluor_key_list:
        if fkey in ncdata.keys():
            fluor_key = fkey
    (plt, fig) = CTDplot.plot3var(epic_key=['PAR_905','','Tr_904','',fluor_key,''],
                     xdata=[ncdata['PAR_905'][0,:,0,0],np.array([]),
                     ncdata['Tr_904'][0,:,0,0],np.array([]),
                     ncdata[fluor_key][0,:,0,0],np.array([])],
                     ydata=ydata,
                     xlabel=['PAR','Trans. %','Chlor-A mg/m^3'],
                     secondary=True)
    ptitle = CTDplot.add_title(cruiseid=g_atts['CRUISE'],
                      fileid=ncfile.split('/')[-1],
                      castid=g_atts['CAST'],
                      stationid=g_atts['STATION_NAME'],
                      castdate=cast_time,
                      lat=ncdata['lat'][0],
                      lon=ncdata['lon'][0])
    t = fig.suptitle(ptitle)
    t.set_y(1.06)
    DefaultSize = fig.get_size_inches()
    fig.set_size_inches( (DefaultSize[0], DefaultSize[1]*2) )
    plt.savefig('images/' + g_atts['CRUISE'] + '/ParTransFluor/' + ncfile.split('/')[-1].split('.')[0] + '_plot_PARTransFluor.png', bbox_inches='tight', dpi = (300))
    plt.close()
# NOTE(review): this ParTurbFluor block duplicates the one above verbatim
# (same flag, same variables, same output filename) — almost certainly a
# copy-paste remnant; confirm and remove, as it only redraws the same PNG.
if args.ParTurbFluor:
    CTDplot = CTDProfilePlot()
    fluor_key_list = ['F_903', 'Fch_906', 'fWS_973', 'Chl_933']
    for fkey in fluor_key_list:
        if fkey in ncdata.keys():
            fluor_key = fkey
    (plt, fig) = CTDplot.plot3var(epic_key=['PAR_905','','Trb_980','',fluor_key,''],
                     xdata=[ncdata['PAR_905'][0,:,0,0],np.array([]),
                     ncdata['Trb_980'][0,:,0,0],np.array([]),
                     ncdata[fluor_key][0,:,0,0],np.array([])],
                     ydata=ydata,
                     xlabel=['PAR','Turbidity','Chlor-A mg/m^3'],
                     secondary=True)
    ptitle = CTDplot.add_title(cruiseid=g_atts['CRUISE'],
                      fileid=ncfile.split('/')[-1],
                      castid=g_atts['CAST'],
                      stationid=g_atts['STATION_NAME'],
                      castdate=cast_time,
                      lat=ncdata['lat'][0],
                      lon=ncdata['lon'][0])
    t = fig.suptitle(ptitle)
    t.set_y(1.06)
    DefaultSize = fig.get_size_inches()
    fig.set_size_inches( (DefaultSize[0], DefaultSize[1]*2) )
    plt.savefig('images/' + g_atts['CRUISE'] + '/PARTurbFluor/' + ncfile.split('/')[-1].split('.')[0] + '_plot_PARTurbFluor.png', bbox_inches='tight', dpi = (300))
    plt.close()
# --- Transmissometer / Turbidity / Fluorometer vs depth --------------------
if args.TransTurbFluor:
    CTDplot = CTDProfilePlot()
    # Pick whichever fluorometer variable this file carries (last match wins).
    fluor_key_list = ['F_903', 'Fch_906', 'fWS_973', 'Chl_933']
    for fkey in fluor_key_list:
        if fkey in ncdata.keys():
            fluor_key = fkey
    (plt, fig) = CTDplot.plot3var(epic_key=['Tr_904','','Trb_980','',fluor_key,''],
                     xdata=[ncdata['Tr_904'][0,:,0,0],np.array([]),
                     ncdata['Trb_980'][0,:,0,0],np.array([]),
                     ncdata[fluor_key][0,:,0,0],np.array([])],
                     ydata=ydata,
                     xlabel=['Trans. %','Turbidity','Chlor-A mg/m^3'],
                     secondary=True)
    ptitle = CTDplot.add_title(cruiseid=g_atts['CRUISE'],
                      fileid=ncfile.split('/')[-1],
                      castid=g_atts['CAST'],
                      stationid=g_atts['STATION_NAME'],
                      castdate=cast_time,
                      lat=ncdata['lat'][0],
                      lon=ncdata['lon'][0])
    t = fig.suptitle(ptitle)
    t.set_y(1.06)
    DefaultSize = fig.get_size_inches()
    fig.set_size_inches( (DefaultSize[0], DefaultSize[1]*2) )
    plt.savefig('images/' + g_atts['CRUISE'] + '/TransTurbFluor/' + ncfile.split('/')[-1].split('.')[0] + '_plot_TransTurbFluor.png', bbox_inches='tight', dpi = (300))
    plt.close()
# --- Transmissometer / Fluorometer vs depth --------------------------------
if args.TransFluor:
    CTDplot = CTDProfilePlot()
    fluor_key_list = ['F_903', 'Fch_906', 'fWS_973', 'Chl_933']
    for fkey in fluor_key_list:
        if fkey in ncdata.keys():
            fluor_key = fkey
    (plt, fig) = CTDplot.plot2var(epic_key=['Tr_904','',fluor_key,''],
                     xdata=[ncdata['Tr_904'][0,:,0,0],np.array([]),
                     ncdata[fluor_key][0,:,0,0],np.array([])],
                     ydata=ydata,
                     xlabel=['Trans. %','Chlor-A mg/m^3'],
                     secondary=True)
    ptitle = CTDplot.add_title(cruiseid=g_atts['CRUISE'],
                      fileid=ncfile.split('/')[-1],
                      castid=g_atts['CAST'],
                      stationid=g_atts['STATION_NAME'],
                      castdate=cast_time,
                      lat=ncdata['lat'][0],
                      lon=ncdata['lon'][0])
    t = fig.suptitle(ptitle)
    t.set_y(1.06)
    DefaultSize = fig.get_size_inches()
    fig.set_size_inches( (DefaultSize[0], DefaultSize[1]*2) )
    plt.savefig('images/' + g_atts['CRUISE'] + '/TransFluor/' + ncfile.split('/')[-1].split('.')[0] + '_plot_TransFluor.png', bbox_inches='tight', dpi = (300))
    plt.close()
| [
"datetime.datetime",
"os.path.exists",
"plots.profile_plot.CTDProfilePlot",
"argparse.ArgumentParser",
"os.makedirs",
"matplotlib.use",
"numpy.ndim",
"os.sys.path.insert",
"matplotlib.pyplot.close",
"numpy.array",
"io_utils.EcoFOCI_netCDF_read.EcoFOCI_netCDF",
"numpy.isnan",
"os.path.abspath... | [((394, 408), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (401, 408), True, 'import matplotlib as mpl\n'), ((610, 643), 'os.sys.path.insert', 'os.sys.path.insert', (['(1)', 'parent_dir'], {}), '(1, parent_dir)\n', (628, 643), False, 'import os\n'), ((898, 928), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(8)', '(22)'], {}), '(2016, 8, 22)\n', (915, 928), False, 'import datetime\n'), ((944, 974), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(8)', '(24)'], {}), '(2016, 8, 24)\n', (961, 974), False, 'import datetime\n'), ((1999, 2047), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""CTD plots"""'}), "(description='CTD plots')\n", (2022, 2047), False, 'import argparse\n'), ((3831, 3853), 'io_utils.EcoFOCI_netCDF_read.EcoFOCI_netCDF', 'EcoFOCI_netCDF', (['ncfile'], {}), '(ncfile)\n', (3845, 3853), False, 'from io_utils.EcoFOCI_netCDF_read import EcoFOCI_netCDF\n'), ((4074, 4097), 'io_utils.EcoFOCI_netCDF_read.EcoFOCI_netCDF', 'EcoFOCI_netCDF', (['ncfilep'], {}), '(ncfilep)\n', (4088, 4097), False, 'from io_utils.EcoFOCI_netCDF_read import EcoFOCI_netCDF\n'), ((3936, 3982), 'calc.EPIC2Datetime.EPIC2Datetime', 'EPIC2Datetime', (["ncdata['time']", "ncdata['time2']"], {}), "(ncdata['time'], ncdata['time2'])\n", (3949, 3982), False, 'from calc.EPIC2Datetime import EPIC2Datetime, get_UDUNITS\n'), ((4176, 4198), 'numpy.ndim', 'np.ndim', (["ncdata['dep']"], {}), "(ncdata['dep'])\n", (4183, 4198), True, 'import numpy as np\n'), ((4520, 4564), 'os.path.exists', 'os.path.exists', (["('images/' + g_atts['CRUISE'])"], {}), "('images/' + g_atts['CRUISE'])\n", (4534, 4564), False, 'import os\n'), ((4570, 4611), 'os.makedirs', 'os.makedirs', (["('images/' + g_atts['CRUISE'])"], {}), "('images/' + g_atts['CRUISE'])\n", (4581, 4611), False, 'import os\n'), ((4619, 4677), 'os.path.exists', 'os.path.exists', (["('images/' + g_atts['CRUISE'] + '/TSSigma/')"], {}), "('images/' + g_atts['CRUISE'] + 
'/TSSigma/')\n", (4633, 4677), False, 'import os\n'), ((4683, 4738), 'os.makedirs', 'os.makedirs', (["('images/' + g_atts['CRUISE'] + '/TSSigma/')"], {}), "('images/' + g_atts['CRUISE'] + '/TSSigma/')\n", (4694, 4738), False, 'import os\n'), ((4746, 4801), 'os.path.exists', 'os.path.exists', (["('images/' + g_atts['CRUISE'] + '/TO2F/')"], {}), "('images/' + g_atts['CRUISE'] + '/TO2F/')\n", (4760, 4801), False, 'import os\n'), ((4807, 4859), 'os.makedirs', 'os.makedirs', (["('images/' + g_atts['CRUISE'] + '/TO2F/')"], {}), "('images/' + g_atts['CRUISE'] + '/TO2F/')\n", (4818, 4859), False, 'import os\n'), ((4867, 4926), 'os.path.exists', 'os.path.exists', (["('images/' + g_atts['CRUISE'] + '/TO2concF/')"], {}), "('images/' + g_atts['CRUISE'] + '/TO2concF/')\n", (4881, 4926), False, 'import os\n'), ((4932, 4988), 'os.makedirs', 'os.makedirs', (["('images/' + g_atts['CRUISE'] + '/TO2concF/')"], {}), "('images/' + g_atts['CRUISE'] + '/TO2concF/')\n", (4943, 4988), False, 'import os\n'), ((4996, 5059), 'os.path.exists', 'os.path.exists', (["('images/' + g_atts['CRUISE'] + '/PARTurbFluor/')"], {}), "('images/' + g_atts['CRUISE'] + '/PARTurbFluor/')\n", (5010, 5059), False, 'import os\n'), ((5065, 5125), 'os.makedirs', 'os.makedirs', (["('images/' + g_atts['CRUISE'] + '/PARTurbFluor/')"], {}), "('images/' + g_atts['CRUISE'] + '/PARTurbFluor/')\n", (5076, 5125), False, 'import os\n'), ((5133, 5193), 'os.path.exists', 'os.path.exists', (["('images/' + g_atts['CRUISE'] + '/TurbFluor/')"], {}), "('images/' + g_atts['CRUISE'] + '/TurbFluor/')\n", (5147, 5193), False, 'import os\n'), ((5199, 5256), 'os.makedirs', 'os.makedirs', (["('images/' + g_atts['CRUISE'] + '/TurbFluor/')"], {}), "('images/' + g_atts['CRUISE'] + '/TurbFluor/')\n", (5210, 5256), False, 'import os\n'), ((5264, 5328), 'os.path.exists', 'os.path.exists', (["('images/' + g_atts['CRUISE'] + '/ParTransFluor/')"], {}), "('images/' + g_atts['CRUISE'] + '/ParTransFluor/')\n", (5278, 5328), False, 'import os\n'), 
((5334, 5395), 'os.makedirs', 'os.makedirs', (["('images/' + g_atts['CRUISE'] + '/ParTransFluor/')"], {}), "('images/' + g_atts['CRUISE'] + '/ParTransFluor/')\n", (5345, 5395), False, 'import os\n'), ((5403, 5468), 'os.path.exists', 'os.path.exists', (["('images/' + g_atts['CRUISE'] + '/TransTurbFluor/')"], {}), "('images/' + g_atts['CRUISE'] + '/TransTurbFluor/')\n", (5417, 5468), False, 'import os\n'), ((5474, 5536), 'os.makedirs', 'os.makedirs', (["('images/' + g_atts['CRUISE'] + '/TransTurbFluor/')"], {}), "('images/' + g_atts['CRUISE'] + '/TransTurbFluor/')\n", (5485, 5536), False, 'import os\n'), ((5544, 5605), 'os.path.exists', 'os.path.exists', (["('images/' + g_atts['CRUISE'] + '/TransFluor/')"], {}), "('images/' + g_atts['CRUISE'] + '/TransFluor/')\n", (5558, 5605), False, 'import os\n'), ((5611, 5669), 'os.makedirs', 'os.makedirs', (["('images/' + g_atts['CRUISE'] + '/TransFluor/')"], {}), "('images/' + g_atts['CRUISE'] + '/TransFluor/')\n", (5622, 5669), False, 'import os\n'), ((5799, 5815), 'plots.profile_plot.CTDProfilePlot', 'CTDProfilePlot', ([], {}), '()\n', (5813, 5815), False, 'from plots.profile_plot import CTDProfilePlot\n'), ((7079, 7090), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7088, 7090), True, 'import matplotlib.pyplot as plt\n'), ((7125, 7141), 'plots.profile_plot.CTDProfilePlot', 'CTDProfilePlot', ([], {}), '()\n', (7139, 7141), False, 'from plots.profile_plot import CTDProfilePlot\n'), ((8602, 8613), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8611, 8613), True, 'import matplotlib.pyplot as plt\n'), ((8648, 8664), 'plots.profile_plot.CTDProfilePlot', 'CTDProfilePlot', ([], {}), '()\n', (8662, 8664), False, 'from plots.profile_plot import CTDProfilePlot\n'), ((10129, 10140), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10138, 10140), True, 'import matplotlib.pyplot as plt\n'), ((10175, 10191), 'plots.profile_plot.CTDProfilePlot', 'CTDProfilePlot', ([], {}), '()\n', (10189, 10191), False, 
'from plots.profile_plot import CTDProfilePlot\n'), ((11342, 11353), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11351, 11353), True, 'import matplotlib.pyplot as plt\n'), ((11388, 11404), 'plots.profile_plot.CTDProfilePlot', 'CTDProfilePlot', ([], {}), '()\n', (11402, 11404), False, 'from plots.profile_plot import CTDProfilePlot\n'), ((12563, 12574), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12572, 12574), True, 'import matplotlib.pyplot as plt\n'), ((12612, 12628), 'plots.profile_plot.CTDProfilePlot', 'CTDProfilePlot', ([], {}), '()\n', (12626, 12628), False, 'from plots.profile_plot import CTDProfilePlot\n'), ((13881, 13892), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13890, 13892), True, 'import matplotlib.pyplot as plt\n'), ((13931, 13947), 'plots.profile_plot.CTDProfilePlot', 'CTDProfilePlot', ([], {}), '()\n', (13945, 13947), False, 'from plots.profile_plot import CTDProfilePlot\n'), ((15199, 15210), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15208, 15210), True, 'import matplotlib.pyplot as plt\n'), ((15248, 15264), 'plots.profile_plot.CTDProfilePlot', 'CTDProfilePlot', ([], {}), '()\n', (15262, 15264), False, 'from plots.profile_plot import CTDProfilePlot\n'), ((16517, 16528), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (16526, 16528), True, 'import matplotlib.pyplot as plt\n'), ((16568, 16584), 'plots.profile_plot.CTDProfilePlot', 'CTDProfilePlot', ([], {}), '()\n', (16582, 16584), False, 'from plots.profile_plot import CTDProfilePlot\n'), ((17844, 17855), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (17853, 17855), True, 'import matplotlib.pyplot as plt\n'), ((17891, 17907), 'plots.profile_plot.CTDProfilePlot', 'CTDProfilePlot', ([], {}), '()\n', (17905, 17907), False, 'from plots.profile_plot import CTDProfilePlot\n'), ((19065, 19076), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19074, 19076), True, 'import matplotlib.pyplot as plt\n'), ((582, 607), 
'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (597, 607), False, 'import os\n'), ((1262, 1276), 'numpy.isnan', 'np.isnan', (['var1'], {}), '(var1)\n', (1270, 1276), True, 'import numpy as np\n'), ((1287, 1301), 'numpy.isnan', 'np.isnan', (['var2'], {}), '(var2)\n', (1295, 1301), True, 'import numpy as np\n'), ((10480, 10492), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10488, 10492), True, 'import numpy as np\n'), ((10549, 10561), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10557, 10561), True, 'import numpy as np\n'), ((11693, 11705), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (11701, 11705), True, 'import numpy as np\n'), ((11762, 11774), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (11770, 11774), True, 'import numpy as np\n'), ((12930, 12942), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (12938, 12942), True, 'import numpy as np\n'), ((12999, 13011), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13007, 13011), True, 'import numpy as np\n'), ((13068, 13080), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13076, 13080), True, 'import numpy as np\n'), ((14248, 14260), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (14256, 14260), True, 'import numpy as np\n'), ((14316, 14328), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (14324, 14328), True, 'import numpy as np\n'), ((14385, 14397), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (14393, 14397), True, 'import numpy as np\n'), ((15566, 15578), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (15574, 15578), True, 'import numpy as np\n'), ((15635, 15647), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (15643, 15647), True, 'import numpy as np\n'), ((15704, 15716), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (15712, 15716), True, 'import numpy as np\n'), ((16884, 16896), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (16892, 16896), True, 'import numpy as np\n'), ((16953, 16965), 'numpy.array', 'np.array', 
(['[]'], {}), '([])\n', (16961, 16965), True, 'import numpy as np\n'), ((17022, 17034), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (17030, 17034), True, 'import numpy as np\n'), ((18194, 18206), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (18202, 18206), True, 'import numpy as np\n'), ((18263, 18275), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (18271, 18275), True, 'import numpy as np\n'), ((1363, 1377), 'numpy.isnan', 'np.isnan', (['var1'], {}), '(var1)\n', (1371, 1377), True, 'import numpy as np\n'), ((1392, 1406), 'numpy.isnan', 'np.isnan', (['var2'], {}), '(var2)\n', (1400, 1406), True, 'import numpy as np\n'), ((1519, 1533), 'numpy.isnan', 'np.isnan', (['var2'], {}), '(var2)\n', (1527, 1533), True, 'import numpy as np\n'), ((1440, 1454), 'numpy.isnan', 'np.isnan', (['var2'], {}), '(var2)\n', (1448, 1454), True, 'import numpy as np\n'), ((1488, 1502), 'numpy.isnan', 'np.isnan', (['var2'], {}), '(var2)\n', (1496, 1502), True, 'import numpy as np\n'), ((1548, 1562), 'numpy.isnan', 'np.isnan', (['var1'], {}), '(var1)\n', (1556, 1562), True, 'import numpy as np\n'), ((1596, 1610), 'numpy.isnan', 'np.isnan', (['var1'], {}), '(var1)\n', (1604, 1610), True, 'import numpy as np\n'), ((1644, 1658), 'numpy.isnan', 'np.isnan', (['var1'], {}), '(var1)\n', (1652, 1658), True, 'import numpy as np\n'), ((1710, 1724), 'numpy.isnan', 'np.isnan', (['var1'], {}), '(var1)\n', (1718, 1724), True, 'import numpy as np\n'), ((1739, 1753), 'numpy.isnan', 'np.isnan', (['var2'], {}), '(var2)\n', (1747, 1753), True, 'import numpy as np\n'), ((1797, 1811), 'numpy.isnan', 'np.isnan', (['var1'], {}), '(var1)\n', (1805, 1811), True, 'import numpy as np\n'), ((1826, 1840), 'numpy.isnan', 'np.isnan', (['var2'], {}), '(var2)\n', (1834, 1840), True, 'import numpy as np\n')] |
import cv2
import os
import numpy as np
import pandas as pd
from scipy.ndimage import zoom
#from matplotlib import pyplot as plt
def clipped_zoom(img, zoom_factor, **kwargs):
    """
    Zoom a 2-D (optionally multichannel) image while keeping its shape.

    Zooming out pastes the shrunken image, centered, onto a zero canvas of
    the original size; zooming in enlarges the centered crop that maps back
    onto the full frame. A zoom factor of exactly 1 returns *img* unchanged.

    Parameters
    ----------
    img : array of shape (H, W) or (H, W, C)
    zoom_factor : float
        Magnification factor; < 1 shrinks, > 1 enlarges.
    **kwargs
        Forwarded to scipy.ndimage.zoom (e.g. spline ``order``).

    Returns
    -------
    array with the same spatial shape as *img*.
    """
    height, width = img.shape[:2]
    # Only the two spatial axes are zoomed; trailing (channel) axes get 1.
    factors = (zoom_factor, zoom_factor) + (1,) * (img.ndim - 2)

    if zoom_factor < 1:
        # Shrink: place the zoomed-out image centered inside a zero canvas.
        new_h = int(np.round(height * zoom_factor))
        new_w = int(np.round(width * zoom_factor))
        row0 = (height - new_h) // 2
        col0 = (width - new_w) // 2
        canvas = np.zeros_like(img)
        canvas[row0:row0 + new_h, col0:col0 + new_w] = zoom(img, factors, **kwargs)
        return canvas

    if zoom_factor > 1:
        # Enlarge: zoom the centered crop whose size maps onto the full frame.
        crop_h = int(np.round(height / zoom_factor))
        crop_w = int(np.round(width / zoom_factor))
        row0 = (height - crop_h) // 2
        col0 = (width - crop_w) // 2
        enlarged = zoom(img[row0:row0 + crop_h, col0:col0 + crop_w], factors, **kwargs)
        # Rounding can leave `enlarged` slightly bigger than img; trim edges.
        trim_r = (enlarged.shape[0] - height) // 2
        trim_c = (enlarged.shape[1] - width) // 2
        return enlarged[trim_r:trim_r + height, trim_c:trim_c + width]

    # zoom_factor == 1: nothing to do.
    return img
def detect_dnn_frame(net, frame):
    """
    Run an OpenCV DNN detector over a single frame and draw the detections.

    Parameters
    ----------
    net : cv2.dnn_Net
        Loaded detection network; assumed to produce SSD-style output where
        detections[0, 0, i] = [_, _, confidence, x1, y1, x2, y2] with box
        coordinates relative to the frame size — confirm for other models.
    frame : array
        BGR image; it is not modified (detection runs on a copy).

    Returns
    -------
    (annotated, bboxes) : tuple
        Copy of the frame with a green rectangle per detection above the
        0.7 confidence threshold, and the list of [x1, y1, x2, y2] pixel
        boxes.
    """
    annotated = frame.copy()
    height = annotated.shape[0]
    width = annotated.shape[1]
    # 300x300 input blob with per-channel mean subtraction.
    blob = cv2.dnn.blobFromImage(annotated, 1.0, (300, 300), [104, 117, 123], False, False)
    net.setInput(blob)
    detections = net.forward()
    conf_threshold = 0.7
    bboxes = []
    for idx in range(detections.shape[2]):
        confidence = detections[0, 0, idx, 2]
        if confidence > conf_threshold:
            # Scale relative coordinates back to pixels.
            x1 = int(detections[0, 0, idx, 3] * width)
            y1 = int(detections[0, 0, idx, 4] * height)
            x2 = int(detections[0, 0, idx, 5] * width)
            y2 = int(detections[0, 0, idx, 6] * height)
            bboxes.append([x1, y1, x2, y2])
            cv2.rectangle(annotated, (x1, y1), (x2, y2), (0, 255, 0), int(round(height / 150)), 8)
    return annotated, bboxes
def show_labels(folder_path):
    """
    Display every annotated training image with its bounding box drawn.

    Reads 'sub-train-annotations-bbox.csv' from *folder_path*, then for
    each row loads the image named in the first column from the 'train'
    subfolder, draws the rectangle given by columns 1-4, and shows it.
    Any keypress advances to the next image.
    """
    csv_path = os.path.join(folder_path, 'sub-' + 'train' + '-annotations-bbox.csv')
    data = pd.read_csv(csv_path).values.tolist()
    train_folder = os.path.join(folder_path, 'train')
    image_names = os.listdir(train_folder)
    image_names.sort()
    for d in data:
        print(d)
        image_path = os.path.join(train_folder, str(d[0]))
        print(image_path)
        thermal = cv2.imread(image_path)
        print(thermal.shape)
        # NOTE(review): columns 1-4 are passed to cv2.rectangle as the two
        # corner points directly (not x/y/width/height) — confirm that the
        # CSV stores absolute (XMin, YMin, XMax, YMax) pixel coordinates.
        (x, y, w, h) = (d[1], d[2], d[3], d[4])
        print((x, y, w, h))
        thermal = cv2.rectangle(thermal, (x, y), (w, h), (255, 0, 0), 2)
        cv2.imshow('Thermal', thermal)
        if cv2.waitKey(0) > 0:
            continue
# Script entry point: show the annotated images for the chosen dataset root.
folder_path = 'data/CelebA/img_celeba_splitted'
#folder_path = 'data/Thermal_organized_splitted'
show_labels(folder_path)
#match_template(folder_path, rgb_folder_path)
| [
"cv2.dnn.blobFromImage",
"cv2.rectangle",
"os.listdir",
"os.path.join",
"cv2.imshow",
"cv2.waitKey",
"numpy.zeros_like",
"scipy.ndimage.zoom",
"numpy.round",
"cv2.imread"
] | [((1767, 1857), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['frameOpencvDnn', '(1.0)', '(300, 300)', '[104, 117, 123]', '(False)', '(False)'], {}), '(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], \n False, False)\n', (1788, 1857), False, 'import cv2\n'), ((2790, 2824), 'os.path.join', 'os.path.join', (['folder_path', '"""train"""'], {}), "(folder_path, 'train')\n", (2802, 2824), False, 'import os\n'), ((2843, 2867), 'os.listdir', 'os.listdir', (['train_folder'], {}), '(train_folder)\n', (2853, 2867), False, 'import os\n'), ((800, 818), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (813, 818), True, 'import numpy as np\n'), ((859, 890), 'scipy.ndimage.zoom', 'zoom', (['img', 'zoom_tuple'], {}), '(img, zoom_tuple, **kwargs)\n', (863, 890), False, 'from scipy.ndimage import zoom\n'), ((3051, 3087), 'os.path.join', 'os.path.join', (['train_folder', 'image_id'], {}), '(train_folder, image_id)\n', (3063, 3087), False, 'import os\n'), ((3132, 3154), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (3142, 3154), False, 'import cv2\n'), ((3498, 3552), 'cv2.rectangle', 'cv2.rectangle', (['thermal', '(x, y)', '(w, h)', '(255, 0, 0)', '(2)'], {}), '(thermal, (x, y), (w, h), (255, 0, 0), 2)\n', (3511, 3552), False, 'import cv2\n'), ((3553, 3583), 'cv2.imshow', 'cv2.imshow', (['"""Thermal"""', 'thermal'], {}), "('Thermal', thermal)\n", (3563, 3583), False, 'import cv2\n'), ((634, 659), 'numpy.round', 'np.round', (['(h * zoom_factor)'], {}), '(h * zoom_factor)\n', (642, 659), True, 'import numpy as np\n'), ((678, 703), 'numpy.round', 'np.round', (['(w * zoom_factor)'], {}), '(w * zoom_factor)\n', (686, 703), True, 'import numpy as np\n'), ((1166, 1227), 'scipy.ndimage.zoom', 'zoom', (['img[top:top + zh, left:left + zw]', 'zoom_tuple'], {}), '(img[top:top + zh, left:left + zw], zoom_tuple, **kwargs)\n', (1170, 1227), False, 'from scipy.ndimage import zoom\n'), ((3597, 3611), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', 
(3608, 3611), False, 'import cv2\n'), ((1023, 1048), 'numpy.round', 'np.round', (['(h / zoom_factor)'], {}), '(h / zoom_factor)\n', (1031, 1048), True, 'import numpy as np\n'), ((1067, 1092), 'numpy.round', 'np.round', (['(w / zoom_factor)'], {}), '(w / zoom_factor)\n', (1075, 1092), True, 'import numpy as np\n'), ((2548, 2617), 'os.path.join', 'os.path.join', (['folder_path', "('sub-' + 'train' + '-annotations-bbox.csv')"], {}), "(folder_path, 'sub-' + 'train' + '-annotations-bbox.csv')\n", (2560, 2617), False, 'import os\n')] |
from .Wavefunction import Wavefunction
from .LinAlg import expm, get_basis
import numba
import numpy as np
from scipy.fft import ifft2, fft2
class Nucleus(Wavefunction):
    """
    Dense nuclear wavefunction for use in a `cgc.Collision.Collision`.
    Extends `Wavefunction` with `Ny` longitudinal layers, from which the
    Wilson line (and its adjoint representation) is computed.
    """
    # Upon calling wilsonLine() or adjointWilsonLine(), these are properly defined
    _wilsonLine = None
    _adjointWilsonLine = None
    # Some variables to keep track of what has been calculated/generated so far
    # allowing us to avoid redundant computations
    _wilsonLineExists = False
    _adjointWilsonLineExists = False
    def __init__(self, colorCharges, N, delta, mu, M=.5, g=1, Ny=100, rngSeed=None):
        r"""
        Dense object to be used in an instance of `cgc.Collision.Collision`.
        Implements calculation of the Wilson Line using the generalized basis matrix set.
        Parameters
        ----------
        colorCharges : positive integer
            The number of possible color charges; also the dimensionality of the special unitary group.
        N : positive integer
            The size of the square lattice to simulate.
        delta : positive float
            The distance between adjacent lattice sites.
        mu : positive float
            The scaling for the random gaussian distribution that generates the color charge density.
        M : float (default=.5)
            Infrared regulator parameter to regularize the Poisson equation for the gauge field.
        g : float (default=1)
            Parameter in the Poisson equation for the gauge field.
        Ny : positive integer (default=100)
            The longitudinal extent (in layers) of the nucleus object.
        rngSeed : int (default=None)
            Seed for the random number generator to initialize the color charge field
        """
        super().__init__(colorCharges, N, delta, mu, M, g, rngSeed) # Super constructor
        self._basis = get_basis(colorCharges)
        self.Ny = Ny
        # Modify the gaussian width to account for the multiple longitudinal layers
        self.gaussianWidth = self.mu / self.delta / np.sqrt(self.Ny)
    def colorChargeField(self, forceCalculate=False, verbose=0):
        r"""
        Generates the color charge density field according to a gaussian distribution. Differs
        from super class implementation in that it generates the numerous fields according
        to `Ny`. That is, the field \(\rho\) satisfies:
        $$ \langle \rho_{a}^{(t)}(i^-,\vec i_{\perp}) \rho_{b}^{(t)}({j^-},\vec j_{\perp}) \rangle = g^2\mu_t^2 \frac{ 1 }{N_y \Delta^2} ~\delta_{ab}~\delta_{i_{\perp,1}\ j_{\perp,1}}~\delta_{i_{\perp,2} \ j_{\perp,2}} ~\delta_{i^- \ {j^-}} $$
        If the field already exists, it is simply returned and no calculation is done.
        Parameters
        ----------
        forceCalculate : bool (default=False)
            If the quantity has previously been calculated, the calculation will not be done
            again unless this argument is set to True.
        verbose : int (default=0)
            How much output should be printed as calculations are done. Options are 0, 1, or 2.
        Returns
        -------
        colorChargeField : array(Ny, N, N, `colorCharges`**2 - 1)
        """
        if self._colorChargeFieldExists and not forceCalculate:
            return self._colorChargeField
        if verbose > 0:
            print(f'Generating {type(self).__name__} color charge field' + '.'*10, end='')
        # To compare to old results
        #self._colorChargeField = self.rng.normal(scale=self.gaussianWidth, size=(self.Ny, self.gluonDOF, self.N, self.N))
        #self._colorChargeField = self._colorChargeField.swapaxes(1, 2)
        #self._colorChargeField = self._colorChargeField.swapaxes(2, 3)
        # Randomly generate the intial color charge density using a gaussian distribution
        self._colorChargeField = self.rng.normal(scale=self.gaussianWidth, size=(self.Ny, self.N, self.N, self.gluonDOF))
        # Make sure we don't regenerate this field since it already exists on future calls
        self._colorChargeFieldExists = True
        if verbose > 0:
            print('finished!')
        return self._colorChargeField
    def gaugeField(self, forceCalculate=False, verbose=0):
        r"""
        Calculates the gauge field for all longitudinal layers and charge distributions by solving the (modified)
        Poisson equation involving the color charge field
        $$g \frac{1 } {\partial_\perp^2 - m^2 } \rho_a(i^-, \vec {i}_\perp )$$
        via Fourier method.
        If the field already exists, it is simply returned and no calculation is done.
        Parameters
        ----------
        forceCalculate : bool (default=False)
            If the quantity has previously been calculated, the calculation will not be done
            again unless this argument is set to True.
        verbose : int (default=0)
            How much output should be printed as calculations are done. Options are 0, 1, or 2.
        Returns
        -------
        gaugeField : array(Ny, N, N, `colorCharges`**2 - 1)
        """
        if self._gaugeFieldExists and not forceCalculate:
            return self._gaugeField
        # Make sure the charge field has already been generated (if not, this will generate it)
        self.colorChargeField(verbose=verbose)
        if verbose > 0:
            print(f'Calculating {type(self).__name__} gauge field' + '.'*10, end='')
        # Compute the fourier transform of the charge field
        # Note that the normalization is set to 'backward', which for scipy means that the
        # ifft2 is scaled by 1/n (where n = N^2)
        chargeDensityFFTArr = fft2(self._colorChargeField, axes=(1,2), norm='backward')
        # Absorb the numerator constants in the equation above into the charge density
        chargeDensityFFTArr = -self.delta2 * self.g / 2 * chargeDensityFFTArr
        # Calculate the individual elements of the gauge field in fourier space
        # Note here that we have to solve the gauge field for each layer and for each gluon degree of freedom
        # This method is defined at the bottom of this file; see there for more information
        gaugeFieldFFTArr = _calculateGaugeFFTOpt(self.gluonDOF, self.N, self.Ny, self.poissonReg, chargeDensityFFTArr);
        # Take the inverse fourier transform to get the actual gauge field
        self._gaugeField = np.real(ifft2(gaugeFieldFFTArr, axes=(1,2), norm='backward'))
        # Make sure this process isn't repeated unnecessarily by denoting that it has been done
        self._gaugeFieldExists = True
        if verbose > 0:
            print('finished!')
        return self._gaugeField
    def wilsonLine(self, forceCalculate=False, verbose=0):
        """
        Calculate the Wilson line using the gauge field and the appropriate basis matrices.
        If the line already exists, it is simply returned and no calculation is done.
        Parameters
        ----------
        forceCalculate : bool (default=False)
            If the quantity has previously been calculated, the calculation will not be done
            again unless this argument is set to True.
        verbose : int (default=0)
            How much output should be printed as calculations are done. Options are 0, 1, or 2.
        Returns
        -------
        wilsonLine : array(N, N, `colorCharges`)
        """
        if self._wilsonLineExists and not forceCalculate:
            return self._wilsonLine
        # Make sure the gauge field has already been calculated
        self.gaugeField(verbose=verbose)
        if verbose > 0:
            print(f'Calculating {type(self).__name__} wilson line' + '.'*10, end='')
        # We now combine all of the longitudinal layers into the single wilson line
        # Optimized method is defined at the end of this file; see there for more information
        self._wilsonLine = _calculateWilsonLineOpt(self.N, self.Ny, self.colorCharges, self._basis, self._gaugeField)
        self._wilsonLineExists = True
        if verbose > 0:
            print('finished!')
        return self._wilsonLine
    def adjointWilsonLine(self, forceCalculate=False, verbose=0):
        """
        Calculate the Wilson line in the adjoint representation.
        If the line already exists, it is simply returned and no calculation is done.
        Parameters
        ----------
        forceCalculate : bool (default=False)
            If the quantity has previously been calculated, the calculation will not be done
            again unless this argument is set to True.
        verbose : int (default=0)
            How much output should be printed as calculations are done. Options are 0, 1, or 2.
        Returns
        -------
        adjointWilsonLine : array(N, N, `colorCharges`**2 - 1, `colorCharges`**2 - 1)
        """
        if self._adjointWilsonLineExists and not forceCalculate:
            return self._adjointWilsonLine
        # Make sure the wilson line has already been calculated
        self.wilsonLine(verbose=verbose)
        if verbose > 0:
            print(f'Calculating {type(self).__name__} adjoint wilson line' + '.'*10, end='')
        # Calculation is optimized with numba, as with the previous calculations
        # See bottom of the file for more information
        self._adjointWilsonLine = _calculateAdjointWilsonLineOpt(self.gluonDOF, self.N, self._basis, self._wilsonLine)
        self._adjointWilsonLineExists = True
        if verbose > 0:
            print('finished!')
        return self._adjointWilsonLine
# Since we want to speed up the calculation, we define the computation of the fourier elements of
# the gauge field using a numba-compiled method
# This has to be defined outside of the Nucleus class since numba doesn't play well with custom classes
@numba.jit(nopython=True, cache=True)
def _calculateGaugeFFTOpt(gluonDOF, N, Ny, poissonReg, chargeDensityFFTArr):
    r"""
    Solve the regularized lattice Poisson equation mode by mode: each
    Fourier component of the charge density is divided by
    (2 - cos(2*pi*i/N) - cos(2*pi*j/N) + poissonReg).
    Compiled with numba for speed.
    """
    result = np.zeros_like(chargeDensityFFTArr, dtype='complex')
    # Hoist the loop-invariant angle step.
    angle_step = 2 * np.pi / N
    for layer in range(Ny):
        for dof in range(gluonDOF):
            for ki in range(N):
                for kj in range(N):
                    denominator = 2 - np.cos(angle_step*ki) - np.cos(angle_step*kj) + poissonReg
                    result[layer, ki, kj, dof] = chargeDensityFFTArr[layer, ki, kj, dof] / denominator
    return result
# Same deal as the above method, we have to define it outside the class so
# numba doesn't get confused
@numba.jit(nopython=True, cache=True)
def _calculateWilsonLineOpt(N, Ny, colorCharges, basis, gaugeField):
    r"""
    Build the Wilson line at every transverse lattice site as the
    path-ordered product over the Ny longitudinal layers of
    exp(-i * sum_k A_k * t_k), where t_k are the basis matrices.
    Compiled with numba for speed.
    """
    gluonDOF = colorCharges**2 - 1
    line = np.zeros((N, N, colorCharges, colorCharges), dtype='complex')
    for x in range(N):
        for y in range(N):
            # Start from the identity matrix; the layer factors are
            # multiplied in one at a time.
            for d in range(colorCharges):
                line[x, y, d, d] = 1
            for layer in range(Ny):
                # Contract the gauge field with the basis matrices to form
                # the matrix-valued argument of the exponential.
                arg = np.zeros((colorCharges, colorCharges), dtype='complex')
                for d in range(gluonDOF):
                    arg = arg + gaugeField[layer, x, y, d] * basis[d]
                # Custom expm() is used because scipy's cannot be called
                # from inside numba-compiled code.
                layer_factor = np.ascontiguousarray(expm(-1.j * arg))
                line[x, y] = np.dot(line[x, y], layer_factor)
    return line
@numba.jit(nopython=True, cache=True)
def _calculateAdjointWilsonLineOpt(gluonDOF, N, basis, wilsonLine):
    r"""
    Map the fundamental-representation Wilson line to the adjoint
    representation via W_ab = 2 Re tr(t_a V t_b V^dagger).
    Compiled with numba for speed.
    """
    # The adjoint-representation line is real, so a real dtype suffices.
    adjoint = np.zeros((N, N, gluonDOF, gluonDOF), dtype='double')
    for a in range(gluonDOF):
        for b in range(gluonDOF):
            for x in range(N):
                for y in range(N):
                    V = wilsonLine[x, y]
                    Vdag = np.conjugate(np.transpose(V))
                    adjoint[x, y, a, b] = 2 * np.real(np.trace(np.dot(np.dot(basis[a], V), np.dot(basis[b], Vdag))))
    return adjoint
| [
"numpy.sqrt",
"scipy.fft.fft2",
"scipy.fft.ifft2",
"numpy.zeros",
"numba.jit",
"numpy.dot",
"numpy.cos",
"numpy.transpose",
"numpy.zeros_like"
] | [((9861, 9897), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (9870, 9897), False, 'import numba\n'), ((10643, 10679), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (10652, 10679), False, 'import numba\n'), ((12384, 12420), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (12393, 12420), False, 'import numba\n'), ((10122, 10173), 'numpy.zeros_like', 'np.zeros_like', (['chargeDensityFFTArr'], {'dtype': '"""complex"""'}), "(chargeDensityFFTArr, dtype='complex')\n", (10135, 10173), True, 'import numpy as np\n'), ((10875, 10936), 'numpy.zeros', 'np.zeros', (['(N, N, colorCharges, colorCharges)'], {'dtype': '"""complex"""'}), "((N, N, colorCharges, colorCharges), dtype='complex')\n", (10883, 10936), True, 'import numpy as np\n'), ((12739, 12791), 'numpy.zeros', 'np.zeros', (['(N, N, gluonDOF, gluonDOF)'], {'dtype': '"""double"""'}), "((N, N, gluonDOF, gluonDOF), dtype='double')\n", (12747, 12791), True, 'import numpy as np\n'), ((5662, 5720), 'scipy.fft.fft2', 'fft2', (['self._colorChargeField'], {'axes': '(1, 2)', 'norm': '"""backward"""'}), "(self._colorChargeField, axes=(1, 2), norm='backward')\n", (5666, 5720), False, 'from scipy.fft import ifft2, fft2\n'), ((2037, 2053), 'numpy.sqrt', 'np.sqrt', (['self.Ny'], {}), '(self.Ny)\n', (2044, 2053), True, 'import numpy as np\n'), ((6401, 6454), 'scipy.fft.ifft2', 'ifft2', (['gaugeFieldFFTArr'], {'axes': '(1, 2)', 'norm': '"""backward"""'}), "(gaugeFieldFFTArr, axes=(1, 2), norm='backward')\n", (6406, 6454), False, 'from scipy.fft import ifft2, fft2\n'), ((11819, 11874), 'numpy.zeros', 'np.zeros', (['(colorCharges, colorCharges)'], {'dtype': '"""complex"""'}), "((colorCharges, colorCharges), dtype='complex')\n", (11827, 11874), True, 'import numpy as np\n'), ((12321, 12358), 'numpy.dot', 'np.dot', (['wilsonLine[i, j]', 'exponential'], {}), 
'(wilsonLine[i, j], exponential)\n', (12327, 12358), True, 'import numpy as np\n'), ((13003, 13018), 'numpy.transpose', 'np.transpose', (['V'], {}), '(V)\n', (13015, 13018), True, 'import numpy as np\n'), ((10469, 10494), 'numpy.cos', 'np.cos', (['(two_pi_over_N * j)'], {}), '(two_pi_over_N * j)\n', (10475, 10494), True, 'import numpy as np\n'), ((10443, 10468), 'numpy.cos', 'np.cos', (['(two_pi_over_N * i)'], {}), '(two_pi_over_N * i)\n', (10449, 10468), True, 'import numpy as np\n'), ((13097, 13116), 'numpy.dot', 'np.dot', (['basis[a]', 'V'], {}), '(basis[a], V)\n', (13103, 13116), True, 'import numpy as np\n'), ((13118, 13140), 'numpy.dot', 'np.dot', (['basis[b]', 'Vdag'], {}), '(basis[b], Vdag)\n', (13124, 13140), True, 'import numpy as np\n')] |
import random
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
class PushGame1D:
    """1-D box-pushing environment: the agent pushes a box toward a target cell.

    Cell values encode the entities (PLAYER_VAL / BOX_VAL / TARGET_VAL);
    two actions are available: 0 = move left, 1 = move right.
    """

    NUM_CELLS = 10
    PLAYER_VAL = 55
    BOX_VAL = 155
    TARGET_VAL = 255

    def __init__(self):
        self.stateSize = self.NUM_CELLS
        self.actionSize = 2
        self.cells = np.zeros(self.NUM_CELLS, dtype="int32")
        self.playerPos = 0
        self.targetPos = self.NUM_CELLS - 1
        self.boxPos = self.NUM_CELLS // 3
        # Paint the three entities onto the board.
        self.cells[self.playerPos] = self.PLAYER_VAL
        self.cells[self.targetPos] = self.TARGET_VAL
        self.cells[self.boxPos] = self.BOX_VAL

    def buildModel(self, learningRate):
        """Build a small dense Q-network (two hidden ReLU layers)."""
        hiddenNodes = int(self.stateSize * 0.8)
        model = Sequential()
        model.add(Dense(hiddenNodes, input_dim=self.stateSize, activation="relu"))
        model.add(Dense(hiddenNodes, activation="relu"))
        # Linear output because Q-values can be negative.
        model.add(Dense(self.actionSize, activation="linear"))
        model.compile(loss="mse", optimizer=Adam(lr=learningRate))
        return model

    def step(self, action):
        """Apply one action and return (cells, reward, isDone)."""
        reward = 0
        isDone = False
        # Erase player and box before moving them.
        self.cells[self.playerPos] = 0
        self.cells[self.boxPos] = 0
        if action == 0:
            # Move left unless already at the leftmost cell.
            if self.playerPos > 0:
                self.playerPos -= 1
        elif self.playerPos == self.boxPos - 1:
            # Moving right into the box pushes it one cell.
            self.playerPos += 1
            self.boxPos += 1
            if self.boxPos == self.targetPos:
                # Box reached the target: episode over.
                reward = 1
                isDone = True
        else:
            self.playerPos += 1
        self.cells[self.playerPos] = self.PLAYER_VAL
        self.cells[self.boxPos] = self.BOX_VAL
        return (self.cells, reward, isDone)

    def state(self):
        """Return the board as a (1, stateSize) array for the network."""
        return np.reshape(self.cells, [1, self.stateSize])

    def output(self):
        """Render the board in place on a single console line."""
        rendered = " ".join(str(cell) for cell in self.cells) + " "
        print("\r{}".format(rendered), end="")

    def reset(self):
        """Restore the initial layout (player at 0, box at NUM_CELLS // 3)."""
        self.cells[self.playerPos] = 0
        self.cells[self.boxPos] = 0
        self.playerPos = 0
        self.boxPos = self.NUM_CELLS // 3
        self.cells[self.playerPos] = self.PLAYER_VAL
        self.cells[self.boxPos] = self.BOX_VAL
        self.cells[self.targetPos] = self.TARGET_VAL
class PushGame2D:
    """2-D box-pushing environment on an I_DIM x J_DIM grid.

    The agent (PLAYER_VAL) pushes a box (BOX_VAL) toward the target cell
    in the bottom-right corner.  Pushing the box onto the target yields
    reward +1 and ends the episode; pushing it against the top or left
    edge yields -1 and also ends the episode.
    Actions: 0 = left, 1 = right, 2 = up, 3 = down.
    """

    I_DIM = 4
    J_DIM = 4
    PLAYER_VAL = 55
    BOX_VAL = 155
    TARGET_VAL = 255

    def __init__(self):
        self.stateSize = self.I_DIM * self.J_DIM
        self.actionSize = 4  # left / right / up / down
        self.stateSetup()

    def stateSetup(self):
        """(Re)initialise the grid and the player / box / target positions."""
        self.cells = np.zeros((self.I_DIM, self.J_DIM), dtype = "int32")
        self.playerPos = np.array([0, 0], dtype = "int32")
        self.boxPos = np.array([2, 2], dtype = "int32")
        #self.boxPos = np.array([int(self.I_DIM / 2), int(self.J_DIM / 2)], dtype = "int32")
        self.targetPos = np.array([int(self.I_DIM - 1), int(self.J_DIM - 1)], dtype = "int32")
        self.cells[self.playerPos[0]][self.playerPos[1]] = self.PLAYER_VAL
        self.cells[self.boxPos[0]][self.boxPos[1]] = self.BOX_VAL
        #self.cells[self.targetPos[0]][self.targetPos[1]] = self.TARGET_VAL

    def buildModel(self, learningRate):
        """Build a small dense Q-network (two hidden ReLU layers)."""
        model = Sequential()
        hiddenNodes = int(self.I_DIM * self.J_DIM * 2)
        model.add(Dense(hiddenNodes, input_dim = self.stateSize, activation = "relu"))
        model.add(Dense(hiddenNodes, activation = "relu"))
        model.add(Dense(self.actionSize, activation = "linear")) # linear due to negative reward
        model.compile(loss = "mse", optimizer = Adam(lr = learningRate))
        return model

    def step(self, action):
        """Apply one action and return (state, reward, isDone)."""
        reward = 0
        isDone = False
        # Erase player and box before moving them.
        self.cells[self.playerPos[0]][self.playerPos[1]] = 0
        self.cells[self.boxPos[0]][self.boxPos[1]] = 0
        if action == 0: # move left
            if (self.playerPos[0] == self.boxPos[0]
                and self.playerPos[1] - 1 == self.boxPos[1]
                and self.boxPos[1] > 0): #pushes the box
                self.playerPos[1] -= 1
                self.boxPos[1] -= 1
            elif self.playerPos[1] > 0: # just move
                self.playerPos[1] -= 1
        elif action == 1: # move right
            if (self.playerPos[0] == self.boxPos[0]
                and self.playerPos[1] == self.boxPos[1] - 1
                and self.boxPos[1] != self.J_DIM - 1): # pushes the box
                self.playerPos[1] += 1
                self.boxPos[1] += 1
            elif self.playerPos[0] != self.boxPos[0] and self.playerPos[1] < self.boxPos[1]: # cannot move past the box
                self.playerPos[1] += 1
        elif action == 2: # move up
            if (self.playerPos[0] - 1 == self.boxPos[0]
                and self.playerPos[1] == self.boxPos[1]
                and self.boxPos[0] > 0): # pushes the box
                self.playerPos[0] -= 1
                self.boxPos[0] -= 1
            elif self.playerPos[0] > 0: # just move
                self.playerPos[0] -= 1
        else: # move down
            if (self.playerPos[0] == self.boxPos[0] - 1
                and self.playerPos[1] == self.boxPos[1]
                and self.boxPos[0] != self.I_DIM - 1): # pushes the box
                # BUGFIX: was "self.playerPos[0] = 1", which teleported the
                # player to row 1 instead of following the box downwards.
                self.playerPos[0] += 1
                self.boxPos[0] += 1
            elif self.playerPos[1] != self.boxPos[1] and self.playerPos[0] < self.boxPos[0]: # cannot move past the box
                self.playerPos[0] += 1
        if np.array_equal(self.boxPos, self.targetPos): # box reached the target
            reward = 1
            isDone = True
        elif self.boxPos[0] == 0 or self.boxPos[1] == 0: # pushed to the wrong edge, game over
            reward = -1
            isDone = True
        else: # game continues
            self.cells[self.playerPos[0]][self.playerPos[1]] = self.PLAYER_VAL
            self.cells[self.boxPos[0]][self.boxPos[1]] = self.BOX_VAL
        return (self.state(), reward, isDone)

    def state(self):
        """Return the grid as a (1, stateSize) array for the network."""
        return np.reshape(self.cells, [1, self.stateSize])

    def output(self):
        """Render the grid to the console, overwriting the previous render."""
        for bs in range(64):
            print("\b\b", end = "")
        stringOutput = ""
        for row in self.cells:
            for cell in row:
                tempOutput = "%4s" % str(cell)
                stringOutput = stringOutput + tempOutput
            stringOutput = stringOutput + "\n"
        print("\r{}".format(stringOutput))

    def reset(self):
        """Clear the moved entities and restore the initial layout."""
        self.cells[self.playerPos[0]][self.playerPos[1]] = 0
        self.cells[self.boxPos[0]][self.boxPos[1]] = 0
        self.stateSetup()
| [
"keras.optimizers.Adam",
"numpy.reshape",
"keras.models.Sequential",
"numpy.array",
"numpy.zeros",
"numpy.array_equal",
"keras.layers.Dense"
] | [((347, 386), 'numpy.zeros', 'np.zeros', (['self.NUM_CELLS'], {'dtype': '"""int32"""'}), "(self.NUM_CELLS, dtype='int32')\n", (355, 386), True, 'import numpy as np\n'), ((718, 730), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (728, 730), False, 'from keras.models import Sequential\n'), ((1914, 1957), 'numpy.reshape', 'np.reshape', (['self.cells', '[1, self.stateSize]'], {}), '(self.cells, [1, self.stateSize])\n', (1924, 1957), True, 'import numpy as np\n'), ((2756, 2805), 'numpy.zeros', 'np.zeros', (['(self.I_DIM, self.J_DIM)'], {'dtype': '"""int32"""'}), "((self.I_DIM, self.J_DIM), dtype='int32')\n", (2764, 2805), True, 'import numpy as np\n'), ((2833, 2864), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': '"""int32"""'}), "([0, 0], dtype='int32')\n", (2841, 2864), True, 'import numpy as np\n'), ((2889, 2920), 'numpy.array', 'np.array', (['[2, 2]'], {'dtype': '"""int32"""'}), "([2, 2], dtype='int32')\n", (2897, 2920), True, 'import numpy as np\n'), ((3385, 3397), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3395, 3397), False, 'from keras.models import Sequential\n'), ((5664, 5707), 'numpy.array_equal', 'np.array_equal', (['self.boxPos', 'self.targetPos'], {}), '(self.boxPos, self.targetPos)\n', (5678, 5707), True, 'import numpy as np\n'), ((6192, 6235), 'numpy.reshape', 'np.reshape', (['self.cells', '[1, self.stateSize]'], {}), '(self.cells, [1, self.stateSize])\n', (6202, 6235), True, 'import numpy as np\n'), ((797, 860), 'keras.layers.Dense', 'Dense', (['hiddenNodes'], {'input_dim': 'self.stateSize', 'activation': '"""relu"""'}), "(hiddenNodes, input_dim=self.stateSize, activation='relu')\n", (802, 860), False, 'from keras.layers import Dense\n'), ((884, 921), 'keras.layers.Dense', 'Dense', (['hiddenNodes'], {'activation': '"""relu"""'}), "(hiddenNodes, activation='relu')\n", (889, 921), False, 'from keras.layers import Dense\n'), ((943, 986), 'keras.layers.Dense', 'Dense', (['self.actionSize'], {'activation': 
'"""linear"""'}), "(self.actionSize, activation='linear')\n", (948, 986), False, 'from keras.layers import Dense\n'), ((3471, 3534), 'keras.layers.Dense', 'Dense', (['hiddenNodes'], {'input_dim': 'self.stateSize', 'activation': '"""relu"""'}), "(hiddenNodes, input_dim=self.stateSize, activation='relu')\n", (3476, 3534), False, 'from keras.layers import Dense\n'), ((3558, 3595), 'keras.layers.Dense', 'Dense', (['hiddenNodes'], {'activation': '"""relu"""'}), "(hiddenNodes, activation='relu')\n", (3563, 3595), False, 'from keras.layers import Dense\n'), ((3617, 3660), 'keras.layers.Dense', 'Dense', (['self.actionSize'], {'activation': '"""linear"""'}), "(self.actionSize, activation='linear')\n", (3622, 3660), False, 'from keras.layers import Dense\n'), ((1070, 1091), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'learningRate'}), '(lr=learningRate)\n', (1074, 1091), False, 'from keras.optimizers import Adam\n'), ((3744, 3765), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'learningRate'}), '(lr=learningRate)\n', (3748, 3765), False, 'from keras.optimizers import Adam\n')] |
"""
Note: gmpe and emp (emperical) used interchangeably.
Calculate and plot PGVs, PGAs obsersations, simulations and gmpe.
"""
import sys
import os
import numpy as np
import glob
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
from matplotlib import pylab as plt
from geoNet.utils import read_statsll, get_processed_stats_list, get_SMS_Rrups, get_empIM_v2, get_SMS_PGV, get_SMS_PGA
from geoNet.putils import plot_SMS_IM, plot_SMS_IM_ratio, plot_IMvsRrup
from geoNet.gmpe import readStationFile as rsf
from geoNet.gmpe.calculateGMPE import set_faultprop
#*****************************************************************************
# OBSERVATIONS
#*****************************************************************************
parent_dir_loc_obs="/nesi/projects/nesi00213/ObservedGroundMotions/ahsan/Mw4pt6_20100904_103801_11Jan2017/Vol1/data"
stats_dict_obs = read_statsll("/nesi/projects/nesi00213/ObservedGroundMotions/ahsan/Mw4pt9_20110429_190804_11Jan2017",
"20100904_103801_eventStats_2017-01-10.ll")
plot_dir_accBB_obs="accBB_1"
plot_dir_velBB_obs="velBB_1"
loc_accBB_obs="/".join([parent_dir_loc_obs, plot_dir_accBB_obs])
loc_velBB_obs="/".join([parent_dir_loc_obs, plot_dir_velBB_obs])
loc_acc_obs = loc_accBB_obs
loc_vel_obs = loc_velBB_obs
#reset stats_dict to those processed
stats_dict_obs=get_processed_stats_list(loc_vel_obs,stats_dict_obs)
#*****************************************************************************
# SIMULATIONS
#*****************************************************************************
base_dir_sim="/nesi/projects/nesi00213/RunFolder/ahsan"
parent_dir_loc_sim="/".join([base_dir_sim, "ptSource_2010Sep04_v2_m4pt6_VMv1p64_FVM-h0p100_170123"])
stats_dict_sim = read_statsll(parent_dir_loc_sim, "HF_BB_stats.ll")
plot_dir_accBB_sim="Acc"
plot_dir_velBB_sim="Vel"
loc_accBB_sim=glob.glob("/".join([parent_dir_loc_sim, "BB", "*", plot_dir_accBB_sim]))[0]
loc_velBB_sim=glob.glob("/".join([parent_dir_loc_sim, "BB", "*", plot_dir_velBB_sim]))[0]
loc_acc_sim = loc_accBB_sim
loc_vel_sim = loc_velBB_sim
#*****************************************************************************
# Perform calculations and plot
#
#*****************************************************************************
# (1) construct a dictionary that only contains SMS present both in simulations and observations
stats_codes_simObs = list(set(stats_dict_obs) & set(stats_dict_sim))
stats_dict_simObs = {}
for stat_code in stats_codes_simObs:
lon, lat = stats_dict_sim[stat_code]
stats_dict_simObs[stat_code] = (lon, lat)
# (2) Sort the SMSs according to distance, need to know fault/source properties
srf_fname="/nesi/projects/nesi00213/RupModel/ahsan/2010Sep04_m4pt6/ptSource/Srf/2010Sep04_v2_m4pt6.srf"
FiniteFault = rsf.readSrfFile(srf_fname)
FiniteFault = rsf.Points_np(FiniteFault)
#set fault properties
faultprop = set_faultprop(Mw=4.6, rake=173., dip=74., Ztor=8.)
sorted_stat_codes, sms_rrup_rjb = get_SMS_Rrups(stats_dict_simObs, FiniteFault)
sms_rrup, sms_rjb = sms_rrup_rjb[:,0], sms_rrup_rjb[:,1]
# (3) calculate PGV, PGA for simulations and observation for the above SMSs
#Note: Observations acc has g units but simulations acc is in cm/s^2. Rescale
#one or the other
g=981. #cm/s^2
PGV_sim = get_SMS_PGV(sorted_stat_codes, loc_vel_sim, absVal=False)
PGV_obs = get_SMS_PGV(sorted_stat_codes, loc_vel_obs, absVal=False)
PGA_sim = get_SMS_PGA(sorted_stat_codes, loc_acc_sim, absVal=False)/g
PGA_obs = get_SMS_PGA(sorted_stat_codes, loc_acc_obs, absVal=False)
# (4) calculate PGV, PGA with GMPE
#get_empIM_v2(Rrup, period, faultprop, Rjb=None, Rtvz=0., V30measured=0., V30=250.):
Rrups_gmpe = np.logspace(np.log10(5.),np.log10(100.),30)
period=-1
PGV_gmpe, PGV_gmpe_std = get_empIM_v2(Rrups_gmpe, period, faultprop)
period=0
PGA_gmpe, PGA_gmpe_std = get_empIM_v2(Rrups_gmpe, period, faultprop)
# (5) PGV plots
#Note -1 is the geometric mean component
fig, ax = plot_SMS_IM(sms_rrup, PGV_sim[:,-1], PGV_obs[:,-1])
#Now underlay gmpe predictions
fig, ax = plot_IMvsRrup(Rrups_gmpe, PGV_gmpe[:,0], PGV_gmpe_std[:,0], fig=fig, ax=ax)
ax.legend(loc="best", scatterpoints=1)
fig.savefig("ex5_PGV.png")
plt.close('all')
fig, ax = plot_SMS_IM_ratio(sms_rrup, PGV_obs[:,-1]/PGV_sim[:,-1])
fig.savefig("ex5_PGV_ratio.png")
plt.close('all')
# (6) PGA plots
#Note -1 is the geometric mean component
fig, ax = plot_SMS_IM(sms_rrup, PGA_sim[:,-1], PGA_obs[:,-1])
#Now underlay gmpe predictions
fig, ax = plot_IMvsRrup(Rrups_gmpe, PGA_gmpe[:,0], PGA_gmpe_std[:,0], fig=fig, ax=ax)
ax.legend(loc="best", scatterpoints=1)
fig.savefig("ex5_PGA.png")
plt.close('all')
fig, ax = plot_SMS_IM_ratio(sms_rrup, PGA_obs[:,-1]/PGA_sim[:,-1])
fig.savefig("ex5_PGA_ratio.png")
plt.close('all')
| [
"geoNet.utils.read_statsll",
"geoNet.putils.plot_IMvsRrup",
"numpy.log10",
"geoNet.utils.get_processed_stats_list",
"geoNet.utils.get_SMS_PGV",
"matplotlib.use",
"geoNet.gmpe.calculateGMPE.set_faultprop",
"geoNet.gmpe.readStationFile.Points_np",
"geoNet.utils.get_SMS_PGA",
"geoNet.utils.get_empIM_... | [((249, 270), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (263, 270), False, 'import matplotlib\n'), ((932, 1087), 'geoNet.utils.read_statsll', 'read_statsll', (['"""/nesi/projects/nesi00213/ObservedGroundMotions/ahsan/Mw4pt9_20110429_190804_11Jan2017"""', '"""20100904_103801_eventStats_2017-01-10.ll"""'], {}), "(\n '/nesi/projects/nesi00213/ObservedGroundMotions/ahsan/Mw4pt9_20110429_190804_11Jan2017'\n , '20100904_103801_eventStats_2017-01-10.ll')\n", (944, 1087), False, 'from geoNet.utils import read_statsll, get_processed_stats_list, get_SMS_Rrups, get_empIM_v2, get_SMS_PGV, get_SMS_PGA\n'), ((1379, 1432), 'geoNet.utils.get_processed_stats_list', 'get_processed_stats_list', (['loc_vel_obs', 'stats_dict_obs'], {}), '(loc_vel_obs, stats_dict_obs)\n', (1403, 1432), False, 'from geoNet.utils import read_statsll, get_processed_stats_list, get_SMS_Rrups, get_empIM_v2, get_SMS_PGV, get_SMS_PGA\n'), ((1805, 1855), 'geoNet.utils.read_statsll', 'read_statsll', (['parent_dir_loc_sim', '"""HF_BB_stats.ll"""'], {}), "(parent_dir_loc_sim, 'HF_BB_stats.ll')\n", (1817, 1855), False, 'from geoNet.utils import read_statsll, get_processed_stats_list, get_SMS_Rrups, get_empIM_v2, get_SMS_PGV, get_SMS_PGA\n'), ((2870, 2896), 'geoNet.gmpe.readStationFile.readSrfFile', 'rsf.readSrfFile', (['srf_fname'], {}), '(srf_fname)\n', (2885, 2896), True, 'from geoNet.gmpe import readStationFile as rsf\n'), ((2911, 2937), 'geoNet.gmpe.readStationFile.Points_np', 'rsf.Points_np', (['FiniteFault'], {}), '(FiniteFault)\n', (2924, 2937), True, 'from geoNet.gmpe import readStationFile as rsf\n'), ((2972, 3025), 'geoNet.gmpe.calculateGMPE.set_faultprop', 'set_faultprop', ([], {'Mw': '(4.6)', 'rake': '(173.0)', 'dip': '(74.0)', 'Ztor': '(8.0)'}), '(Mw=4.6, rake=173.0, dip=74.0, Ztor=8.0)\n', (2985, 3025), False, 'from geoNet.gmpe.calculateGMPE import set_faultprop\n'), ((3058, 3103), 'geoNet.utils.get_SMS_Rrups', 'get_SMS_Rrups', 
(['stats_dict_simObs', 'FiniteFault'], {}), '(stats_dict_simObs, FiniteFault)\n', (3071, 3103), False, 'from geoNet.utils import read_statsll, get_processed_stats_list, get_SMS_Rrups, get_empIM_v2, get_SMS_PGV, get_SMS_PGA\n'), ((3360, 3417), 'geoNet.utils.get_SMS_PGV', 'get_SMS_PGV', (['sorted_stat_codes', 'loc_vel_sim'], {'absVal': '(False)'}), '(sorted_stat_codes, loc_vel_sim, absVal=False)\n', (3371, 3417), False, 'from geoNet.utils import read_statsll, get_processed_stats_list, get_SMS_Rrups, get_empIM_v2, get_SMS_PGV, get_SMS_PGA\n'), ((3428, 3485), 'geoNet.utils.get_SMS_PGV', 'get_SMS_PGV', (['sorted_stat_codes', 'loc_vel_obs'], {'absVal': '(False)'}), '(sorted_stat_codes, loc_vel_obs, absVal=False)\n', (3439, 3485), False, 'from geoNet.utils import read_statsll, get_processed_stats_list, get_SMS_Rrups, get_empIM_v2, get_SMS_PGV, get_SMS_PGA\n'), ((3567, 3624), 'geoNet.utils.get_SMS_PGA', 'get_SMS_PGA', (['sorted_stat_codes', 'loc_acc_obs'], {'absVal': '(False)'}), '(sorted_stat_codes, loc_acc_obs, absVal=False)\n', (3578, 3624), False, 'from geoNet.utils import read_statsll, get_processed_stats_list, get_SMS_Rrups, get_empIM_v2, get_SMS_PGV, get_SMS_PGA\n'), ((3838, 3881), 'geoNet.utils.get_empIM_v2', 'get_empIM_v2', (['Rrups_gmpe', 'period', 'faultprop'], {}), '(Rrups_gmpe, period, faultprop)\n', (3850, 3881), False, 'from geoNet.utils import read_statsll, get_processed_stats_list, get_SMS_Rrups, get_empIM_v2, get_SMS_PGV, get_SMS_PGA\n'), ((3916, 3959), 'geoNet.utils.get_empIM_v2', 'get_empIM_v2', (['Rrups_gmpe', 'period', 'faultprop'], {}), '(Rrups_gmpe, period, faultprop)\n', (3928, 3959), False, 'from geoNet.utils import read_statsll, get_processed_stats_list, get_SMS_Rrups, get_empIM_v2, get_SMS_PGV, get_SMS_PGA\n'), ((4029, 4082), 'geoNet.putils.plot_SMS_IM', 'plot_SMS_IM', (['sms_rrup', 'PGV_sim[:, -1]', 'PGV_obs[:, -1]'], {}), '(sms_rrup, PGV_sim[:, -1], PGV_obs[:, -1])\n', (4040, 4082), False, 'from geoNet.putils import plot_SMS_IM, 
plot_SMS_IM_ratio, plot_IMvsRrup\n'), ((4122, 4199), 'geoNet.putils.plot_IMvsRrup', 'plot_IMvsRrup', (['Rrups_gmpe', 'PGV_gmpe[:, 0]', 'PGV_gmpe_std[:, 0]'], {'fig': 'fig', 'ax': 'ax'}), '(Rrups_gmpe, PGV_gmpe[:, 0], PGV_gmpe_std[:, 0], fig=fig, ax=ax)\n', (4135, 4199), False, 'from geoNet.putils import plot_SMS_IM, plot_SMS_IM_ratio, plot_IMvsRrup\n'), ((4264, 4280), 'matplotlib.pylab.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4273, 4280), True, 'from matplotlib import pylab as plt\n'), ((4292, 4352), 'geoNet.putils.plot_SMS_IM_ratio', 'plot_SMS_IM_ratio', (['sms_rrup', '(PGV_obs[:, -1] / PGV_sim[:, -1])'], {}), '(sms_rrup, PGV_obs[:, -1] / PGV_sim[:, -1])\n', (4309, 4352), False, 'from geoNet.putils import plot_SMS_IM, plot_SMS_IM_ratio, plot_IMvsRrup\n'), ((4382, 4398), 'matplotlib.pylab.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4391, 4398), True, 'from matplotlib import pylab as plt\n'), ((4469, 4522), 'geoNet.putils.plot_SMS_IM', 'plot_SMS_IM', (['sms_rrup', 'PGA_sim[:, -1]', 'PGA_obs[:, -1]'], {}), '(sms_rrup, PGA_sim[:, -1], PGA_obs[:, -1])\n', (4480, 4522), False, 'from geoNet.putils import plot_SMS_IM, plot_SMS_IM_ratio, plot_IMvsRrup\n'), ((4562, 4639), 'geoNet.putils.plot_IMvsRrup', 'plot_IMvsRrup', (['Rrups_gmpe', 'PGA_gmpe[:, 0]', 'PGA_gmpe_std[:, 0]'], {'fig': 'fig', 'ax': 'ax'}), '(Rrups_gmpe, PGA_gmpe[:, 0], PGA_gmpe_std[:, 0], fig=fig, ax=ax)\n', (4575, 4639), False, 'from geoNet.putils import plot_SMS_IM, plot_SMS_IM_ratio, plot_IMvsRrup\n'), ((4704, 4720), 'matplotlib.pylab.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4713, 4720), True, 'from matplotlib import pylab as plt\n'), ((4732, 4792), 'geoNet.putils.plot_SMS_IM_ratio', 'plot_SMS_IM_ratio', (['sms_rrup', '(PGA_obs[:, -1] / PGA_sim[:, -1])'], {}), '(sms_rrup, PGA_obs[:, -1] / PGA_sim[:, -1])\n', (4749, 4792), False, 'from geoNet.putils import plot_SMS_IM, plot_SMS_IM_ratio, plot_IMvsRrup\n'), ((4822, 4838), 'matplotlib.pylab.close', 'plt.close', 
(['"""all"""'], {}), "('all')\n", (4831, 4838), True, 'from matplotlib import pylab as plt\n'), ((3497, 3554), 'geoNet.utils.get_SMS_PGA', 'get_SMS_PGA', (['sorted_stat_codes', 'loc_acc_sim'], {'absVal': '(False)'}), '(sorted_stat_codes, loc_acc_sim, absVal=False)\n', (3508, 3554), False, 'from geoNet.utils import read_statsll, get_processed_stats_list, get_SMS_Rrups, get_empIM_v2, get_SMS_PGV, get_SMS_PGA\n'), ((3771, 3784), 'numpy.log10', 'np.log10', (['(5.0)'], {}), '(5.0)\n', (3779, 3784), True, 'import numpy as np\n'), ((3784, 3799), 'numpy.log10', 'np.log10', (['(100.0)'], {}), '(100.0)\n', (3792, 3799), True, 'import numpy as np\n')] |
import os, torch
import numpy as np
from torch import nn
from torch import functional as F
import tifffile
from tqdm import tqdm
class BCEFocalLoss(torch.nn.Module):
    """Binary focal loss computed directly on probabilities.

    The input is treated as an already-sigmoided probability map (the
    sigmoid call is intentionally commented out in the original design).
    """

    def __init__(self, gamma=2, alpha=0.25, reduction='elementwise_mean'):
        super().__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction

    def forward(self, _input, target):
        probs = _input  # inputs are assumed to be probabilities already
        pos_term = -self.alpha * (1 - probs) ** self.gamma * target * torch.log(probs)
        neg_term = -(1 - self.alpha) * probs ** self.gamma * (1 - target) * torch.log(1 - probs)
        loss = pos_term + neg_term
        if self.reduction == 'elementwise_mean':
            return torch.mean(loss)
        if self.reduction == 'sum':
            return torch.sum(loss)
        return loss
class SoftDiceLoss(nn.Module):
    """Soft (differentiable) Dice loss with additive smoothing of 1."""

    def __init__(self, weight=None, size_average=True):
        # weight / size_average are accepted for API compatibility but unused.
        super(SoftDiceLoss, self).__init__()

    def forward(self, probs, targets):
        batch = targets.size(0)
        smooth = 1
        flat_probs = probs.view(batch, -1)
        flat_targets = targets.view(batch, -1)
        overlap = flat_probs * flat_targets
        # Per-sample Dice coefficient, smoothed to avoid division by zero.
        dice = 2. * (overlap.sum(1) + smooth) / (flat_probs.sum(1) + flat_targets.sum(1) + smooth)
        return 1 - dice.sum() / batch
class DataPackage:
    """Bundle of a low-res / high-res directory pair plus normalisation stats.

    ``m`` / ``s`` are the intensity mean and standard deviation used for
    normalisation; ``p`` is the sampling probability weight of this dataset.
    """

    def __init__(self, lrDir, hrDir, m=0, s=0, p=0.5):
        self.lrDir = lrDir
        self.hrDir = hrDir
        self.meanVal = m
        self.stdVal = s
        self.prob = p

    def SetMean(self, val):
        """Set the intensity mean used for normalisation."""
        self.meanVal = val

    def SetStd(self, val):
        """Set the intensity standard deviation used for normalisation."""
        self.stdVal = val
def default_conv(in_channels, out_channels, kernel_size, bias=True):
    """2-D convolution with 'same'-style padding (kernel_size // 2)."""
    padding = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     padding=padding, bias=bias)
def default_conv3d(in_channels, out_channels, kernel_size, bias=True):
    """3-D convolution with 'same'-style padding (kernel_size // 2)."""
    padding = kernel_size // 2
    return nn.Conv3d(in_channels, out_channels, kernel_size,
                     padding=padding, bias=bias)
def prepare(dev, *args):
    """Move every tensor in ``args`` to the device named by ``dev``.

    Parameters
    ----------
    dev : str
        Device specifier accepted by ``torch.device`` (e.g. 'cpu', 'cuda:0').
    *args : torch.Tensor
        Tensors to transfer.

    Returns
    -------
    list of torch.Tensor
        The inputs on the requested device, in the same order.
    """
    # The original re-created torch.device('cpu') when dev == 'cpu';
    # that branch was a no-op and has been removed.
    device = torch.device(dev)
    return [a.to(device) for a in args]
def RestoreNetImg(img, mean, max):
    """Undo network normalisation and stretch the result to the 0-255 range.

    A constant (flat) image is collapsed to all zeros instead of stretched.
    """
    restored = img * max + mean
    hi, lo = np.max(restored), np.min(restored)
    if hi <= lo:
        # Degenerate case: no dynamic range to stretch.
        restored *= 0
    else:
        restored = 255. / (hi - lo) * (restored - lo)
    return np.clip(restored, 0, 255)
def RestoreNetImgV2(img, mean, max):
    """Undo network normalisation and clamp the result into [0, 255]."""
    return np.clip(img * max + mean, 0, 255)
class WDSRBBlock3D(nn.Module):
    """WDSR-B style 3-D residual block.

    Wide 1x1x1 expansion -> activation -> linear 1x1x1 reduction -> spatial
    convolution, with an identity skip connection.  ``wn`` is a weight
    wrapper (e.g. ``torch.nn.utils.weight_norm``) applied to every conv.
    """

    def __init__(
        self, n_feats, kernel_size, wn, act=nn.ReLU(True), res_scale=1):
        super(WDSRBBlock3D, self).__init__()
        self.res_scale = res_scale  # kept for API compatibility; not applied in forward
        expand = 6    # expansion ratio of the first 1x1x1 conv
        linear = 0.8  # fraction of n_feats kept after the linear reduction
        wide = n_feats * expand
        slim = int(n_feats * linear)
        self.body = nn.Sequential(
            wn(nn.Conv3d(n_feats, wide, 1, padding=1 // 2)),
            act,
            wn(nn.Conv3d(wide, slim, 1, padding=1 // 2)),
            wn(nn.Conv3d(slim, n_feats, kernel_size, padding=kernel_size // 2)),
        )

    def forward(self, x):
        """Return body(x) + x (identity residual)."""
        return self.body(x) + x
class ResBlock3D(nn.Module):
    """3-D residual block: two conv (+ optional BatchNorm) stages, each
    followed by the activation, plus an identity skip connection.

    Note: the activation is applied after BOTH convolutions, mirroring the
    original layer ordering.
    """

    def __init__(self,
                 conv=default_conv3d,
                 n_feats=64,
                 kernel_size=3,
                 bias=True,
                 bn=False,
                 act=nn.ReLU(inplace=True),  # nn.LeakyReLU(inplace=True),
                 res_scale=1):
        super(ResBlock3D, self).__init__()
        layers = []
        for _ in range(2):
            layers.append(conv(n_feats, n_feats, kernel_size, bias=bias))
            if bn:
                layers.append(nn.BatchNorm3d(n_feats))
            layers.append(act)
        self.body = nn.Sequential(*layers)
        self.res_scale = res_scale

    def forward(self, x):
        return self.body(x) + x
class ConvLayer(nn.Module):
    """Conv3d -> optional normalisation -> optional activation, as one unit.

    Pass ``bn=None`` / ``act=None`` to skip the corresponding stage.
    """

    def __init__(self,
                 inplane=64,
                 n_feats=32,
                 stride=1,
                 kernel_size=3,
                 bias=True,
                 bn=nn.BatchNorm3d,
                 padding=1,
                 act=nn.ReLU(inplace=True),  # nn.LeakyReLU(inplace=True),
                 res_scale=1):
        super(ConvLayer, self).__init__()
        layers = [nn.Conv3d(inplane, n_feats, kernel_size=kernel_size,
                            stride=stride, padding=padding, bias=bias)]
        if bn is not None:
            layers.append(bn(n_feats))
        if act is not None:
            layers.append(act)
        self.body = nn.Sequential(*layers)

    def forward(self, x):
        return self.body(x)
class UpLayer(nn.Module):
    """Trilinear upsampling followed by Conv3d, optional norm and activation.

    Unlike ConvLayer, the activation is always appended; only ``bn`` may be
    skipped by passing ``bn=None``.
    """

    def __init__(self,
                 inplane=64,
                 n_feats=32,
                 scale_factor=2,
                 bn=nn.BatchNorm3d,
                 act=nn.ReLU(inplace=True)  # nn.LeakyReLU(inplace=True),
                 ):
        super(UpLayer, self).__init__()
        layers = [
            nn.Upsample(scale_factor=scale_factor, mode='trilinear'),
            nn.Conv3d(in_channels=inplane, out_channels=n_feats,
                      kernel_size=3, padding=3 // 2),
        ]
        if bn is not None:
            layers.append(bn(n_feats))
        layers.append(act)
        self.body = nn.Sequential(*layers)

    def forward(self, x):
        return self.body(x)
class PixelUpsampler3D(nn.Module):
    """3-D pixel shuffle: folds channel groups into spatial resolution.

    Input (B, C * r0 * r1 * r2, D, H, W) -> output (B, C, D*r0, H*r1, W*r2),
    where ``upscale_factor`` is the triple (r0, r1, r2).
    """

    def __init__(self, upscale_factor):
        super(PixelUpsampler3D, self).__init__()
        self.scaleFactor = upscale_factor

    def _pixel_shuffle(self, input, upscale_factor):
        r0, r1, r2 = upscale_factor[0], upscale_factor[1], upscale_factor[2]
        batch_size, channels, in_depth, in_height, in_width = input.size()
        channels //= r0 * r1 * r2
        reshaped = input.contiguous().view(
            batch_size, channels, r0, r1, r2, in_depth, in_height, in_width)
        # Interleave each scale axis with its matching spatial axis.
        shuffled = reshaped.permute(0, 1, 5, 2, 6, 3, 7, 4).contiguous()
        return shuffled.view(batch_size, channels,
                             in_depth * r0, in_height * r1, in_width * r2)

    def forward(self, x):
        return self._pixel_shuffle(x, self.scaleFactor)
class GetMultiTypeMemoryDataSetAndCrop:
    """In-memory training dataset sampling random aligned LR/HR crops.

    Loads every TIFF volume of each DataPackage in ``dataList`` into RAM.
    ``__getitem__`` picks a dataset according to the packages' ``prob``
    weights, draws a random crop (retrying up to 20 times until the HR crop
    contains enough foreground), applies a random flip augmentation and
    returns normalised float tensors.

    HR volumes are expected at 4x the LR size along axis 0 and 2x along
    axes 1 and 2 (see the crop arithmetic in __getitem__).
    """

    def __init__(self, dataList, cropSize, epoch):
        self.dataList = dataList  # list of DataPackage
        self.lrImgList = [[] for x in range(len(self.dataList))]
        self.hrImgList = [[] for x in range(len(self.dataList))]
        # Cumulative sampling intervals in "percent" units:
        # dataset k owns draws in [interval[k], interval[k+1]).
        self.randProbInteval = [0 for x in range(len(self.dataList) + 1)]
        for k in range(1, len(self.dataList) + 1):
            self.randProbInteval[k] = self.dataList[k - 1].prob * 100 + self.randProbInteval[k - 1]
        self.epoch = epoch
        self.beg = [0, 0, 0]
        self.cropSz = cropSize
        for k in range(len(self.dataList)):
            pack = self.dataList[k]
            lrDir = pack.lrDir
            hrDir = pack.hrDir
            lrFileList = []
            hrFileList = []
            for file in os.listdir(lrDir):
                if file.endswith('.tif'):
                    lrFileList.append(file)
            for file in os.listdir(hrDir):
                if file.endswith('bin.tif'):
                    hrFileList.append(file)
            for ind in tqdm(range(len(lrFileList))):
                lrName = os.path.join(lrDir, lrFileList[ind])
                hrName = os.path.join(hrDir, hrFileList[ind])
                lrImg = tifffile.imread(lrName)
                hrImg = tifffile.imread(hrName)
                # Add a leading channel axis.
                lrImg = np.expand_dims(lrImg, axis=0)
                hrImg = np.expand_dims(hrImg, axis=0)
                self.lrImgList[k].append(lrImg)
                self.hrImgList[k].append(hrImg)

    def __len__(self):
        # The dataset length is the configured epoch size, not the file count.
        return self.epoch

    def len(self):
        return self.epoch

    def __getitem__(self, ind):
        flag = True
        dataID = 0
        # Draw a dataset index according to the probability intervals.
        randNum = np.random.randint(self.randProbInteval[-1])
        for k in range(len(self.randProbInteval) - 1):
            # BUGFIX: the lower bound must be inclusive; with a strict "<"
            # a draw equal to an interval boundary fell through to dataset 0.
            if self.randProbInteval[k] <= randNum < self.randProbInteval[k + 1]:
                dataID = k
                break
        ind = np.random.randint(len(self.lrImgList[dataID]))
        tryNum = 0
        while flag:
            sz = self.lrImgList[dataID][ind].shape
            self.beg[0] = np.random.randint(0, sz[1] - self.cropSz[0] - 1)
            self.beg[1] = np.random.randint(0, sz[2] - self.cropSz[1] - 1)
            self.beg[2] = np.random.randint(0, sz[3] - self.cropSz[2] - 1)
            # HR crop: 4x along depth, 2x along height and width.
            hrImg = self.hrImgList[dataID][ind][:, self.beg[0] * 4:self.beg[0] * 4 + self.cropSz[0] * 4,
                    self.beg[1] * 2:self.beg[1] * 2 + self.cropSz[1] * 2,
                    self.beg[2] * 2:self.beg[2] * 2 + self.cropSz[2] * 2]
            # Retry (up to 20 times) until the HR crop has enough signal.
            if np.sum(hrImg) < 800 and tryNum < 20:
                tryNum += 1
            else:
                lrImg = self.lrImgList[dataID][ind][:, self.beg[0]:self.beg[0] + self.cropSz[0],
                        self.beg[1]:self.beg[1] + self.cropSz[1],
                        self.beg[2]:self.beg[2] + self.cropSz[2]]
                flag = False
        # Random flip augmentation: 6 equally likely variants (0 = identity).
        rid = np.random.randint(0, 6)
        if rid == 1:
            lrImg, hrImg = lrImg[:, ::-1, :, :], hrImg[:, ::-1, :, :]
        if rid == 2:
            lrImg, hrImg = lrImg[:, :, ::-1, :], hrImg[:, :, ::-1, :]
        if rid == 3:
            lrImg, hrImg = lrImg[:, :, :, ::-1], hrImg[:, :, :, ::-1]
        if rid == 4:
            lrImg, hrImg = lrImg[:, ::-1, ::-1, :], hrImg[:, ::-1, ::-1, :]
        if rid == 5:
            lrImg, hrImg = lrImg[:, :, ::-1, ::-1], hrImg[:, :, ::-1, ::-1]
        # BUGFIX: np.float was removed in NumPy 1.24; use the builtin float.
        lrImg = torch.from_numpy(lrImg.copy().astype(float)).float()
        hrImg = torch.from_numpy(hrImg.copy().astype(float)).float()
        lrImg = (lrImg - self.dataList[dataID].meanVal) / self.dataList[dataID].stdVal
        hrImg = hrImg / 255.
        return lrImg, hrImg, self.dataList[dataID].meanVal, self.dataList[dataID].stdVal
| [
"torch.nn.ReLU",
"torch.nn.Sequential",
"torch.sum",
"os.listdir",
"torch.mean",
"numpy.max",
"numpy.min",
"torch.nn.BatchNorm3d",
"torch.nn.Conv3d",
"tifffile.imread",
"torch.nn.Upsample",
"torch.device",
"torch.log",
"numpy.minimum",
"os.path.join",
"torch.nn.Conv2d",
"numpy.sum",
... | [((1836, 1926), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', 'kernel_size'], {'padding': '(kernel_size // 2)', 'bias': 'bias'}), '(in_channels, out_channels, kernel_size, padding=kernel_size // 2,\n bias=bias)\n', (1845, 1926), False, 'from torch import nn\n'), ((2059, 2149), 'torch.nn.Conv3d', 'nn.Conv3d', (['in_channels', 'out_channels', 'kernel_size'], {'padding': '(kernel_size // 2)', 'bias': 'bias'}), '(in_channels, out_channels, kernel_size, padding=kernel_size // 2,\n bias=bias)\n', (2068, 2149), False, 'from torch import nn\n'), ((2256, 2273), 'torch.device', 'torch.device', (['dev'], {}), '(dev)\n', (2268, 2273), False, 'import os, torch\n'), ((2563, 2575), 'numpy.max', 'np.max', (['rImg'], {}), '(rImg)\n', (2569, 2575), True, 'import numpy as np\n'), ((2590, 2602), 'numpy.min', 'np.min', (['rImg'], {}), '(rImg)\n', (2596, 2602), True, 'import numpy as np\n'), ((2314, 2333), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2326, 2333), False, 'import os, torch\n'), ((2985, 3006), 'numpy.minimum', 'np.minimum', (['rImg', '(255)'], {}), '(rImg, 255)\n', (2995, 3006), True, 'import numpy as np\n'), ((3126, 3139), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (3133, 3139), False, 'from torch import nn\n'), ((3660, 3680), 'torch.nn.Sequential', 'nn.Sequential', (['*body'], {}), '(*body)\n', (3673, 3680), False, 'from torch import nn\n'), ((4066, 4087), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4073, 4087), False, 'from torch import nn\n'), ((4433, 4450), 'torch.nn.Sequential', 'nn.Sequential', (['*m'], {}), '(*m)\n', (4446, 4450), False, 'from torch import nn\n'), ((4881, 4902), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4888, 4902), False, 'from torch import nn\n'), ((5315, 5332), 'torch.nn.Sequential', 'nn.Sequential', (['*m'], {}), '(*m)\n', (5328, 5332), False, 'from torch import nn\n'), ((5620, 5641), 'torch.nn.ReLU', 'nn.ReLU', 
([], {'inplace': '(True)'}), '(inplace=True)\n', (5627, 5641), False, 'from torch import nn\n'), ((6067, 6084), 'torch.nn.Sequential', 'nn.Sequential', (['*m'], {}), '(*m)\n', (6080, 6084), False, 'from torch import nn\n'), ((9157, 9200), 'numpy.random.randint', 'np.random.randint', (['self.randProbInteval[-1]'], {}), '(self.randProbInteval[-1])\n', (9174, 9200), True, 'import numpy as np\n'), ((10433, 10456), 'numpy.random.randint', 'np.random.randint', (['(0)', '(6)'], {}), '(0, 6)\n', (10450, 10456), True, 'import numpy as np\n'), ((774, 790), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (784, 790), False, 'import os, torch\n'), ((2743, 2764), 'numpy.minimum', 'np.minimum', (['rImg', '(255)'], {}), '(rImg, 255)\n', (2753, 2764), True, 'import numpy as np\n'), ((5046, 5146), 'torch.nn.Conv3d', 'nn.Conv3d', (['inplane', 'n_feats'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'bias': 'bias'}), '(inplane, n_feats, kernel_size=kernel_size, stride=stride, padding\n =padding, bias=bias)\n', (5055, 5146), False, 'from torch import nn\n'), ((5771, 5827), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'scale_factor', 'mode': '"""trilinear"""'}), "(scale_factor=scale_factor, mode='trilinear')\n", (5782, 5827), False, 'from torch import nn\n'), ((5848, 5936), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': 'inplane', 'out_channels': 'n_feats', 'kernel_size': '(3)', 'padding': '(3 // 2)'}), '(in_channels=inplane, out_channels=n_feats, kernel_size=3, padding\n =3 // 2)\n', (5857, 5936), False, 'from torch import nn\n'), ((8183, 8200), 'os.listdir', 'os.listdir', (['lrDir'], {}), '(lrDir)\n', (8193, 8200), False, 'import os, torch\n'), ((8317, 8334), 'os.listdir', 'os.listdir', (['hrDir'], {}), '(hrDir)\n', (8327, 8334), False, 'import os, torch\n'), ((9590, 9638), 'numpy.random.randint', 'np.random.randint', (['(0)', '(sz[1] - self.cropSz[0] - 1)'], {}), '(0, sz[1] - self.cropSz[0] - 1)\n', (9607, 9638), True, 'import 
numpy as np\n'), ((9666, 9714), 'numpy.random.randint', 'np.random.randint', (['(0)', '(sz[2] - self.cropSz[1] - 1)'], {}), '(0, sz[2] - self.cropSz[1] - 1)\n', (9683, 9714), True, 'import numpy as np\n'), ((9742, 9790), 'numpy.random.randint', 'np.random.randint', (['(0)', '(sz[3] - self.cropSz[2] - 1)'], {}), '(0, sz[3] - self.cropSz[2] - 1)\n', (9759, 9790), True, 'import numpy as np\n'), ((535, 548), 'torch.log', 'torch.log', (['pt'], {}), '(pt)\n', (544, 548), False, 'import os, torch\n'), ((620, 637), 'torch.log', 'torch.log', (['(1 - pt)'], {}), '(1 - pt)\n', (629, 637), False, 'import os, torch\n'), ((850, 865), 'torch.sum', 'torch.sum', (['loss'], {}), '(loss)\n', (859, 865), False, 'import os, torch\n'), ((3336, 3391), 'torch.nn.Conv3d', 'nn.Conv3d', (['n_feats', '(n_feats * expand)', '(1)'], {'padding': '(1 // 2)'}), '(n_feats, n_feats * expand, 1, padding=1 // 2)\n', (3345, 3391), False, 'from torch import nn\n'), ((8509, 8545), 'os.path.join', 'os.path.join', (['lrDir', 'lrFileList[ind]'], {}), '(lrDir, lrFileList[ind])\n', (8521, 8545), False, 'import os, torch\n'), ((8571, 8607), 'os.path.join', 'os.path.join', (['hrDir', 'hrFileList[ind]'], {}), '(hrDir, hrFileList[ind])\n', (8583, 8607), False, 'import os, torch\n'), ((8633, 8656), 'tifffile.imread', 'tifffile.imread', (['lrName'], {}), '(lrName)\n', (8648, 8656), False, 'import tifffile\n'), ((8682, 8705), 'tifffile.imread', 'tifffile.imread', (['hrName'], {}), '(hrName)\n', (8697, 8705), False, 'import tifffile\n'), ((8733, 8762), 'numpy.expand_dims', 'np.expand_dims', (['lrImg'], {'axis': '(0)'}), '(lrImg, axis=0)\n', (8747, 8762), True, 'import numpy as np\n'), ((8788, 8817), 'numpy.expand_dims', 'np.expand_dims', (['hrImg'], {'axis': '(0)'}), '(hrImg, axis=0)\n', (8802, 8817), True, 'import numpy as np\n'), ((4358, 4381), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['n_feats'], {}), '(n_feats)\n', (4372, 4381), False, 'from torch import nn\n'), ((10067, 10080), 'numpy.sum', 'np.sum', 
(['hrImg'], {}), '(hrImg)\n', (10073, 10080), True, 'import numpy as np\n')] |
'''
Created on 22.02.2021
@author: <NAME>
'''
from ImageBot.image_processing.general import expand_canvas
from math import sqrt
import numpy as np
import imgaug.augmenters as iaa
from ..Config import MODEL_MAX_SCALE, MODEL_MIN_SCALE, MODEL_MIN_ROT,\
MODEL_MAX_ROT, MODEL_PERCENTAGE_FLIP, MODEL_PERSPECTIVE_MIN_TRANSFORMATION, \
MODEL_PERSPECTIVE_MAX_TRANSFORMATION, MODEL_MASK_CUTOUT_ITERATIONS,\
MODEL_MASK_CUTOUT_SIZE, MODEL_MASK_CUTOUT_PROB,\
MODEL_MULTIPLY_MESSAGE_IMAGE_AUGMENTATION,\
MODEL_MULTIPLY_MESSAGE_MASK_AUGMENTATION
from imgaug.augmentables.heatmaps import HeatmapsOnImage
from ImageBot.infrastructure.ImageMessage import ImageMessage
import uuid
from ImageBot.image_processing.masks import mask_bounding_box
def w_crop_to_mask(message):
    """Crop the message's image and mask to the mask's bounding box,
    padded by a fixed pixel margin on each side.

    If the mask is empty (``mask_bounding_box`` returns None) the message
    is returned unchanged.

    Args:
        message: ImageMessage with ``image`` (H x W x C) and ``mask`` (H x W)
            arrays; both are cropped in place.

    Returns:
        The same message, with ``image`` and ``mask`` cropped.
    """
    # We do not use crop to mask here, because it is slower calling it two times
    # because the bounding box must be calculated accordingly
    bb = mask_bounding_box(message.mask)
    margin = 20
    if bb is not None:
        # Pad the box by `margin` pixels on each side, clamped to the image
        # bounds. bb layout is [[x_min, y_min], [x_max, y_max]] — x is
        # clamped against shape[1] (width), y against shape[0] (height).
        bb[0][0] = max(0, bb[0][0] - margin)
        bb[0][1] = max(0, bb[0][1] - margin)
        bb[1][0] = min(message.image.shape[1] - 1, bb[1][0] + margin)
        bb[1][1] = min(message.image.shape[0] - 1, bb[1][1] + margin)
        # Slice rows by y, columns by x.
        message.image = message.image[bb[0][1]:bb[1][1], bb[0][0]:bb[1][0]]
        message.mask = message.mask[bb[0][1]:bb[1][1], bb[0][0]:bb[1][0]]
    return message
def w_expand_for_max_affine(message):
    """Enlarge the canvas of image and mask so that the content still fits
    after the strongest augmentation.

    The worst case is a 45-degree rotation of the longest side scaled by
    MODEL_MAX_SCALE, which needs roughly (sqrt(2) - 1) * side extra pixels;
    +1 rounds the integer truncation up.

    Args:
        message: ImageMessage whose ``image`` and ``mask`` are replaced by
            expanded copies.

    Returns:
        The same message with enlarged ``image`` and ``mask``.
    """
    longest_side = max(message.image.shape[0], message.image.shape[1])
    padding = int((sqrt(2)-1) * longest_side * MODEL_MAX_SCALE) + 1
    message.image = expand_canvas(message.image, padding)
    message.mask = expand_canvas(message.mask, padding)
    return message
def w_model_augement(messages):
    """Create randomly augmented copies of the incoming image messages.

    Each message is duplicated MODEL_MULTIPLY_MESSAGE_IMAGE_AUGMENTATION - 1
    times and pushed through a random perspective / horizontal-flip / affine
    pipeline; the untouched originals are appended afterwards, so every input
    survives at least once.

    NOTE(review): the *255 / /255 round-trip implies images are assumed to be
    float arrays in [0, 1] — confirm against the callers.

    Args:
        messages: iterable of ImageMessage objects with ``image``, ``mask``
            and ``green`` attributes.

    Returns:
        list of ImageMessage: augmented copies followed by the originals.
    """
    # Convert the image to uint8, the format imgaug works on
    for message in messages:
        message.image = np.uint8(message.image*255.0)
    # Leave at least each of one image as it is
    identity_messages = list(messages)
    # Imgaug lib only supports masks of boolean value, thus we use heatmaps
    # Multiply some messages
    new_heatmaps = []
    new_images = []
    origins = []
    greens = []
    for message in messages:
        new_heatmaps.extend([HeatmapsOnImage(np.float32(message.mask), shape=message.image.shape, min_value=0.0, max_value=1.0) for _ in range(MODEL_MULTIPLY_MESSAGE_IMAGE_AUGMENTATION-1)])
        new_images.extend([message.image for _ in range(MODEL_MULTIPLY_MESSAGE_IMAGE_AUGMENTATION-1)])
        #origins.extend([message.origin for _ in range(MODEL_MULTIPLY_MESSAGE_IMAGE_AUGMENTATION-1)])
        greens.extend([message.green for _ in range(MODEL_MULTIPLY_MESSAGE_IMAGE_AUGMENTATION-1)])
    seq = iaa.Sequential([
        # Make a random perspective transformation
        iaa.PerspectiveTransform(scale=(MODEL_PERSPECTIVE_MIN_TRANSFORMATION, MODEL_PERSPECTIVE_MAX_TRANSFORMATION)),
        # Flip a MODEL_PERCENTAGE_FLIP fraction of the images horizontally
        iaa.Fliplr(MODEL_PERCENTAGE_FLIP),
        # Apply a random scale/rotation affine transformation to the images
        iaa.Affine(scale={"x": (MODEL_MIN_SCALE, MODEL_MAX_SCALE), "y": (MODEL_MIN_SCALE, MODEL_MAX_SCALE)}, rotate=(MODEL_MIN_ROT, MODEL_MAX_ROT))
        ])
    # Do the processing (images and heatmap masks are transformed in sync)
    images, heatmaps = seq(images=new_images, heatmaps=new_heatmaps)
    # Convert back to the float format used elsewhere; each augmented copy
    # becomes a fresh ImageMessage with a new uuid and a copy of `green`.
    result = [ImageMessage(uuid.uuid4(), image/255.0, np.float64(heatmaps[i].get_arr()), greens[i].copy()) for i, image in enumerate(images)]
    # The originals were converted to uint8 above; scale them back to [0, 1].
    for im in identity_messages:
        im.image = im.image/255.0
    result.extend(identity_messages)
    return result
def w_augment_mask(messages):
    """Create randomly mask-augmented copies of the incoming messages.

    Each message's mask is duplicated MODEL_MULTIPLY_MESSAGE_MASK_AUGMENTATION - 1
    times and, with probability MODEL_MASK_CUTOUT_PROB, damaged by random
    Cutout rectangles (filled with 0). The images themselves are only copied,
    not modified. The untouched originals are appended to the result.

    Args:
        messages: iterable of ImageMessage objects with ``image``, ``mask``
            and ``green`` attributes.

    Returns:
        list of ImageMessage: augmented copies followed by the originals.
    """
    # TODO: Keep one untouched
    # Convert the mask to uint8, the format imgaug works on
    for message in messages:
        message.mask = np.uint8(message.mask*255.0)
    # Leave at least each of one image as it is
    identity_messages = list(messages)
    # Multiply some messages
    new_masks = []
    origins = []
    greens = []
    images = []
    for message in messages:
        new_masks.extend([message.mask for _ in range(MODEL_MULTIPLY_MESSAGE_MASK_AUGMENTATION-1)])
        #origins.extend([message.origin for _ in range(MODEL_MULTIPLY_MESSAGE_MASK_AUGMENTATION-1)])
        images.extend([message.image for _ in range(MODEL_MULTIPLY_MESSAGE_MASK_AUGMENTATION-1)])
        greens.extend([message.green for _ in range(MODEL_MULTIPLY_MESSAGE_MASK_AUGMENTATION-1)])
    # Setup manipulation to cover random types
    seq_mask = iaa.Sequential([
        iaa.Sometimes(MODEL_MASK_CUTOUT_PROB, iaa.Cutout(nb_iterations=MODEL_MASK_CUTOUT_ITERATIONS, size=MODEL_MASK_CUTOUT_SIZE, fill_mode="constant", cval=0))
        ])
    # Convert to the right value and apply (masks are fed as images here)
    masks = seq_mask(images=new_masks)
    # Convert it back and merge it with the identity messages
    result = [ImageMessage(uuid.uuid4(), images[i].copy(), mask/255.0, greens[i].copy()) for i, mask in enumerate(masks)]
    for im in identity_messages:
        # Scale the uint8 mask back to float in [0, 1]
        im.mask = np.float64(im.mask)/255.0
        # Add it to the result
        result.append(im)
    return result
| [
"numpy.uint8",
"ImageBot.image_processing.general.expand_canvas",
"numpy.float64",
"imgaug.augmenters.Affine",
"math.sqrt",
"uuid.uuid4",
"imgaug.augmenters.PerspectiveTransform",
"ImageBot.image_processing.masks.mask_bounding_box",
"imgaug.augmenters.Fliplr",
"numpy.float32",
"imgaug.augmenters... | [((927, 958), 'ImageBot.image_processing.masks.mask_bounding_box', 'mask_bounding_box', (['message.mask'], {}), '(message.mask)\n', (944, 958), False, 'from ImageBot.image_processing.masks import mask_bounding_box\n'), ((1610, 1652), 'ImageBot.image_processing.general.expand_canvas', 'expand_canvas', (['message.image', 'max_addition'], {}), '(message.image, max_addition)\n', (1623, 1652), False, 'from ImageBot.image_processing.general import expand_canvas\n'), ((1672, 1713), 'ImageBot.image_processing.general.expand_canvas', 'expand_canvas', (['message.mask', 'max_addition'], {}), '(message.mask, max_addition)\n', (1685, 1713), False, 'from ImageBot.image_processing.general import expand_canvas\n'), ((1856, 1887), 'numpy.uint8', 'np.uint8', (['(message.image * 255.0)'], {}), '(message.image * 255.0)\n', (1864, 1887), True, 'import numpy as np\n'), ((3770, 3800), 'numpy.uint8', 'np.uint8', (['(message.mask * 255.0)'], {}), '(message.mask * 255.0)\n', (3778, 3800), True, 'import numpy as np\n'), ((2776, 2888), 'imgaug.augmenters.PerspectiveTransform', 'iaa.PerspectiveTransform', ([], {'scale': '(MODEL_PERSPECTIVE_MIN_TRANSFORMATION, MODEL_PERSPECTIVE_MAX_TRANSFORMATION)'}), '(scale=(MODEL_PERSPECTIVE_MIN_TRANSFORMATION,\n MODEL_PERSPECTIVE_MAX_TRANSFORMATION))\n', (2800, 2888), True, 'import imgaug.augmenters as iaa\n'), ((2940, 2973), 'imgaug.augmenters.Fliplr', 'iaa.Fliplr', (['MODEL_PERCENTAGE_FLIP'], {}), '(MODEL_PERCENTAGE_FLIP)\n', (2950, 2973), True, 'import imgaug.augmenters as iaa\n'), ((3038, 3182), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'scale': "{'x': (MODEL_MIN_SCALE, MODEL_MAX_SCALE), 'y': (MODEL_MIN_SCALE,\n MODEL_MAX_SCALE)}", 'rotate': '(MODEL_MIN_ROT, MODEL_MAX_ROT)'}), "(scale={'x': (MODEL_MIN_SCALE, MODEL_MAX_SCALE), 'y': (\n MODEL_MIN_SCALE, MODEL_MAX_SCALE)}, rotate=(MODEL_MIN_ROT, MODEL_MAX_ROT))\n", (3048, 3182), True, 'import imgaug.augmenters as iaa\n'), ((3381, 3393), 'uuid.uuid4', 'uuid.uuid4', ([], {}), 
'()\n', (3391, 3393), False, 'import uuid\n'), ((4862, 4874), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4872, 4874), False, 'import uuid\n'), ((5054, 5073), 'numpy.float64', 'np.float64', (['im.mask'], {}), '(im.mask)\n', (5064, 5073), True, 'import numpy as np\n'), ((4555, 4673), 'imgaug.augmenters.Cutout', 'iaa.Cutout', ([], {'nb_iterations': 'MODEL_MASK_CUTOUT_ITERATIONS', 'size': 'MODEL_MASK_CUTOUT_SIZE', 'fill_mode': '"""constant"""', 'cval': '(0)'}), "(nb_iterations=MODEL_MASK_CUTOUT_ITERATIONS, size=\n MODEL_MASK_CUTOUT_SIZE, fill_mode='constant', cval=0)\n", (4565, 4673), True, 'import imgaug.augmenters as iaa\n'), ((2237, 2261), 'numpy.float32', 'np.float32', (['message.mask'], {}), '(message.mask)\n', (2247, 2261), True, 'import numpy as np\n'), ((1502, 1509), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (1506, 1509), False, 'from math import sqrt\n')] |
import joblib
import numpy as np
from flask import Flask, app
from flask import jsonify # herramienta para trabajar cno arch json
app = Flask(__name__)
#<NAME>
@app.route('/predict', methods=['GET'])
def predict():
    """Serve a demo prediction at GET /predict.

    Runs the globally loaded model on a fixed sample feature vector and
    returns the result as JSON.

    Returns:
        flask.Response: JSON body of the form {'prediccion': [...]}.
    """
    # Hard-coded sample feature vector (9 features) used as the demo input.
    X_test = np.array([7.594444821,7.479555538,1.616463184,1.53352356,0.796666503,0.635422587,0.362012237,0.315963835,2.277026653])
    # The model expects a 2-D array: reshape the single sample to (1, n_features).
    prediction = model.predict(X_test.reshape(1, -1))
    # Bug fix: list(prediction) keeps numpy scalar items (e.g. np.float64),
    # which Flask's JSON encoder can reject; .tolist() converts them to
    # native Python types while producing the same JSON content.
    return jsonify({'prediccion': prediction.tolist()})
if __name__ == "__main__":
    # Load the trained model once at startup; the `model` global is read by
    # predict() on every request.
    model = joblib.load('./project/models/best_model.pkl')
    app.run(port=8080)
"flask.app.run",
"flask.Flask",
"flask.app.route",
"numpy.array",
"joblib.load"
] | [((138, 153), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (143, 153), False, 'from flask import Flask, app\n'), ((164, 202), 'flask.app.route', 'app.route', (['"""/predict"""'], {'methods': "['GET']"}), "('/predict', methods=['GET'])\n", (173, 202), False, 'from flask import Flask, app\n'), ((392, 523), 'numpy.array', 'np.array', (['[7.594444821, 7.479555538, 1.616463184, 1.53352356, 0.796666503, \n 0.635422587, 0.362012237, 0.315963835, 2.277026653]'], {}), '([7.594444821, 7.479555538, 1.616463184, 1.53352356, 0.796666503, \n 0.635422587, 0.362012237, 0.315963835, 2.277026653])\n', (400, 523), True, 'import numpy as np\n'), ((659, 705), 'joblib.load', 'joblib.load', (['"""./project/models/best_model.pkl"""'], {}), "('./project/models/best_model.pkl')\n", (670, 705), False, 'import joblib\n'), ((710, 728), 'flask.app.run', 'app.run', ([], {'port': '(8080)'}), '(port=8080)\n', (717, 728), False, 'from flask import Flask, app\n')] |
# region imports
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import norm
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_log_error
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score, cross_val_predict
import lightgbm
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from src.features.impute_columns import (impute_categorical_columns, impute_numeric_columns)
from src.contracts.Dataset import Dataset
from src.features.standardize import standardize
from src.features.label_encoder import MultiColumnLabelEncoder
# endregion
def create_log_transform(full_ds, column_name, offset=3):
    """Add a log-transformed copy of a column to the dataset.

    Creates a new column named ``<column_name>_log`` holding
    ``log(value + offset)``. The offset shifts values away from zero so the
    logarithm is defined for zero (or mildly negative) entries.

    Args:
        full_ds: pandas DataFrame (or any mapping of column name to array).
        column_name: name of the source column to transform.
        offset: constant added before taking the log. Defaults to 3, which
            matches the previously hard-coded shift, so existing callers are
            unaffected.

    Returns:
        The same ``full_ds`` object, mutated with the new column.
    """
    full_ds[column_name + '_log'] = np.log(full_ds[column_name] + offset)
    return full_ds
"numpy.log"
] | [((826, 858), 'numpy.log', 'np.log', (['(full_ds[column_name] + 3)'], {}), '(full_ds[column_name] + 3)\n', (832, 858), True, 'import numpy as np\n')] |
import scipy.io.wavfile as scwav
import numpy as np
import pylab
import librosa
import pyworld as pw
import os
import scipy.io as scio
from glob import glob
from tqdm import tqdm
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from sklearn.manifold import TSNE
def _power_to_db(S):
return 20*np.log10(S)
def _get_spect(filename, dim=8, mfcc=True):
    """Extract low-rank spectral features from a wav file.

    Reads *filename*, computes the WORLD spectral envelope (5 ms frames),
    takes a random 128-frame crop and forms the cumulative rank-1 .. rank-8
    SVD approximations of that crop.

    Args:
        filename: path of the wav file to analyse.
        dim: number of coefficients for pw.code_spectral_envelope.
        mfcc: if True, return the coded envelope of |rank-k approximation|
            for k = 1..8; if False, return only the raw rank-1 approximation
            (remaining seven slots are None).

    Returns:
        A list of 8 entries as described above, or None when the file yields
        128 frames or fewer.
    """
    sr, data = scwav.read(filename=filename)
    data = np.asarray(data, np.float64)
    _, spect, _ = pw.wav2world(data, sr, frame_period=5)
    if spect.shape[0] <= 128:
        return None
    # Random 128-frame window so every sample has the same length.
    start = np.random.randint(0, spect.shape[0] - 128)
    window = spect[start:start + 128]
    u_mat, s_mat, v_mat = np.linalg.svd(window)
    # Cumulative reconstructions: appx_k = sum_{r<=k} s_r * u_r v_r^T,
    # accumulated in the same order as the original unrolled code.
    approximations = []
    running = np.zeros(window.shape)
    for r in range(8):
        running = running + s_mat[r] * np.dot(u_mat[:, r:r + 1], v_mat[r:r + 1, :])
        approximations.append(running)
    if mfcc:
        return [pw.code_spectral_envelope(np.abs(a), sr, dim) for a in approximations]
    # Without MFCC coding only the rank-1 reconstruction is kept.
    return [approximations[0]] + [None] * 7
def _get_spect_no_abs(filename, dim=8, mfcc=True):
    """Extract low-rank spectral features from a wav file without taking the
    absolute value of the reconstructions.

    Identical to _get_spect except that the (possibly negative) rank-k
    approximations are fed to pw.code_spectral_envelope as-is.

    Args:
        filename: path of the wav file to analyse.
        dim: number of coefficients for pw.code_spectral_envelope.
        mfcc: if True, return the coded envelope of the rank-k approximation
            for k = 1..8; if False, return only the raw rank-1 approximation
            (remaining seven slots are None).

    Returns:
        A list of 8 entries as described above, or None when the file yields
        128 frames or fewer.
    """
    sr, data = scwav.read(filename=filename)
    data = np.asarray(data, np.float64)
    _, spect, _ = pw.wav2world(data, sr, frame_period=5)
    if spect.shape[0] <= 128:
        return None
    # Random 128-frame window so every sample has the same length.
    start = np.random.randint(0, spect.shape[0] - 128)
    window = spect[start:start + 128]
    u_mat, s_mat, v_mat = np.linalg.svd(window)
    # Cumulative reconstructions, accumulated in the original order.
    approximations = []
    running = np.zeros(window.shape)
    for r in range(8):
        running = running + s_mat[r] * np.dot(u_mat[:, r:r + 1], v_mat[r:r + 1, :])
        approximations.append(running)
    if mfcc:
        return [pw.code_spectral_envelope(a, sr, dim) for a in approximations]
    # Without MFCC coding only the rank-1 reconstruction is kept.
    return [approximations[0]] + [None] * 7
if __name__ == '__main__':
    # Exploratory script. The commented-out section below is an earlier
    # experiment (kept for reference) that DTW-aligned neutral/angry
    # utterance pairs and inspected SVD reconstructions of their envelopes.
    # sample_rate = 16000
    # window_len = 0.005
    # wav_file = '38.wav'
    # files = sorted(glob(os.path.join('/home/ravi/Downloads/Emo-Conv/neutral-angry/train/neutral', '*.wav')))
    # wav_files = [os.path.basename(f) for f in files]
    #
    # min_val = []
    # max_val = []
    # for w in wav_files:
    # src = scwav.read(os.path.join('/home/ravi/Downloads/Emo-Conv/neutral-angry/train/neutral', w))
    # src = np.asarray(src[1], np.float64)
    # f0_src, sp_src, ap_src = pw.wav2world(src, 16000, frame_period=5)
    # mfc_src = pw.code_spectral_envelope(sp_src, 16000, 23)
    #
    # tar = scwav.read(os.path.join('/home/ravi/Downloads/Emo-Conv/neutral-angry/train/angry', w))
    # tar = np.asarray(tar[1], np.float64)
    # f0_tar, sp_tar, ap_tar = pw.wav2world(tar, 16000, frame_period=5)
    # mfc_tar = pw.code_spectral_envelope(sp_tar, 16000, 23)
    #
    # src_mfcc = librosa.feature.mfcc(y=src, sr=sample_rate, \
    # hop_length=int(sample_rate*window_len), \
    # win_length=int(sample_rate*window_len), \
    # n_fft=1024, n_mels=128)
    #
    # tar_mfcc = librosa.feature.mfcc(y=tar, sr=sample_rate, \
    # hop_length=int(sample_rate*window_len), \
    # win_length=int(sample_rate*window_len), \
    # n_fft=1024, n_mels=128)
    #
    # _, cords = librosa.sequence.dtw(X=src_mfcc, Y=tar_mfcc, metric='cosine')
    # cords = np.flipud(cords)
    # sp_src = sp_src[cords[:,0],:]
    # sp_tar = sp_tar[cords[:,1],:]
    # for i in range(10):
    # q = np.random.randint(0, len(cords))
    # pylab.figure(), pylab.subplot(211)
    # pylab.plot(sp_src[cords[q,0],:], label='neutral')
    # pylab.plot(sp_tar[cords[q,1],:], label='angry')
    # pylab.grid(), pylab.title('Slice %d' % q), pylab.legend(loc=1)
    #
    # pylab.subplot(212)
    # pylab.plot(mfc_src[cords[q,0],:], label='neutral')
    # pylab.plot(mfc_tar[cords[q,1],:], label='angry')
    # pylab.grid(), pylab.title('Slice %d' % q), pylab.legend(loc=1)
    # u_src, sigma_src, v_src = np.linalg.svd(sp_src)
    # u_tar, sigma_tar, v_tar = np.linalg.svd(sp_tar)
    #
    # s_mat = np.zeros(sp_src.shape)
    # t_mat = np.zeros(sp_tar.shape)
    # s_mat_array = []
    # t_mat_array = []
    # for i in range(min([u_src.shape[0], v_src.shape[0]])):
    # x = np.dot(u_src[:,i:i+1], v_src[i:i+1,:])
    # s_mat += sigma_src[i]*x
    # s_mat_array.append(s_mat)
    # pylab.figure(figsize=(15,15)), pylab.imshow(_power_to_db(s_mat.T ** 2))
    # pylab.suptitle('#Components %d' % (i+1))
    # pylab.savefig('/home/ravi/Desktop/svd_recon/src_'+str(i)+'.png')
    # pylab.close()
    #
    # for i in range(min([u_tar.shape[0], v_tar.shape[0]])):
    # y = np.dot(u_tar[:,i:i+1], v_tar[i:i+1,:])
    # t_mat += sigma_tar[i]*y
    # t_mat_array.append(t_mat)
    # pylab.figure(figsize=(15,15)), pylab.imshow(_power_to_db(s_mat.T ** 2))
    # pylab.suptitle('#Components %d' % (i+1))
    # pylab.savefig('/home/ravi/Desktop/svd_recon/tar_'+str(i)+'.png')
    # pylab.close()
    #
    # break
    # s_mfc_array = np.asarray([pw.code_spectral_envelope(s, 16000, 4) for s in s_mat_array])
    # t_mfc_array = np.asarray([pw.code_spectral_envelope(t, 16000, 4) for t in t_mat_array])
    #
    # print(w)
    # min_val.append((np.min(s_mfc_array) ,np.min(t_mfc_array)))
    # max_val.append((np.max(s_mfc_array) ,np.max(t_mfc_array)))
    """
    Cohort analysis
    """
    # Active experiment: extract low-rank features for every neutral/angry
    # file pair in parallel (8 worker processes, 2 sampling rounds per file).
    src_list = sorted(glob(os.path.join('/home/ravi/Downloads/Emo-Conv/neutral-angry/train/neutral', '*.wav')))
    tar_list = sorted(glob(os.path.join('/home/ravi/Downloads/Emo-Conv/neutral-angry/train/angry', '*.wav')))
    executor = ProcessPoolExecutor(max_workers=8)
    src_futures = []
    tar_futures = []
    src_results = []
    tar_results = []
    dim = 8
    times_sampling = 2
    # Each sampling round re-submits every file; _get_spect_no_abs takes a
    # new random 128-frame crop per call.
    for sampling in range(times_sampling):
        for i in src_list:
            src_futures.append(executor.submit(partial(_get_spect_no_abs, i, dim, False)))
#            src_results.append(_get_spect_no_abs(i, dim))
#            print(i)
    src_results = [src_future.result() for src_future in tqdm(src_futures)]
    for sampling in range(times_sampling):
        for i in tar_list:
            tar_futures.append(executor.submit(partial(_get_spect_no_abs, i, dim, False)))
#            tar_results.append(_get_spect_no_abs(i, dim))
#            print(i)
    tar_results = [tar_future.result() for tar_future in tqdm(tar_futures)]
    # Keep only pairs where both files produced features (files shorter than
    # 129 frames return None).
    # NOTE(review): `!= None` should idiomatically be `is not None`.
    src_mfcc = [i for i,j in zip(src_results, tar_results) if i!=None and j!=None]
    tar_mfcc = [j for i,j in zip(src_results, tar_results) if i!=None and j!=None]
    # Stack the rank-k features (k = 1..8) for source and target separately.
    # NOTE(review): with mfcc=False the helper returns None for ranks 2-8,
    # so src_rank2..8 / tar_rank2..8 are arrays of None objects here.
    src_rank1 = np.asarray([i[0] for i in src_mfcc])
    src_rank2 = np.asarray([i[1] for i in src_mfcc])
    src_rank3 = np.asarray([i[2] for i in src_mfcc])
    src_rank4 = np.asarray([i[3] for i in src_mfcc])
    src_rank5 = np.asarray([i[4] for i in src_mfcc])
    src_rank6 = np.asarray([i[5] for i in src_mfcc])
    src_rank7 = np.asarray([i[6] for i in src_mfcc])
    src_rank8 = np.asarray([i[7] for i in src_mfcc])
    tar_rank1 = np.asarray([i[0] for i in tar_mfcc])
    tar_rank2 = np.asarray([i[1] for i in tar_mfcc])
    tar_rank3 = np.asarray([i[2] for i in tar_mfcc])
    tar_rank4 = np.asarray([i[3] for i in tar_mfcc])
    tar_rank5 = np.asarray([i[4] for i in tar_mfcc])
    tar_rank6 = np.asarray([i[5] for i in tar_mfcc])
    tar_rank7 = np.asarray([i[6] for i in tar_mfcc])
    tar_rank8 = np.asarray([i[7] for i in tar_mfcc])
    src_ranks = [src_rank1, src_rank2, src_rank3, src_rank4, src_rank5, src_rank6, src_rank7, src_rank8]
    tar_ranks = [tar_rank1, tar_rank2, tar_rank3, tar_rank4, tar_rank5, tar_rank6, tar_rank7, tar_rank8]
    # Commented-out follow-up: t-SNE embedding of each rank's features to
    # measure separation between the neutral and angry cohorts.
#
#    n_data = src_rank1.shape[0]
#    kl_div = []
#    norm_v = []
#    for i in range(8):
#        try:
#            tsne = TSNE(n_components=2, n_iter=2000, verbose=True)
#            embed_rank = tsne.fit_transform(np.concatenate((src_ranks[i].reshape(-1,128*dim),
#                                                            tar_ranks[i].reshape(-1,128*dim)),
#                                                           axis=0))
#            norm_v.append(np.linalg.norm(np.mean(embed_rank[:n_data]) - np.mean(embed_rank[n_data:])))
#            kl_div.append(tsne.kl_divergence_)
#            pylab.figure()
#            pylab.plot(embed_rank[:n_data,0], embed_rank[:n_data,1], 'r.')
#            pylab.plot(embed_rank[n_data:,0], embed_rank[n_data:,1], 'b.')
#            pylab.title('Rank %d' % (i+1))
#            print('######################## Norm is %f ############' % norm_v[-1])
#        except Exception as ex:
#            print(ex)
| [
"numpy.abs",
"numpy.log10",
"pyworld.wav2world",
"pyworld.code_spectral_envelope",
"tqdm.tqdm",
"numpy.asarray",
"os.path.join",
"numpy.random.randint",
"numpy.dot",
"scipy.io.wavfile.read",
"functools.partial",
"concurrent.futures.ProcessPoolExecutor",
"numpy.linalg.svd"
] | [((405, 434), 'scipy.io.wavfile.read', 'scwav.read', ([], {'filename': 'filename'}), '(filename=filename)\n', (415, 434), True, 'import scipy.io.wavfile as scwav\n'), ((446, 474), 'numpy.asarray', 'np.asarray', (['data', 'np.float64'], {}), '(data, np.float64)\n', (456, 474), True, 'import numpy as np\n'), ((493, 531), 'pyworld.wav2world', 'pw.wav2world', (['data', 'sr'], {'frame_period': '(5)'}), '(data, sr, frame_period=5)\n', (505, 531), True, 'import pyworld as pw\n'), ((2353, 2382), 'scipy.io.wavfile.read', 'scwav.read', ([], {'filename': 'filename'}), '(filename=filename)\n', (2363, 2382), True, 'import scipy.io.wavfile as scwav\n'), ((2394, 2422), 'numpy.asarray', 'np.asarray', (['data', 'np.float64'], {}), '(data, np.float64)\n', (2404, 2422), True, 'import numpy as np\n'), ((2441, 2479), 'pyworld.wav2world', 'pw.wav2world', (['data', 'sr'], {'frame_period': '(5)'}), '(data, sr, frame_period=5)\n', (2453, 2479), True, 'import pyworld as pw\n'), ((8365, 8399), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': '(8)'}), '(max_workers=8)\n', (8384, 8399), False, 'from concurrent.futures import ProcessPoolExecutor\n'), ((9354, 9390), 'numpy.asarray', 'np.asarray', (['[i[0] for i in src_mfcc]'], {}), '([i[0] for i in src_mfcc])\n', (9364, 9390), True, 'import numpy as np\n'), ((9407, 9443), 'numpy.asarray', 'np.asarray', (['[i[1] for i in src_mfcc]'], {}), '([i[1] for i in src_mfcc])\n', (9417, 9443), True, 'import numpy as np\n'), ((9460, 9496), 'numpy.asarray', 'np.asarray', (['[i[2] for i in src_mfcc]'], {}), '([i[2] for i in src_mfcc])\n', (9470, 9496), True, 'import numpy as np\n'), ((9513, 9549), 'numpy.asarray', 'np.asarray', (['[i[3] for i in src_mfcc]'], {}), '([i[3] for i in src_mfcc])\n', (9523, 9549), True, 'import numpy as np\n'), ((9566, 9602), 'numpy.asarray', 'np.asarray', (['[i[4] for i in src_mfcc]'], {}), '([i[4] for i in src_mfcc])\n', (9576, 9602), True, 'import numpy as np\n'), ((9619, 9655), 
'numpy.asarray', 'np.asarray', (['[i[5] for i in src_mfcc]'], {}), '([i[5] for i in src_mfcc])\n', (9629, 9655), True, 'import numpy as np\n'), ((9672, 9708), 'numpy.asarray', 'np.asarray', (['[i[6] for i in src_mfcc]'], {}), '([i[6] for i in src_mfcc])\n', (9682, 9708), True, 'import numpy as np\n'), ((9725, 9761), 'numpy.asarray', 'np.asarray', (['[i[7] for i in src_mfcc]'], {}), '([i[7] for i in src_mfcc])\n', (9735, 9761), True, 'import numpy as np\n'), ((9779, 9815), 'numpy.asarray', 'np.asarray', (['[i[0] for i in tar_mfcc]'], {}), '([i[0] for i in tar_mfcc])\n', (9789, 9815), True, 'import numpy as np\n'), ((9832, 9868), 'numpy.asarray', 'np.asarray', (['[i[1] for i in tar_mfcc]'], {}), '([i[1] for i in tar_mfcc])\n', (9842, 9868), True, 'import numpy as np\n'), ((9885, 9921), 'numpy.asarray', 'np.asarray', (['[i[2] for i in tar_mfcc]'], {}), '([i[2] for i in tar_mfcc])\n', (9895, 9921), True, 'import numpy as np\n'), ((9938, 9974), 'numpy.asarray', 'np.asarray', (['[i[3] for i in tar_mfcc]'], {}), '([i[3] for i in tar_mfcc])\n', (9948, 9974), True, 'import numpy as np\n'), ((9991, 10027), 'numpy.asarray', 'np.asarray', (['[i[4] for i in tar_mfcc]'], {}), '([i[4] for i in tar_mfcc])\n', (10001, 10027), True, 'import numpy as np\n'), ((10044, 10080), 'numpy.asarray', 'np.asarray', (['[i[5] for i in tar_mfcc]'], {}), '([i[5] for i in tar_mfcc])\n', (10054, 10080), True, 'import numpy as np\n'), ((10097, 10133), 'numpy.asarray', 'np.asarray', (['[i[6] for i in tar_mfcc]'], {}), '([i[6] for i in tar_mfcc])\n', (10107, 10133), True, 'import numpy as np\n'), ((10150, 10186), 'numpy.asarray', 'np.asarray', (['[i[7] for i in tar_mfcc]'], {}), '([i[7] for i in tar_mfcc])\n', (10160, 10186), True, 'import numpy as np\n'), ((332, 343), 'numpy.log10', 'np.log10', (['S'], {}), '(S)\n', (340, 343), True, 'import numpy as np\n'), ((573, 615), 'numpy.random.randint', 'np.random.randint', (['(0)', '(spect.shape[0] - 128)'], {}), '(0, spect.shape[0] - 128)\n', (590, 615), 
True, 'import numpy as np\n'), ((677, 697), 'numpy.linalg.svd', 'np.linalg.svd', (['spect'], {}), '(spect)\n', (690, 697), True, 'import numpy as np\n'), ((2521, 2563), 'numpy.random.randint', 'np.random.randint', (['(0)', '(spect.shape[0] - 128)'], {}), '(0, spect.shape[0] - 128)\n', (2538, 2563), True, 'import numpy as np\n'), ((2625, 2645), 'numpy.linalg.svd', 'np.linalg.svd', (['spect'], {}), '(spect)\n', (2638, 2645), True, 'import numpy as np\n'), ((730, 766), 'numpy.dot', 'np.dot', (['u_mat[:, 0:1]', 'v_mat[0:1, :]'], {}), '(u_mat[:, 0:1], v_mat[0:1, :])\n', (736, 766), True, 'import numpy as np\n'), ((2678, 2714), 'numpy.dot', 'np.dot', (['u_mat[:, 0:1]', 'v_mat[0:1, :]'], {}), '(u_mat[:, 0:1], v_mat[0:1, :])\n', (2684, 2714), True, 'import numpy as np\n'), ((3332, 3378), 'pyworld.code_spectral_envelope', 'pw.code_spectral_envelope', (['rank1_appx', 'sr', 'dim'], {}), '(rank1_appx, sr, dim)\n', (3357, 3378), True, 'import pyworld as pw\n'), ((3398, 3444), 'pyworld.code_spectral_envelope', 'pw.code_spectral_envelope', (['rank2_appx', 'sr', 'dim'], {}), '(rank2_appx, sr, dim)\n', (3423, 3444), True, 'import pyworld as pw\n'), ((3464, 3510), 'pyworld.code_spectral_envelope', 'pw.code_spectral_envelope', (['rank3_appx', 'sr', 'dim'], {}), '(rank3_appx, sr, dim)\n', (3489, 3510), True, 'import pyworld as pw\n'), ((3530, 3576), 'pyworld.code_spectral_envelope', 'pw.code_spectral_envelope', (['rank4_appx', 'sr', 'dim'], {}), '(rank4_appx, sr, dim)\n', (3555, 3576), True, 'import pyworld as pw\n'), ((3596, 3642), 'pyworld.code_spectral_envelope', 'pw.code_spectral_envelope', (['rank5_appx', 'sr', 'dim'], {}), '(rank5_appx, sr, dim)\n', (3621, 3642), True, 'import pyworld as pw\n'), ((3662, 3708), 'pyworld.code_spectral_envelope', 'pw.code_spectral_envelope', (['rank6_appx', 'sr', 'dim'], {}), '(rank6_appx, sr, dim)\n', (3687, 3708), True, 'import pyworld as pw\n'), ((3728, 3774), 'pyworld.code_spectral_envelope', 'pw.code_spectral_envelope', (['rank7_appx', 'sr', 
'dim'], {}), '(rank7_appx, sr, dim)\n', (3753, 3774), True, 'import pyworld as pw\n'), ((3794, 3840), 'pyworld.code_spectral_envelope', 'pw.code_spectral_envelope', (['rank8_appx', 'sr', 'dim'], {}), '(rank8_appx, sr, dim)\n', (3819, 3840), True, 'import pyworld as pw\n'), ((8150, 8236), 'os.path.join', 'os.path.join', (['"""/home/ravi/Downloads/Emo-Conv/neutral-angry/train/neutral"""', '"""*.wav"""'], {}), "('/home/ravi/Downloads/Emo-Conv/neutral-angry/train/neutral',\n '*.wav')\n", (8162, 8236), False, 'import os\n'), ((8262, 8347), 'os.path.join', 'os.path.join', (['"""/home/ravi/Downloads/Emo-Conv/neutral-angry/train/angry"""', '"""*.wav"""'], {}), "('/home/ravi/Downloads/Emo-Conv/neutral-angry/train/angry', '*.wav'\n )\n", (8274, 8347), False, 'import os\n'), ((8828, 8845), 'tqdm.tqdm', 'tqdm', (['src_futures'], {}), '(src_futures)\n', (8832, 8845), False, 'from tqdm import tqdm\n'), ((9151, 9168), 'tqdm.tqdm', 'tqdm', (['tar_futures'], {}), '(tar_futures)\n', (9155, 9168), False, 'from tqdm import tqdm\n'), ((811, 847), 'numpy.dot', 'np.dot', (['u_mat[:, 1:2]', 'v_mat[1:2, :]'], {}), '(u_mat[:, 1:2], v_mat[1:2, :])\n', (817, 847), True, 'import numpy as np\n'), ((893, 929), 'numpy.dot', 'np.dot', (['u_mat[:, 2:3]', 'v_mat[2:3, :]'], {}), '(u_mat[:, 2:3], v_mat[2:3, :])\n', (899, 929), True, 'import numpy as np\n'), ((975, 1011), 'numpy.dot', 'np.dot', (['u_mat[:, 3:4]', 'v_mat[3:4, :]'], {}), '(u_mat[:, 3:4], v_mat[3:4, :])\n', (981, 1011), True, 'import numpy as np\n'), ((1057, 1093), 'numpy.dot', 'np.dot', (['u_mat[:, 4:5]', 'v_mat[4:5, :]'], {}), '(u_mat[:, 4:5], v_mat[4:5, :])\n', (1063, 1093), True, 'import numpy as np\n'), ((1139, 1175), 'numpy.dot', 'np.dot', (['u_mat[:, 5:6]', 'v_mat[5:6, :]'], {}), '(u_mat[:, 5:6], v_mat[5:6, :])\n', (1145, 1175), True, 'import numpy as np\n'), ((1221, 1257), 'numpy.dot', 'np.dot', (['u_mat[:, 6:7]', 'v_mat[6:7, :]'], {}), '(u_mat[:, 6:7], v_mat[6:7, :])\n', (1227, 1257), True, 'import numpy as np\n'), ((1303, 1339), 
'numpy.dot', 'np.dot', (['u_mat[:, 7:8]', 'v_mat[7:8, :]'], {}), '(u_mat[:, 7:8], v_mat[7:8, :])\n', (1309, 1339), True, 'import numpy as np\n'), ((1410, 1428), 'numpy.abs', 'np.abs', (['rank1_appx'], {}), '(rank1_appx)\n', (1416, 1428), True, 'import numpy as np\n'), ((1484, 1502), 'numpy.abs', 'np.abs', (['rank2_appx'], {}), '(rank2_appx)\n', (1490, 1502), True, 'import numpy as np\n'), ((1558, 1576), 'numpy.abs', 'np.abs', (['rank3_appx'], {}), '(rank3_appx)\n', (1564, 1576), True, 'import numpy as np\n'), ((1632, 1650), 'numpy.abs', 'np.abs', (['rank4_appx'], {}), '(rank4_appx)\n', (1638, 1650), True, 'import numpy as np\n'), ((1706, 1724), 'numpy.abs', 'np.abs', (['rank5_appx'], {}), '(rank5_appx)\n', (1712, 1724), True, 'import numpy as np\n'), ((1780, 1798), 'numpy.abs', 'np.abs', (['rank6_appx'], {}), '(rank6_appx)\n', (1786, 1798), True, 'import numpy as np\n'), ((1854, 1872), 'numpy.abs', 'np.abs', (['rank7_appx'], {}), '(rank7_appx)\n', (1860, 1872), True, 'import numpy as np\n'), ((1928, 1946), 'numpy.abs', 'np.abs', (['rank8_appx'], {}), '(rank8_appx)\n', (1934, 1946), True, 'import numpy as np\n'), ((2759, 2795), 'numpy.dot', 'np.dot', (['u_mat[:, 1:2]', 'v_mat[1:2, :]'], {}), '(u_mat[:, 1:2], v_mat[1:2, :])\n', (2765, 2795), True, 'import numpy as np\n'), ((2841, 2877), 'numpy.dot', 'np.dot', (['u_mat[:, 2:3]', 'v_mat[2:3, :]'], {}), '(u_mat[:, 2:3], v_mat[2:3, :])\n', (2847, 2877), True, 'import numpy as np\n'), ((2923, 2959), 'numpy.dot', 'np.dot', (['u_mat[:, 3:4]', 'v_mat[3:4, :]'], {}), '(u_mat[:, 3:4], v_mat[3:4, :])\n', (2929, 2959), True, 'import numpy as np\n'), ((3005, 3041), 'numpy.dot', 'np.dot', (['u_mat[:, 4:5]', 'v_mat[4:5, :]'], {}), '(u_mat[:, 4:5], v_mat[4:5, :])\n', (3011, 3041), True, 'import numpy as np\n'), ((3087, 3123), 'numpy.dot', 'np.dot', (['u_mat[:, 5:6]', 'v_mat[5:6, :]'], {}), '(u_mat[:, 5:6], v_mat[5:6, :])\n', (3093, 3123), True, 'import numpy as np\n'), ((3169, 3205), 'numpy.dot', 'np.dot', (['u_mat[:, 6:7]', 
'v_mat[6:7, :]'], {}), '(u_mat[:, 6:7], v_mat[6:7, :])\n', (3175, 3205), True, 'import numpy as np\n'), ((3251, 3287), 'numpy.dot', 'np.dot', (['u_mat[:, 7:8]', 'v_mat[7:8, :]'], {}), '(u_mat[:, 7:8], v_mat[7:8, :])\n', (3257, 3287), True, 'import numpy as np\n'), ((8646, 8687), 'functools.partial', 'partial', (['_get_spect_no_abs', 'i', 'dim', '(False)'], {}), '(_get_spect_no_abs, i, dim, False)\n', (8653, 8687), False, 'from functools import partial\n'), ((8969, 9010), 'functools.partial', 'partial', (['_get_spect_no_abs', 'i', 'dim', '(False)'], {}), '(_get_spect_no_abs, i, dim, False)\n', (8976, 9010), False, 'from functools import partial\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import os
import sys
import json
import numpy as np
THIS_DIR = os.path.dirname(__file__)
sys.path.append(os.path.join(THIS_DIR, os.pardir))
from confonnx.get_model_info import get_model_info
def create_test_inputs(model_info):
    """Generate random inference inputs matching a model's input signature.

    Symbolic (non-integer) dimensions are replaced by 1. Float inputs are
    filled with uniform samples from [0, 1); integer inputs with uniform
    samples over the full range of their dtype.

    Args:
        model_info: dict with an 'inputs' mapping of input name ->
            {'shape': [...], 'type': dtype string}.

    Returns:
        dict: input name -> {'type': dtype string, 'values': nested lists}.
    """
    generated = {}
    for name, info in model_info['inputs'].items():
        # Replace symbolic/dynamic dimensions (e.g. batch size) with 1.
        dims = [d if isinstance(d, int) else 1 for d in info['shape']]
        dtype = info['type']
        if dtype.startswith('float'):
            values = np.random.random_sample(dims)
        else:
            bounds = np.iinfo(dtype)
            values = np.random.randint(low=bounds.min, high=bounds.max, size=dims)
        generated[name] = {'type': dtype, 'values': values.tolist()}
    return generated
if __name__ == '__main__':
    # CLI entry point: generate random inference inputs for an ONNX model
    # and dump them as JSON.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='Path to ONNX model', required=True)
    parser.add_argument('--out', help='Path to randomly generated inference input data in JSON format',
                        required=True)
    args = parser.parse_args()
    # Fail with an argparse-style error (exit code 2) on a bad model path.
    if not os.path.exists(args.model):
        parser.error('model file does not exist')
    model_info = get_model_info(args.model)
    data = create_test_inputs(model_info)
    # NOTE(review): --out is marked required above, so this guard is
    # always true; kept as-is.
    if args.out:
        with open(args.out, 'w') as f:
            json.dump(data, f)
        print(f'Data written to {args.out}')
| [
"os.path.exists",
"numpy.random.random_sample",
"confonnx.get_model_info.get_model_info",
"argparse.ArgumentParser",
"os.path.join",
"numpy.iinfo",
"os.path.dirname",
"numpy.random.randint",
"json.dump"
] | [((175, 200), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (190, 200), False, 'import os\n'), ((217, 250), 'os.path.join', 'os.path.join', (['THIS_DIR', 'os.pardir'], {}), '(THIS_DIR, os.pardir)\n', (229, 250), False, 'import os\n'), ((909, 934), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (932, 934), False, 'import argparse\n'), ((1298, 1324), 'confonnx.get_model_info.get_model_info', 'get_model_info', (['args.model'], {}), '(args.model)\n', (1312, 1324), False, 'from confonnx.get_model_info import get_model_info\n'), ((1198, 1224), 'os.path.exists', 'os.path.exists', (['args.model'], {}), '(args.model)\n', (1212, 1224), False, 'import os\n'), ((616, 646), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (639, 646), True, 'import numpy as np\n'), ((682, 697), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (690, 697), True, 'import numpy as np\n'), ((716, 778), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'dtinfo.min', 'high': 'dtinfo.max', 'size': 'shape'}), '(low=dtinfo.min, high=dtinfo.max, size=shape)\n', (733, 778), True, 'import numpy as np\n'), ((1440, 1458), 'json.dump', 'json.dump', (['data', 'f'], {}), '(data, f)\n', (1449, 1458), False, 'import json\n')] |
from MovieRecommender import process_data
import numpy as np
from scipy.sparse import csr_matrix, load_npz
import random
import implicit
from sklearn import metrics
import os
import pickle
def test_train_split(sparse_user_item, pct_test=0.2):
    """Mask a fraction of known interactions to build a train/test split.

    The test set is a binarised copy of the full ratings matrix; the
    training set is a copy in which ``pct_test`` of the nonzero
    user-item entries have been zeroed out.

    parameters:
        sparse_user_item - original user-item ratings (sparse csr_matrix).
        pct_test - fraction of nonzero interactions to mask in training.

    returns:
        train_data - ratings with the sampled interactions set to zero.
        test_data - binary copy of the full interaction matrix.
        users_altered - unique user rows whose entries were masked;
            needed later when evaluating performance via AUC.
    """
    # Binary preference matrix over every original interaction.
    test_data = sparse_user_item.copy()
    test_data[test_data != 0] = 1
    # Training copy from which interactions will be masked.
    train_data = sparse_user_item.copy()
    rows, cols = train_data.nonzero()
    interaction_pairs = list(zip(rows, cols))
    random.seed(0)  # reproducible masking
    # Number of user-item pairs to hide, rounded up.
    n_masked = int(np.ceil(pct_test * len(interaction_pairs)))
    masked = random.sample(interaction_pairs, n_masked)
    masked_rows = [pair[0] for pair in masked]
    masked_cols = [pair[1] for pair in masked]
    # Zero out the sampled interactions in the training copy.
    train_data[masked_rows, masked_cols] = 0
    # Drop explicitly stored zeros to save space.
    train_data.eliminate_zeros()
    users_altered = list(set(masked_rows))
    print("Train test split done! ", train_data.shape, test_data.shape)
    return train_data, test_data, users_altered
def train_model(train_data):
    """Fit an implicit-feedback ALS model.

    parameters:
        train_data - sparse ITEM-user interaction matrix (note: the
            caller passes the transpose of the user-item matrix).

    returns:
        (als_model, user_vecs, item_vecs) - the fitted model and its
        latent user/item factor matrices.
    """
    model = implicit.als.AlternatingLeastSquares(factors=20,
                                                 regularization=1e-3,
                                                 iterations=50)
    # Scale raw interaction counts into confidence values (alpha = 40,
    # as in Hu/Koren/Volinsky implicit-feedback ALS).
    confidence = (train_data * 40).astype('double')
    model.fit(confidence)
    user_vecs = model.user_factors
    item_vecs = model.item_factors
    print("Model trained, user vectors and item vectors shape",
          user_vecs.shape, item_vecs.shape)
    return model, user_vecs, item_vecs
def evaluate_model(training_set, altered_users, predictions, test_set):
    """Mean AUC of the model's ranking versus a popularity baseline.

    :param training_set: sparse user-item matrix with masked entries zeroed
    :param altered_users: user rows whose interactions were masked
    :param predictions: [user_vecs, item_vecs.T] as SPARSE matrices (see
        ``main``), so ``user_vec.dot(item_vecs)`` yields one score row
    :param test_set: matrix holding all the original interactions
    :return: (mean model AUC, mean popularity AUC), each rounded to 3 d.p.
    """
    def auc_score(predictions, test):
        # Area under the ROC curve for one user's score vector.
        fpr, tpr, thresholds = metrics.roc_curve(test, predictions)
        return metrics.auc(fpr, tpr)
    # Store AUC for each user that had item removed from training set
    store_auc = []
    # To store popular AUC scores
    popularity_auc = []
    # Get sum of item interactions to find most popular
    pop_items = np.array(test_set.sum(axis=0)).reshape(-1)
    item_vecs = predictions[1]
    # Iterate through each user that had an item altered
    for user in altered_users:
        # Get the training set row
        training_row = training_set[user, :].toarray().reshape(-1)
        # Find where the interaction had not yet occurred
        zero_inds = np.where(training_row == 0)
        # Get the predicted values based on our user/item vectors
        user_vec = predictions[0][user, :]
        pred = user_vec.dot(item_vecs).toarray()[0, zero_inds].reshape(-1)
        # Get only the items that were originally zero
        # Select ratings from MF prediction for user that had no interaction
        actual = test_set[user, :].toarray()[0, zero_inds].reshape(-1)
        # Select yes/no interaction pairs from the original full data
        # Get the item popularity for our chosen items
        pop = pop_items[zero_inds]
        # Calculate AUC for the given user and store
        store_auc.append(auc_score(pred, actual))
        # Calculate AUC using most popular and score
        popularity_auc.append(auc_score(pop, actual))
    # End of per-user iteration
    # '%.3f' keeps the reported means at three decimal places.
    mean_s_auc = float('%.3f' % np.mean(store_auc))
    mean_p_auc = float('%.3f' % np.mean(popularity_auc))
    return mean_s_auc, mean_p_auc
def main():
    """End-to-end pipeline: preprocess data, split train/test, fit ALS,
    report AUC, and persist all artefacts under ./output."""
    process_data.main()
    sparse_user_item = load_npz("./output/sparse_user_item.npz")
    train_data, test_data, users_altered = test_train_split(sparse_user_item,
                                                            pct_test=0.2)
    # the parameter to trail_model should be item - user matrix
    als_model, user_vecs, item_vecs = train_model(train_data.T)
    # Factors are wrapped as sparse matrices so evaluate_model can use
    # sparse dot products / .toarray().
    print("implicit_recomm_auc,popularity_auc", evaluate_model(train_data,
          users_altered, [csr_matrix(user_vecs), csr_matrix(item_vecs.T)],
          test_data))
    directory = './output'
    if not os.path.exists(directory):
        os.makedirs(directory)
    np.save('./output/item_vecs', item_vecs)
    np.save('./output/user_vecs', user_vecs)
    # Pickle the model and both splits for later offline analysis.
    with open('./output/als_model', 'wb') as file:
        pickle.dump(als_model, file)
    with open('./output/train_data', 'wb') as train_file:
        pickle.dump(train_data, train_file)
    with open('./output/test_data', 'wb') as test_file:
        pickle.dump(test_data, test_file)
if __name__ == "__main__":
    main()
| [
"os.path.exists",
"random.sample",
"numpy.mean",
"pickle.dump",
"os.makedirs",
"numpy.where",
"scipy.sparse.load_npz",
"sklearn.metrics.auc",
"MovieRecommender.process_data.main",
"random.seed",
"sklearn.metrics.roc_curve",
"implicit.als.AlternatingLeastSquares",
"scipy.sparse.csr_matrix",
... | [((2163, 2177), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (2174, 2177), False, 'import random\n'), ((2435, 2476), 'random.sample', 'random.sample', (['nonzero_pairs', 'num_samples'], {}), '(nonzero_pairs, num_samples)\n', (2448, 2476), False, 'import random\n'), ((3296, 3385), 'implicit.als.AlternatingLeastSquares', 'implicit.als.AlternatingLeastSquares', ([], {'factors': '(20)', 'regularization': '(0.001)', 'iterations': '(50)'}), '(factors=20, regularization=0.001,\n iterations=50)\n', (3332, 3385), False, 'import implicit\n'), ((5744, 5763), 'MovieRecommender.process_data.main', 'process_data.main', ([], {}), '()\n', (5761, 5763), False, 'from MovieRecommender import process_data\n'), ((5787, 5828), 'scipy.sparse.load_npz', 'load_npz', (['"""./output/sparse_user_item.npz"""'], {}), "('./output/sparse_user_item.npz')\n", (5795, 5828), False, 'from scipy.sparse import csr_matrix, load_npz\n'), ((6382, 6422), 'numpy.save', 'np.save', (['"""./output/item_vecs"""', 'item_vecs'], {}), "('./output/item_vecs', item_vecs)\n", (6389, 6422), True, 'import numpy as np\n'), ((6427, 6467), 'numpy.save', 'np.save', (['"""./output/user_vecs"""', 'user_vecs'], {}), "('./output/user_vecs', user_vecs)\n", (6434, 6467), True, 'import numpy as np\n'), ((4133, 4169), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['test', 'predictions'], {}), '(test, predictions)\n', (4150, 4169), False, 'from sklearn import metrics\n'), ((4185, 4206), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (4196, 4206), False, 'from sklearn import metrics\n'), ((4767, 4794), 'numpy.where', 'np.where', (['(training_row == 0)'], {}), '(training_row == 0)\n', (4775, 4794), True, 'import numpy as np\n'), ((6320, 6345), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (6334, 6345), False, 'import os\n'), ((6355, 6377), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (6366, 6377), False, 'import os\n'), ((6528, 6556), 
'pickle.dump', 'pickle.dump', (['als_model', 'file'], {}), '(als_model, file)\n', (6539, 6556), False, 'import pickle\n'), ((6624, 6659), 'pickle.dump', 'pickle.dump', (['train_data', 'train_file'], {}), '(train_data, train_file)\n', (6635, 6659), False, 'import pickle\n'), ((6725, 6758), 'pickle.dump', 'pickle.dump', (['test_data', 'test_file'], {}), '(test_data, test_file)\n', (6736, 6758), False, 'import pickle\n'), ((5615, 5633), 'numpy.mean', 'np.mean', (['store_auc'], {}), '(store_auc)\n', (5622, 5633), True, 'import numpy as np\n'), ((5667, 5690), 'numpy.mean', 'np.mean', (['popularity_auc'], {}), '(popularity_auc)\n', (5674, 5690), True, 'import numpy as np\n'), ((6210, 6231), 'scipy.sparse.csr_matrix', 'csr_matrix', (['user_vecs'], {}), '(user_vecs)\n', (6220, 6231), False, 'from scipy.sparse import csr_matrix, load_npz\n'), ((6233, 6256), 'scipy.sparse.csr_matrix', 'csr_matrix', (['item_vecs.T'], {}), '(item_vecs.T)\n', (6243, 6256), False, 'from scipy.sparse import csr_matrix, load_npz\n')] |
import os
import shutil
import numpy as np
from util import log_util
log = log_util.get_logger("file process")
def create_blank_file(file_name):
    """Create (or truncate) ``file_name`` as an empty file.

    :param file_name: path of the file to create
    :return: None
    """
    with open(file_name, 'w') as handle:
        handle.write("")
    log.debug("blank file %s created.." % file_name)
def read_file_list_from_path(path, file_type=None, if_recursive=False):
    '''
    Collect files under ``path``, sorted by name.

    :param path: directory to scan
    :param file_type: optional suffix filter (e.g. ".wav"); None keeps all
    :param if_recursive: descend into subdirectories when True
    :return: sorted list of matching paths
    '''
    file_list = []
    for file in os.listdir(path):
        tmp_file = os.path.join(path, file)
        if if_recursive:
            if os.path.isfile(tmp_file):
                if file_type:
                    if str(tmp_file).endswith(file_type):
                        file_list.append(tmp_file)
                else:
                    file_list.append(tmp_file)
            elif os.path.isdir(tmp_file):
                # Recurse into subdirectories and merge their results.
                file_list += read_file_list_from_path(tmp_file, file_type, if_recursive)
        else:
            # NOTE(review): in non-recursive mode with file_type=None this
            # appends subdirectory entries as well as files — confirm
            # whether that is intended by callers.
            if file_type is not None:
                if file.endswith(file_type):
                    file_list.append(tmp_file)
            else:
                file_list.append(tmp_file)
    file_list.sort()
    return file_list
def read_file_by_line(filename):
    '''
    Read a text file and return its non-empty lines, stripped.

    :param filename: file to read
    :return: list of stripped line contents (blank lines are skipped)
    '''
    content = []
    with open(filename, 'r') as handle:
        for raw in handle:
            stripped = raw.strip()
            if not stripped:
                continue
            content.append(stripped)
    return content
def write2file(content, save_file):
    '''
    Write a string, or a list of strings, to ``save_file``.

    :param content: text to write; a list is written one item per line,
        each terminated with a carriage return ("\\r"), matching the
        original on-disk format
    :param save_file: destination path
    :return: None
    '''
    with open(save_file, "w") as handle:
        if isinstance(content, list):
            for item in content:
                handle.write(item + "\r")
        else:
            handle.write(content)
    log.debug(" write content to %s " % save_file)
def copy_filepath(src_path, target_path):
    '''
    Recursively copy the directory tree at ``src_path`` to ``target_path``.

    :param src_path: source directory
    :param target_path: destination directory (shutil.copytree requires
        that it does not already exist)
    :return: None
    '''
    shutil.copytree(src_path, target_path)
    log.debug("copy directory from %s to %s finished.." % (src_path, target_path))
def del_path_list(path_list):
    '''
    Delete every existing file or directory named in ``path_list``.

    :param path_list: paths (files or directories) to remove; missing
        paths are silently skipped
    :return: None
    '''
    for target in path_list:
        if not os.path.exists(target):
            continue
        if os.path.isdir(target):
            shutil.rmtree(target, ignore_errors=True)
        elif os.path.isfile(target):
            os.remove(target)
        log.debug(" file path %s was deleted" % target)
def prepare_file_path_list(file_id_list, file_dir, file_extension, new_dir_switch=True):
    """Build '<file_dir>/<file_id><file_extension>' paths for each id.

    :param file_id_list: iterable of base file names (without extension)
    :param file_dir: directory the paths should live in
    :param file_extension: suffix to append (include the dot, e.g. '.lab')
    :param new_dir_switch: create ``file_dir`` if it does not exist
    :return: list of assembled path strings
    """
    if new_dir_switch and not os.path.exists(file_dir):
        os.makedirs(file_dir)
    return [file_dir + '/' + file_id + file_extension
            for file_id in file_id_list]
def create_path_list(path_list):
    '''
    Create each directory in ``path_list`` if it does not yet exist.

    :param path_list: directory paths to create (one level each; parents
        must already exist, as os.mkdir is used)
    :return: None
    '''
    for target in path_list:
        if not os.path.exists(target):
            os.mkdir(target)
        log.debug(" file path %s created " % target)
def read_binfile(filename, dim=60, dtype=np.float64):
    '''
    Read a flat binary file into a 2-D numpy array.

    :param filename: path of the binary file
    :param dim: number of columns (features) per frame
    :param dtype: on-disk element type
    :return: float64 array of shape (n_frames, dim), passed through
        np.squeeze (so a single-frame file yields a 1-D array)
    :raises ValueError: if the file size is not a multiple of ``dim``
    '''
    # Context manager guarantees the handle is closed even if
    # np.fromfile raises (the original leaked the handle on error).
    with open(filename, 'rb') as fid:
        v_data = np.fromfile(fid, dtype=dtype)
    if np.mod(v_data.size, dim) != 0:
        raise ValueError('Dimension provided not compatible with file size.')
    # Cast keeps compatibility with the numpy default dtype.
    m_data = v_data.reshape((-1, dim)).astype('float64')
    return np.squeeze(m_data)
def write_binfile(m_data, filename, dtype=np.float64):
    '''
    Write a numpy array (or array-like) to a flat binary file.

    :param m_data: data to write; converted with np.array(m_data, dtype)
    :param filename: destination path
    :param dtype: on-disk element type
    :return: None
    '''
    m_data = np.array(m_data, dtype)
    # Context manager guarantees the handle is closed even if tofile
    # raises (the original leaked the handle on error).
    with open(filename, 'wb') as fid:
        m_data.tofile(fid)
    return
def array_to_binary_file(self, data, output_file_name):
    '''
    Write ``data`` to ``output_file_name`` as raw float32 binary.

    NOTE(review): this is a module-level function that still takes a
    ``self`` parameter (likely copied out of a class); callers must pass
    a placeholder first argument. Signature kept for compatibility.

    :param data: array-like values to write
    :param output_file_name: destination path
    :return: None
    '''
    data = np.array(data, 'float32')
    # Context manager guarantees the handle is closed even on error.
    with open(output_file_name, 'wb') as fid:
        data.tofile(fid)
def load_binary_file_frame(self, file_name, dimension):
    '''
    Load a raw float32 binary file as frames of ``dimension`` features.

    NOTE(review): module-level function with a spurious ``self``
    parameter (see array_to_binary_file); signature kept as-is.

    :param file_name: path of the binary file
    :param dimension: number of features per frame
    :return: (features array of shape (frame_number, dimension), frame_number)
    '''
    # Context manager guarantees the handle is closed even on error.
    with open(file_name, 'rb') as fid_lab:
        features = np.fromfile(fid_lab, dtype=np.float32)
    assert features.size % float(dimension) == 0.0, 'specified dimension not compatible with data'
    # BUG FIX: use integer division — under Python 3 '/' yields a float,
    # which made the slice below raise TypeError in the original.
    frame_number = features.size // dimension
    features = features[:(dimension * frame_number)]
    features = features.reshape((-1, dimension))
    return features, frame_number
# Manual smoke test: recursively list everything under D:/test.
# NOTE(review): the local name ``list`` shadows the builtin here.
if __name__ == "__main__":
    list = read_file_list_from_path("D:/test", if_recursive=True)
    print(list)
| [
"os.path.exists",
"numpy.fromfile",
"os.listdir",
"os.makedirs",
"util.log_util.get_logger",
"os.path.join",
"numpy.squeeze",
"shutil.copytree",
"numpy.array",
"os.path.isfile",
"os.path.isdir",
"os.mkdir",
"shutil.rmtree",
"numpy.mod",
"os.remove"
] | [((78, 113), 'util.log_util.get_logger', 'log_util.get_logger', (['"""file process"""'], {}), "('file process')\n", (97, 113), False, 'from util import log_util\n'), ((552, 568), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (562, 568), False, 'import os\n'), ((2300, 2338), 'shutil.copytree', 'shutil.copytree', (['src_path', 'target_path'], {}), '(src_path, target_path)\n', (2315, 2338), False, 'import shutil\n'), ((3678, 3707), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': 'dtype'}), '(fid, dtype=dtype)\n', (3689, 3707), True, 'import numpy as np\n'), ((3969, 3987), 'numpy.squeeze', 'np.squeeze', (['m_data'], {}), '(m_data)\n', (3979, 3987), True, 'import numpy as np\n'), ((4133, 4156), 'numpy.array', 'np.array', (['m_data', 'dtype'], {}), '(m_data, dtype)\n', (4141, 4156), True, 'import numpy as np\n'), ((4306, 4331), 'numpy.array', 'np.array', (['data', '"""float32"""'], {}), "(data, 'float32')\n", (4314, 4331), True, 'import numpy as np\n'), ((4516, 4554), 'numpy.fromfile', 'np.fromfile', (['fid_lab'], {'dtype': 'np.float32'}), '(fid_lab, dtype=np.float32)\n', (4527, 4554), True, 'import numpy as np\n'), ((589, 613), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (601, 613), False, 'import os\n'), ((2622, 2642), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2636, 2642), False, 'import os\n'), ((3020, 3041), 'os.makedirs', 'os.makedirs', (['file_dir'], {}), '(file_dir)\n', (3031, 3041), False, 'import os\n'), ((3731, 3755), 'numpy.mod', 'np.mod', (['v_data.size', 'dim'], {}), '(v_data.size, dim)\n', (3737, 3755), True, 'import numpy as np\n'), ((654, 678), 'os.path.isfile', 'os.path.isfile', (['tmp_file'], {}), '(tmp_file)\n', (668, 678), False, 'import os\n'), ((2659, 2678), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2672, 2678), False, 'import os\n'), ((2967, 2991), 'os.path.exists', 'os.path.exists', (['file_dir'], {}), '(file_dir)\n', (2981, 2991), False, 'import os\n'), 
((3418, 3438), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3432, 3438), False, 'import os\n'), ((3452, 3466), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (3460, 3466), False, 'import os\n'), ((905, 928), 'os.path.isdir', 'os.path.isdir', (['tmp_file'], {}), '(tmp_file)\n', (918, 928), False, 'import os\n'), ((2696, 2735), 'shutil.rmtree', 'shutil.rmtree', (['path'], {'ignore_errors': '(True)'}), '(path, ignore_errors=True)\n', (2709, 2735), False, 'import shutil\n'), ((2753, 2773), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (2767, 2773), False, 'import os\n'), ((2791, 2806), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (2800, 2806), False, 'import os\n')] |
"""
感知机模型:
1、训练数据样本;
2、选取初始值;
3、判断是否为误分类点;
4、若是误分类点,则随机梯度下降,并更新权值 w 与 偏置值 b;
5、直至无误分类点。
"""
#%%
import numpy as np
import matplotlib.pyplot as plt
#%%
class showPicture:
    """Plot 2-D samples, the decision line w·x + b = 0 and its normal
    vector for the perceptron demo."""

    def __init__(self, xSample, ySample, w, b):
        """Set up the figure, scatter the samples colour-coded by label.

        :param xSample: array of 2-D sample points
        :param ySample: array of labels in {+1, -1}
        :param w: weight vector (shape (2, 1))
        :param b: bias term
        """
        self.w = w
        self.b = b
        self.xData = np.linspace(-2, 2, 100)
        self.yData = self.expression()
        plt.figure(1)
        plt.title("Perception classifier", size = 14)
        plt.xlabel("x-axis", size = 14)
        plt.ylabel("y-axis", size = 14)
        plt.xlim(left = -2, right = 2)
        plt.ylim(bottom = -2, top = 2)
        for i, xi in enumerate(xSample):
            # BUG FIX: the original read the module-level global ``y``
            # here instead of the ``ySample`` argument.
            plt.scatter(xi[0], xi[1], s = 50,
                        color = "b" if ySample[i] == 1 else "r",
                        marker = "o" if ySample[i] == 1 else "x")
    def expression(self):
        # Decision boundary: w0*x + w1*y + b = 0  =>  y = (-b - w0*x) / w1
        yData = (-self.b - self.w[0] * self.xData) / self.w[1]
        return yData
    def getCenterPoint(self):
        """Return the midpoint of the plotted decision line."""
        centerX = (self.xData[0] + self.xData[-1]) / 2
        centerY = (self.yData[0] + self.yData[-1]) / 2
        return centerX, centerY
    def drawNormalVector(self, A, B, x0, y0):
        """Draw a short segment with slope B/A through (x0, y0)."""
        xData = np.linspace(x0, x0 + 0.25, 20)
        yData = (B / A) * xData + (y0 - B / A * x0)
        plt.plot(xData, yData, color = "r", label = "normal vector")
    def show(self, *errorPoint):
        """Render the current line; optionally highlight an error point."""
        if len(errorPoint) != 0:
            plt.scatter(errorPoint[0][0], errorPoint[0][1], s = 50, color = "k")
            plt.text(errorPoint[0][0] + 0.2, errorPoint[0][1] + 0.2, "Error Point(" + str(errorPoint[0][0]) + ", " + str(errorPoint[0][1]) + ")")
        # Build the equation label, e.g. "w0 * x0 + w1 * x1 + b = 0".
        line = ""
        for i, wi in enumerate(self.w):
            line += str(wi[0]) + "* x" + str(i) + " + "
        line += str(self.b) + " = 0"
        plt.plot(self.xData, self.yData, color = "k", label = "y1 data")
        # Annotate the equation and the normal vector at the line's midpoint.
        x0 = self.getCenterPoint()[0]
        y0 = self.getCenterPoint()[1]
        self.drawNormalVector(self.w[0][0], self.w[1][0], x0, y0)
        plt.text(x0, y0, line)
        plt.show()
#%%
class perceptron:
    """Linear perceptron trained by stochastic gradient descent; re-plots
    the decision line each time a misclassified point is found."""

    def __init__(self, x, y, a = 1):
        # x: sample matrix (n_samples, n_features); y: labels in {+1, -1}
        self.x = x
        self.y = y
        self.w = np.zeros((x.shape[1], 1))
        self.b = 0
        # a is the learning rate
        self.a = a
    def sign(self, w, b, x):
        # Raw score w.x + b; a sample is misclassified when y * score <= 0.
        return np.dot(x, w) + b
    def SGD(self, *args):
        """Run SGD until every sample is classified correctly.

        :return: the learned weight vector ``w`` and bias ``b``
        """
        flag = True
        while flag:
            # count tracks mistakes in this pass; zero mistakes ends training
            count = 0
            for i, xi in enumerate(self.x):
                # Check whether this point is misclassified
                tmpY = self.sign(self.w, self.b, xi)
                if self.y[i] * tmpY <= 0:
                    # Misclassified: update weights w and bias b
                    count += 1
                    # Plot current state, highlighting the error point
                    sp = showPicture(self.x, self.y, self.w, self.b)
                    sp.show(xi)
                    deltaW = self.a * self.y[i] * xi
                    deltaB = self.a * self.y[i]
                    self.w = self.w + deltaW.reshape(self.w.shape)
                    self.b = self.b + deltaB
            if count == 0:
                flag = False
        return self.w, self.b
#%%
# Toy training data: (0,0) and (1,1) labelled +1, (0,1) and (1,0)
# labelled -1.
# NOTE(review): this labelling is XOR-like and NOT linearly separable,
# so the perceptron's SGD loop may never terminate — confirm intent.
x1 = np.array((0, 0))
x2 = np.array((1, 1))
x3 = np.array((0, 1))
x4 = np.array((1, 0))
y1 = y2 = 1
y3 = y4 = -1
x = np.array((x1, x2, x3, x4))
y = np.array((y1, y2, y3, y4))
#%%
# Train the perceptron and report the learned parameters.
p = perceptron(x, y)
w, b = p.SGD()
print("w = ", w)
print("b = ", b)
#%%
# Plot the final decision boundary over the full sample set.
sp = showPicture(x, y, w, b)
sp.show()
#%%
#%%
| [
"matplotlib.pyplot.text",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.dot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.p... | [((3718, 3734), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (3726, 3734), True, 'import numpy as np\n'), ((3740, 3756), 'numpy.array', 'np.array', (['(1, 1)'], {}), '((1, 1))\n', (3748, 3756), True, 'import numpy as np\n'), ((3762, 3778), 'numpy.array', 'np.array', (['(0, 1)'], {}), '((0, 1))\n', (3770, 3778), True, 'import numpy as np\n'), ((3784, 3800), 'numpy.array', 'np.array', (['(1, 0)'], {}), '((1, 0))\n', (3792, 3800), True, 'import numpy as np\n'), ((3832, 3858), 'numpy.array', 'np.array', (['(x1, x2, x3, x4)'], {}), '((x1, x2, x3, x4))\n', (3840, 3858), True, 'import numpy as np\n'), ((3863, 3889), 'numpy.array', 'np.array', (['(y1, y2, y3, y4)'], {}), '((y1, y2, y3, y4))\n', (3871, 3889), True, 'import numpy as np\n'), ((303, 326), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(100)'], {}), '(-2, 2, 100)\n', (314, 326), True, 'import numpy as np\n'), ((374, 387), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (384, 387), True, 'import matplotlib.pyplot as plt\n'), ((396, 439), 'matplotlib.pyplot.title', 'plt.title', (['"""Perception classifier"""'], {'size': '(14)'}), "('Perception classifier', size=14)\n", (405, 439), True, 'import matplotlib.pyplot as plt\n'), ((450, 479), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x-axis"""'], {'size': '(14)'}), "('x-axis', size=14)\n", (460, 479), True, 'import matplotlib.pyplot as plt\n'), ((490, 519), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y-axis"""'], {'size': '(14)'}), "('y-axis', size=14)\n", (500, 519), True, 'import matplotlib.pyplot as plt\n'), ((530, 556), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'left': '(-2)', 'right': '(2)'}), '(left=-2, right=2)\n', (538, 556), True, 'import matplotlib.pyplot as plt\n'), ((569, 595), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(-2)', 'top': '(2)'}), '(bottom=-2, top=2)\n', (577, 595), True, 'import matplotlib.pyplot as plt\n'), ((1117, 1147), 'numpy.linspace', 'np.linspace', (['x0', 
'(x0 + 0.25)', '(20)'], {}), '(x0, x0 + 0.25, 20)\n', (1128, 1147), True, 'import numpy as np\n'), ((1208, 1264), 'matplotlib.pyplot.plot', 'plt.plot', (['xData', 'yData'], {'color': '"""r"""', 'label': '"""normal vector"""'}), "(xData, yData, color='r', label='normal vector')\n", (1216, 1264), True, 'import matplotlib.pyplot as plt\n'), ((1754, 1814), 'matplotlib.pyplot.plot', 'plt.plot', (['self.xData', 'self.yData'], {'color': '"""k"""', 'label': '"""y1 data"""'}), "(self.xData, self.yData, color='k', label='y1 data')\n", (1762, 1814), True, 'import matplotlib.pyplot as plt\n'), ((1999, 2021), 'matplotlib.pyplot.text', 'plt.text', (['x0', 'y0', 'line'], {}), '(x0, y0, line)\n', (2007, 2021), True, 'import matplotlib.pyplot as plt\n'), ((2030, 2040), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2038, 2040), True, 'import matplotlib.pyplot as plt\n'), ((2157, 2182), 'numpy.zeros', 'np.zeros', (['(x.shape[1], 1)'], {}), '((x.shape[1], 1))\n', (2165, 2182), True, 'import numpy as np\n'), ((654, 756), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xi[0]', 'xi[1]'], {'s': '(50)', 'color': "('b' if y[i] == 1 else 'r')", 'marker': "('o' if y[i] == 1 else 'x')"}), "(xi[0], xi[1], s=50, color='b' if y[i] == 1 else 'r', marker='o' if\n y[i] == 1 else 'x')\n", (665, 756), True, 'import matplotlib.pyplot as plt\n'), ((1379, 1443), 'matplotlib.pyplot.scatter', 'plt.scatter', (['errorPoint[0][0]', 'errorPoint[0][1]'], {'s': '(50)', 'color': '"""k"""'}), "(errorPoint[0][0], errorPoint[0][1], s=50, color='k')\n", (1390, 1443), True, 'import matplotlib.pyplot as plt\n'), ((2284, 2296), 'numpy.dot', 'np.dot', (['x', 'w'], {}), '(x, w)\n', (2290, 2296), True, 'import numpy as np\n')] |
import os
import tempfile
import unittest
from nnimgproc.util.parameters import Parameters
class TestParametersMethods(unittest.TestCase):
    """Unit tests for nnimgproc.util.parameters.Parameters."""

    def test_null(self):
        """get() raises for a missing key but returns a stored None."""
        params = Parameters()
        with self.assertRaises(ValueError):
            params.get('none')
        params.set('none', None)
        self.assertIsNone(params.get('none'))
    def test_default_value(self):
        """get() falls back to the provided default for missing keys."""
        params = Parameters()
        self.assertEqual([0, 3], params.get('none', [0, 3]))
        self.assertNotEqual([4, 3], params.get('none', [0, 3]))
    def test_set_get(self):
        """Stored values take precedence over defaults; arrays round-trip."""
        params = Parameters()
        params.set('none', [0, 3])
        self.assertEqual([0, 3], params.get('none', [4, 3]))
        import numpy as np
        value = np.random.uniform(0, 1, (10, 10))
        params.set('value', value)
        # Set a reasonably small value as threshold.
        self.assertTrue((value - params.get('value')).sum() < 1e-5)
    def test_save_load(self):
        """save()/load() round-trips values; the loaded copy is decoupled."""
        params = Parameters()
        params.set('none', [0, 3])
        import numpy as np
        value = np.random.uniform(0, 1, (10, 10))
        params.set('value', value)
        params.save(os.path.join(tempfile.gettempdir(),
                                 'nnimgproc_test_parameters.pkl'))
        new_params = Parameters()
        new_params.load(os.path.join(tempfile.gettempdir(),
                                     'nnimgproc_test_parameters.pkl'))
        self.assertEqual([0, 3], new_params.get('none'))
        self.assertLess(np.abs((value - new_params.get('value'))).sum(), 1e-5)
        value[5, 5] += 3e-5
        # The in-memory value is mutable: params shares the same array.
        self.assertEqual(value[5, 5], params.get('value')[5, 5])
        # The loaded copy keeps the original pickled value, so the new
        # delta exceeds the comparison threshold.
        self.assertFalse((value[5, 5] - new_params.get('value')[5, 5]) < 1e-5)
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"nnimgproc.util.parameters.Parameters",
"tempfile.gettempdir",
"numpy.random.uniform"
] | [((1870, 1885), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1883, 1885), False, 'import unittest\n'), ((185, 197), 'nnimgproc.util.parameters.Parameters', 'Parameters', ([], {}), '()\n', (195, 197), False, 'from nnimgproc.util.parameters import Parameters\n'), ((404, 416), 'nnimgproc.util.parameters.Parameters', 'Parameters', ([], {}), '()\n', (414, 416), False, 'from nnimgproc.util.parameters import Parameters\n'), ((588, 600), 'nnimgproc.util.parameters.Parameters', 'Parameters', ([], {}), '()\n', (598, 600), False, 'from nnimgproc.util.parameters import Parameters\n'), ((740, 773), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(10, 10)'], {}), '(0, 1, (10, 10))\n', (757, 773), True, 'import numpy as np\n'), ((978, 990), 'nnimgproc.util.parameters.Parameters', 'Parameters', ([], {}), '()\n', (988, 990), False, 'from nnimgproc.util.parameters import Parameters\n'), ((1070, 1103), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(10, 10)'], {}), '(0, 1, (10, 10))\n', (1087, 1103), True, 'import numpy as np\n'), ((1284, 1296), 'nnimgproc.util.parameters.Parameters', 'Parameters', ([], {}), '()\n', (1294, 1296), False, 'from nnimgproc.util.parameters import Parameters\n'), ((1172, 1193), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (1191, 1193), False, 'import tempfile\n'), ((1334, 1355), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (1353, 1355), False, 'import tempfile\n')] |
import numpy as np
class MCAgent:
    """Tabular Monte-Carlo agent for a grid environment.

    Runs epsilon-greedy episodes and delegates value estimation to a
    subclass's ``predictEpisode`` (first-visit or every-visit).
    """

    def __init__(self, environment, manpower=300, episodes=1000, maxSteps=1000, epsilon=0.05, alpha=0.1, gamma=1):
        # NOTE(review): manpower, alpha and gamma are stored but unused by
        # this class and the visible subclasses — confirm before relying
        # on them.
        self.environment = environment
        self.actionSet = self.environment.actionSet
        self.manpower = manpower
        self.episodes = episodes  # number of episodes to run
        self.maxSteps = maxSteps  # per-episode step cap
        self.epsilon = epsilon    # exploration probability
        self.alpha = alpha
        self.gamma = gamma
        # Per-state dicts mapping action -> value / visit count /
        # cumulative return, initialised to 0 for every grid cell (x, y).
        self.actionStateValues = {}
        self.actionStateVisits = {}
        self.actionStateRewards = {}
        self.locationSnapShots = []
        for x in range(self.environment.height):
            for y in range(self.environment.width):
                self.actionStateValues[(x,y)] = {}
                self.actionStateVisits[(x,y)] = {}
                self.actionStateRewards[(x,y)] = {}
                for a in self.actionSet:
                    self.actionStateValues[(x,y)][a] = 0
                    self.actionStateVisits[(x,y)][a] = 0
                    self.actionStateRewards[(x,y)][a] = 0
    def decideAction(self):
        """Epsilon-greedy action selection for the agent's current state.

        With probability epsilon a random action is drawn from the action
        set to explore. Otherwise the best-valued action for the current
        state is chosen; ties among equally-best actions are broken
        uniformly at random.
        """
        if np.random.uniform(low=0,high=1) < self.epsilon:
            a = np.random.choice(self.actionSet)
        else:
            greedyValue = max(self.actionStateValues[self.environment.agentLocation].values())
            greedyActions = [k for k,v in self.actionStateValues[self.environment.agentLocation].items() if v == greedyValue]
            a = np.random.choice(greedyActions)
        return a
    def run(self):
        """Run all episodes; return per-episode rewards and snapshots."""
        rewardLog = []
        iteration = 1
        for episode in range(self.episodes):
            step = 0
            terminate = False
            cumulativeReward = 0
            episodeSAR = []
            # Episode snaps are for agent action simulating purposes only;
            # allows for rendering of episodes.
            episodeStepSnaps = []
            episodeStepSnaps.append(self.environment.getAgentandSpecialLoc())
            while not terminate:
                oldState = self.environment.agentLocation # Save current state to variable
                a = self.decideAction() # Decide action; explore or follow policy
                reward = self.environment.step(a) # Calculate and save reward based on action
                episodeSAR.append((oldState,a,reward)) # Keep building the S,A,R, ... array
                cumulativeReward += reward # Add gained reward to episode total reward
                step += 1
                episodeStepSnaps.append(self.environment.getAgentandSpecialLoc())
                if self.environment.checkState() == "terminal" or step >= self.maxSteps:
                    self.predictEpisode(episodeSAR)
                    self.environment.__init__() # Re-initialize the environment
                    terminate = True
            rewardLog.append(cumulativeReward)
            self.locationSnapShots.append(episodeStepSnaps)
            print("Episode", iteration, ":", cumulativeReward)
            iteration += 1
        return rewardLog, self.locationSnapShots
class FirstVisitMCAgent(MCAgent):
    """Monte-Carlo prediction that averages returns from the FIRST visit
    to each (state, action) pair in an episode."""

    def predictEpisode(self, episodeSAR):
        """Update action-state values from one episode's (S, A, R) list.

        Only the first occurrence of each (state, action) pair
        contributes; later occurrences in the same episode are ignored.
        """
        seen = set()  # (state, action) pairs already credited this episode
        for start, (state, action, _reward) in enumerate(episodeSAR):
            if (state, action) in seen:
                # Already credited earlier in this episode — skip.
                continue
            seen.add((state, action))
            # Return G: cumulative reward from this step to episode end.
            g = sum(sar[2] for sar in episodeSAR[start:])
            # Lifetime visit count and cumulative return for the pair.
            self.actionStateVisits[state][action] += 1
            self.actionStateRewards[state][action] += g
            # Value estimate = average return over all first visits.
            self.actionStateValues[state][action] = (
                self.actionStateRewards[state][action]
                / self.actionStateVisits[state][action])
class EveryVisitMCAgent(MCAgent):
    """Monte-Carlo prediction that averages returns from EVERY visit to
    each (state, action) pair.

    BUG FIX: the original incremented the visit count for repeat visits
    but never added their returns, biasing value estimates downward.
    Standard every-visit MC (Sutton & Barto, ch. 5) accumulates the
    return G from every occurrence of the pair.
    """

    def predictEpisode(self, episodeSAR):
        """Update action-state values from one episode's (S, A, R) list."""
        for idx, (state, action, _reward) in enumerate(episodeSAR):
            # Return G following THIS occurrence: sum of rewards from
            # this step to the end of the episode.
            g = sum(sar[2] for sar in episodeSAR[idx:])
            self.actionStateVisits[state][action] += 1
            self.actionStateRewards[state][action] += g
            # Value estimate = average return over all visits so far.
            self.actionStateValues[state][action] = (
                self.actionStateRewards[state][action]
                / self.actionStateVisits[state][action])
"numpy.random.choice",
"numpy.random.uniform"
] | [((1456, 1488), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(1)'}), '(low=0, high=1)\n', (1473, 1488), True, 'import numpy as np\n'), ((1520, 1552), 'numpy.random.choice', 'np.random.choice', (['self.actionSet'], {}), '(self.actionSet)\n', (1536, 1552), True, 'import numpy as np\n'), ((1804, 1835), 'numpy.random.choice', 'np.random.choice', (['greedyActions'], {}), '(greedyActions)\n', (1820, 1835), True, 'import numpy as np\n')] |
"""High-level API for saving/loading responses from FE simulation."""
from __future__ import annotations
# Print debug information for this file.
import os
from collections import deque
from copy import deepcopy
from typing import Callable, List, Optional
import numpy as np
from bridge_sim.model import (
Config,
ResponseType,
Point,
PointLoad,
Vehicle,
Config as LibConfig,
PierSettlement,
)
from bridge_sim.scenarios import (
PierSettlementScenario,
Scenario,
HealthyScenario,
transverse_crack,
healthy_damage_w_crack_nodes,
)
from bridge_sim.sim.model import SimParams, ManyResponses, Responses
from bridge_sim.sim.run import FEMRunner, load_expt_responses, load_fem_responses
from bridge_sim.sim.run.opensees import OSRunner
from bridge_sim.util import (
print_i,
print_w,
flatten,
print_d,
round_m,
shorten_path,
log,
)
from pathos import multiprocessing as multiprocessing
D: str = "fem.fem"
# D: bool = False
def responses_to_traffic_array(
    c: Config,
    traffic_array: "TrafficArray",
    response_type: ResponseType,
    damage_scenario: "Scenario",
    points: List[Point],
    sim_runner: Callable[[Config], FEMRunner] = OSRunner,
):
    """The magic function.

    Responses to a traffic array via superposition of unit load responses,
    with pier settlement responses superimposed on top if the scenario is a
    pier settlement scenario.

    Args:
        c: Config, global configuration object.
        traffic_array: TrafficArray, ....
        damage_scenario: DamageScenario, the scenarios scenario of the bridge.
        response_type: ResponseType, the type of sensor response to calculate.
        points: List[Point], points on the bridge to calculate fem at.
        sim_runner: Callable[[Config], FEMRunner], the FEM program to run
            simulations with.
    """
    use_c = damage_scenario.use(c)[0]
    unit_load_matrix = ULResponses.load_ulm(
        c=use_c,
        response_type=response_type,
        points=points,
        sim_runner=sim_runner(use_c),
    )
    # Debug-only shape information. These were bare 'print' calls,
    # inconsistent with the file's 'print_d' debug convention.
    print_d(D, f"traffic array shape = {traffic_array.shape}")
    print_d(D, f"unit load matrix shape = {unit_load_matrix.shape}")
    responses = np.matmul(traffic_array, unit_load_matrix)
    # Calculate the response at each point due to pier settlement.
    pd_responses = np.zeros(responses.shape).T
    assert len(pd_responses) == len(points)
    if isinstance(damage_scenario, PierSettlementScenario):
        pd_expt = list(
            PSResponses.load(c=c, response_type=response_type, fem_runner=sim_runner(c))
        )
        for point_i, point in enumerate(points):
            for pier_displacement in damage_scenario.pier_disps:
                pd_sim_responses = pd_expt[pier_displacement.pier]
                # Superpose the unit pier settlement response, scaled by the
                # ratio of actual to unit displacement.
                pd_responses[point_i] += pd_sim_responses.at_deck(
                    point, interp=False
                ) * (pier_displacement.displacement / c.pd_unit_disp)
    return responses + pd_responses.T
def responses_to_loads_d(
    c: Config,
    response_type: ResponseType,
    points: List[Point],
    loads: List[List[PointLoad]],
    damage_scenario: Optional[Scenario] = None,
):
    """Responses to point-loads via direct simulation (not using superposition).

    Args:
        c: global configuration object.
        response_type: the type of sensor response to calculate.
        points: points on the bridge to calculate responses at.
        loads: one list of point-loads per simulation.
        damage_scenario: must be a healthy scenario; defaults to
            'HealthyScenario()'. BUG FIX: the previous default
            'HealthyScenario()' was evaluated once at function-definition
            time and shared between all calls; a 'None' sentinel avoids that.

    Returns:
        np.ndarray of shape (len(loads), len(points)).
    """
    if damage_scenario is None:
        damage_scenario = HealthyScenario()
    if not isinstance(damage_scenario, HealthyScenario):
        raise ValueError("Only HealthyDamage supported in direct simulation")
    expt_responses = load_expt_responses(
        c=c,
        expt_params=[SimParams(ploads=loads_) for loads_ in loads],
        response_type=response_type,
    )
    result = []
    for sim_responses in expt_responses:
        result.append([sim_responses.at_deck(point, interp=True) for point in points])
        print_i("Interpolating fem in responses_from_load_d")
    return np.array(result)
def responses_to_vehicles_d(
    c: Config,
    response_type: ResponseType,
    points: List[Point],
    mv_vehicles: List[Vehicle],
    times: List[float],
    binned: bool = True,
    damage_scenario: Optional[Scenario] = None,
):
    """Response to vehicles via direct simulation (not using superposition).

    Args:
        c: global configuration object.
        response_type: the type of sensor response to calculate.
        points: points on the bridge to calculate responses at.
        mv_vehicles: the vehicles on the bridge.
        times: times at which to sample each vehicle's loads.
        binned: if True bin the vehicle loads into wheel track bins.
        damage_scenario: must be a healthy scenario; defaults to
            'HealthyScenario()'. A 'None' sentinel avoids sharing a single
            instance created at function-definition time.
    """
    if damage_scenario is None:
        damage_scenario = HealthyScenario()
    if not isinstance(damage_scenario, HealthyScenario):
        raise ValueError("Only HealthyDamage supported in direct simulation")
    if binned:
        loads = [
            [v.to_wheel_track_loads(c=c, time=time) for v in mv_vehicles]
            for time in times
        ]
    else:
        print_w(f"Not using fractions of wheel track bins in simulation")
        loads = [
            [v.to_point_load_pw(time=time, bridge=c.bridge) for v in mv_vehicles]
            for time in times
        ]
    loads = [flatten(vehicle_loads, PointLoad) for vehicle_loads in loads]
    # Debug-only: the previous bare 'print' calls dumped entire load lists to
    # stdout; keep just the per-time-step load counts behind 'print_d'.
    print_d(D, f"load counts per time step = {[len(load_) for load_ in loads]}")
    assert isinstance(loads, list)
    assert isinstance(loads[0], list)
    assert isinstance(loads[0][0], PointLoad)
    return responses_to_loads_d(
        c=c,
        response_type=response_type,
        points=points,
        loads=loads,
        damage_scenario=damage_scenario,
    )
def load(
    config: LibConfig,
    response_type: ResponseType,
    point_loads: Optional[List[PointLoad]] = None,
    pier_settlement: Optional[List[PierSettlement]] = None,
):
    """Responses from a single linear simulation.

    The simulation is only run if results are not found on disk.

    Args:
        config: simulation configuration object.
        response_type: sensor response type to return.
        point_loads: a list of point-loads to apply.
        pier_settlement: a pier settlement to apply.
    """
    # BUG FIX: mutable default arguments ('[]') are created once at function
    # definition and shared between every call; use None sentinels instead.
    if point_loads is None:
        point_loads = []
    if pier_settlement is None:
        pier_settlement = []
    return load_fem_responses(
        c=config,
        sim_params=SimParams(ploads=point_loads, pier_settlement=pier_settlement),
        response_type=response_type,
    )
class PSResponses(ManyResponses):
    """Responses of one sensor type for pier settlement simulations."""

    @staticmethod
    def load(
        c: Config,
        response_type: ResponseType,
        fem_runner: FEMRunner,
        save_all: bool = True,
    ):
        """Load a DCExpt from disk, running simulations first if necessary.

        Args:
            c: Config, global configuration object.
            response_type: ResponseType, the type of sensor response to load.
            fem_runner: FEMRunner, the FE program to run simulations with.
            save_all: bool, save all response types when running a simulation.
        """
        # One simulation per bridge support, each settling that support by
        # the unit displacement 'c.pd_unit_disp'.
        sim_params = []
        for pier_index in range(len(c.bridge.supports)):
            sim_params.append(
                SimParams(
                    displacement_ctrl=PierSettlement(c.pd_unit_disp, pier_index)
                )
            )
        return load_expt_responses(
            c=c, expt_params=sim_params, response_type=response_type,
        )
class ULResponses(ManyResponses):
    """Responses of one sensor type for influence line calculations.

    Each simulation is for a different loading position in the longitudinal
    direction of the bridge. The z position is fixed for one ILExpt, thus a
    different ILExpt should be used for each tire wheel on a bridge.
    """

    def response_to(
        self,
        x_frac: float,
        z_frac: float,
        load_x_frac: float,
        load: float,
        y_frac: float = 1,
        time_index: int = 0,
    ):
        """The response value in kN at a position to a load at a position.

        NOTE: only the loading position in longitudinal direction can be chosen,
        with 'load_x_frac', the position in transverse direction is fixed for a
        single ILExpt.

        Args:
            x_frac: float, response position on x-axis in [0 1].
            y_frac: float, response position on y-axis in [0 1].
            z_frac: float, response position on x-axis in [0 1].
            load_x_frac: float, load position on x-axis in [0 1].
            load: float, value of the load in kN.
            time_index: int, time index of the simulation.
        """
        assert 0 <= x_frac <= 1
        assert 0 <= load_x_frac <= 1
        print_d(D, f"x_frac = {x_frac} = load_x_frac = {load_x_frac}")
        response = super().sim_response(
            expt_frac=load_x_frac,
            x_frac=x_frac,
            y_frac=y_frac,
            z_frac=z_frac,
            time_index=time_index,
        )
        # Responses were simulated for a unit load; scale to the given load.
        return response * (load / self.c.il_unit_load_kn)

    # FIX: was a plain function in the class body; it is only called as
    # 'ULResponses.id_str(...)', so mark it @staticmethod to prevent
    # accidental instance access binding the instance to parameter 'c'.
    @staticmethod
    def id_str(
        c: Config,
        response_type: ResponseType,
        sim_runner: FEMRunner,
        wheel_zs: List[float],
    ):
        """A unique ID for this combination of simulation parameters."""
        wheel_zs_str = [round_m(wheel_z) for wheel_z in wheel_zs]
        return (
            f"il-{response_type.name()}-{sim_runner.name}-{c.il_unit_load_kn}"
            + f"-{c.il_num_loads}-z={wheel_zs_str}"
        )

    @staticmethod
    def load_ulm(
        c: Config,
        response_type: ResponseType,
        points: List[Point],
        sim_runner: FEMRunner,
    ):
        """Load or calculate the unit load matrix for the given points.

        The matrix is cached on disk; a previously saved matrix is returned
        directly, otherwise one simulation per wheel track is run (in
        parallel) and the assembled matrix is saved before returning.
        """
        wheel_zs = c.bridge.wheel_track_zs(c)
        filepath = c.get_data_path(
            "ulms",
            (
                ULResponses.id_str(
                    c=c,
                    response_type=response_type,
                    sim_runner=sim_runner,
                    wheel_zs=wheel_zs,
                )
                + str([str(point) for point in points])
            )
            + ".ulm",
        )
        filepath = shorten_path(c=c, bypass_config=True, filepath=filepath)
        if os.path.exists(filepath):
            with open(filepath, "rb") as f:
                return np.load(f)

        def ulm_partial(wheel_z):
            """Slice of unit load matrix for one wheel track."""
            wheel_track = ULResponses.load_wheel_track(
                c=c,
                response_type=response_type,
                fem_runner=sim_runner,
                load_z_frac=c.bridge.z_frac(wheel_z),
                run_only=False,
            )
            partial = np.empty((c.il_num_loads, len(points)))
            i = 0
            for sim_responses in wheel_track:
                for j, point in enumerate(points):
                    partial[i][j] = sim_responses.at_deck(point, interp=False)
                    if wheel_z < 0 and i == 302:
                        log(
                            c,
                            f"z = {wheel_z}, i = 302, partial[i][j] = {partial[i][j]}",
                        )
                i += 1
            assert i == c.il_num_loads
            print_i(f"Calculated unit load matrix for wheel track {wheel_z}")
            return partial

        # Calculate results in parallel.
        print_i(f"Calculating unit load matrix...")
        with multiprocessing.Pool(processes=len(wheel_zs)) as pool:
            partial_results = pool.map(ulm_partial, wheel_zs)
        # And insert into the unit load matrix.
        unit_load_matrix = np.empty((len(wheel_zs) * c.il_num_loads, len(points)))
        for w_ind in range(len(wheel_zs)):
            row_ind = w_ind * c.il_num_loads
            unit_load_matrix[row_ind : row_ind + c.il_num_loads] = partial_results[
                w_ind
            ]
        # Divide by unit load, so the value at a cell is the response to 1 kN.
        unit_load_matrix /= c.il_unit_load_kn
        with open(filepath, "wb") as f:
            np.save(f, unit_load_matrix)
        return unit_load_matrix

    @staticmethod
    def load_wheel_tracks(
        c: Config,
        response_type: ResponseType,
        sim_runner: FEMRunner,
        wheel_zs: List[float],
        run_only: bool = False,
    ):
        """Return a dictionary of wheel tracks indexed by z position.

        Each wheel track will be calculated in parallel if the
        'Config.parallel_ulm' is set. If the 'run_only' option is given, then
        the simulations will run but the results will not be loaded into memory.
        """

        def create_or_load_wheel_track(
            wheel_z,
            _run_only: bool = True,
            left_only: bool = False,
            right_only: bool = False,
        ):
            results = ULResponses.load_wheel_track(
                c=deepcopy(c),
                response_type=response_type,
                fem_runner=deepcopy(sim_runner),
                load_z_frac=c.bridge.z_frac(wheel_z),
                run_only=_run_only,
                left_only=left_only,
                right_only=right_only,
            )
            # If results are only being generated, then evaluate the generator,
            # such that the results are generated. Otherwise leave the generator
            # to be used by the caller.
            if _run_only:
                # This forces the generator to be consumed without keeping the
                # contents in memory. https://stackoverflow.com/a/47456679
                deque(results, maxlen=0)
                return
            # Otherwise return the generator, to be evaluated.
            else:
                return results

        # For each wheel track, generate it if doesn't exists.
        # Use upto (2 x wheel_track) parallelism.
        if c.parallel_ulm:
            processes = min(multiprocessing.cpu_count(), len(wheel_zs * 2))
            print_i(f"Running with {processes} processes")
            with multiprocessing.Pool(processes=processes) as pool:

                def _run(params):
                    """Tuple of wheel z & left/right and runs wheel track."""
                    z, l, r = params
                    create_or_load_wheel_track(wheel_z=z, left_only=l, right_only=r)

                # Construct two variants of parameters for each wheel track.
                all_params = []
                for wheel_z in wheel_zs:
                    all_params.append((wheel_z, True, False))
                    all_params.append((wheel_z, False, True))
                assert len(all_params) == 2 * len(wheel_zs)
                pool.map(_run, all_params)
        else:
            list(map(create_or_load_wheel_track, wheel_zs))
        if run_only:
            return
        # Load all wheel tracks from disk into the resulting dictionary.
        result = dict()
        for wheel_z in wheel_zs:
            result[wheel_z] = create_or_load_wheel_track(
                wheel_z=wheel_z, _run_only=False
            )
        return result

    @staticmethod
    def load_wheel_track(
        c: Config,
        response_type: ResponseType,
        fem_runner: FEMRunner,
        load_z_frac: float,
        run_only: bool,
        indices: Optional[List[int]] = None,
        left_only: bool = False,
        right_only: bool = False,
    ) -> List[Responses]:
        """Load a wheel track from disk, running simulations if necessary.

        NOTE: The result is a generator, not a list.

        Args:
            c: Config, global configuration object.
            response_type: ResponseType, type of sensor response to return.
            fem_runner: FEMRunner, program to run finite element simulations.
            load_z_frac: float, load position as a fraction of the transverse
                direction in [0 1].
            run_only: bool, only run the simulation, do not load results.
            indices: optional indices of the simulations to run (None = all).
            left_only: bool, if true only run the left-hand-side of the wheel
                track. If true, right_only must be false and indices None.
            right_only: bool, if True only run the right-hand-side of the wheel
                track. If true, left_only must be false and indices None.
        """
        wheel_xs = c.bridge.wheel_track_xs(c)
        first_right_index = len(wheel_xs) // 2
        # Debug-only (previously a bare 'print').
        print_d(D, f"First right index = {first_right_index}")
        if left_only:
            assert not right_only
            assert indices is None
            wheel_xs = wheel_xs[:first_right_index]
        if right_only:
            assert not left_only
            assert indices is None
            wheel_xs = wheel_xs[first_right_index:]
        assert 0 <= load_z_frac <= 1
        # Determine experiment simulation parameters.
        expt_params = [
            SimParams(
                ploads=[
                    PointLoad(
                        x_frac=c.bridge.x_frac(x),
                        z_frac=load_z_frac,
                        kn=c.il_unit_load_kn,
                    )
                ],
                clean_build=True,
            )
            for x in wheel_xs
        ]
        # Filter simulations, only running those in 'indices'.
        if indices is not None:
            # BUG FIX: 'expt_params' is a plain list — the previous code
            # assigned to a non-existent '.sim_params' attribute, raising
            # AttributeError whenever 'indices' was given.
            expt_params = [
                sp for i, sp in enumerate(expt_params) if i in indices
            ]
        return load_expt_responses(
            c=c,
            expt_params=expt_params,
            response_type=response_type,
            sim_runner=fem_runner,
            run_only=run_only,
        )
def run_uls(
    c: Config,
    piers: bool,
    healthy: bool,
    cracked: bool,
    crack_x: Optional[int] = None,
    crack_length: Optional[int] = None,
):
    """Run all unit load simulations.

    Args:
        c: global configuration object.
        piers: also run the pier settlement simulations.
        healthy: run wheel track simulations on the healthy bridge
            (takes precedence over 'cracked').
        cracked: run wheel track simulations on the cracked bridge.
        crack_x: x position of the crack zone, if any.
        crack_length: length of the crack zone, if any.
    """

    def crack_f():
        return transverse_crack(at_x=crack_x, length=crack_length)

    def run_wheel_tracks(config):
        """Run-only wheel track simulations for the given configuration."""
        ULResponses.load_wheel_tracks(
            c=config,
            response_type=response_type,
            sim_runner=OSRunner(config),
            wheel_zs=config.bridge.wheel_track_zs(config),
            run_only=True,
        )

    print_i(
        f"Running simulations with crack zone at x = {crack_x}, length = {crack_length}"
    )
    response_type = ResponseType.YTranslation
    if piers:
        # Pier settlement: consume the generator so every simulation runs.
        list(PSResponses.load(c=c, response_type=response_type, fem_runner=OSRunner(c)))
    if healthy:
        # Unit load simulations (healthy bridge).
        run_wheel_tracks(healthy_damage_w_crack_nodes(crack_f=crack_f).use(c)[0])
    elif cracked:
        # Unit load simulations (cracked bridge).
        run_wheel_tracks(crack_f().use(c)[0])
def run_ulm(c: Config, healthy: bool, cracked: bool, x_i: int, z_i: int):
    """Run unit load matrix simulations for a single wheel position.

    Args:
        c: global configuration object.
        healthy: run for the healthy bridge.
        cracked: run for the cracked bridge.
        x_i: index into the wheel track x positions (FIX: was mis-annotated
            as float; it is used as a list index).
        z_i: index into the wheel track z positions (FIX: was mis-annotated
            as float; it is used as a list index).
    """
    response_type = ResponseType.YTranslation
    wheel_xs = c.bridge.wheel_track_xs(c)
    wheel_x = wheel_xs[x_i]
    wheel_zs = c.bridge.wheel_track_zs(c)
    wheel_z = wheel_zs[z_i]
    print_i(f"Wheel (x, z) = ({wheel_x}, {wheel_z})")
    point = Point(x=wheel_x, y=0, z=wheel_z)
    if healthy:
        ULResponses.load_ulm(
            c=c, response_type=response_type, points=[point], sim_runner=OSRunner(c),
        )
    if cracked:
        c = transverse_crack().use(c)[0]
        ULResponses.load_ulm(
            c=c, response_type=response_type, points=[point], sim_runner=OSRunner(c),
        )
| [
"numpy.array",
"bridge_sim.scenarios.HealthyScenario",
"bridge_sim.util.round_m",
"copy.deepcopy",
"numpy.save",
"bridge_sim.util.print_d",
"os.path.exists",
"collections.deque",
"bridge_sim.util.flatten",
"numpy.matmul",
"bridge_sim.sim.model.SimParams",
"pathos.multiprocessing.Pool",
"brid... | [((1988, 2030), 'numpy.matmul', 'np.matmul', (['traffic_array', 'unit_load_matrix'], {}), '(traffic_array, unit_load_matrix)\n', (1997, 2030), True, 'import numpy as np\n'), ((2937, 2954), 'bridge_sim.scenarios.HealthyScenario', 'HealthyScenario', ([], {}), '()\n', (2952, 2954), False, 'from bridge_sim.scenarios import PierSettlementScenario, Scenario, HealthyScenario, transverse_crack, healthy_damage_w_crack_nodes\n'), ((3566, 3582), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (3574, 3582), True, 'import numpy as np\n'), ((3800, 3817), 'bridge_sim.scenarios.HealthyScenario', 'HealthyScenario', ([], {}), '()\n', (3815, 3817), False, 'from bridge_sim.scenarios import PierSettlementScenario, Scenario, HealthyScenario, transverse_crack, healthy_damage_w_crack_nodes\n'), ((16903, 17002), 'bridge_sim.util.print_i', 'print_i', (['f"""Running simulations with crack zone at x = {crack_x}, length = {crack_length}"""'], {}), "(\n f'Running simulations with crack zone at x = {crack_x}, length = {crack_length}'\n )\n", (16910, 17002), False, 'from bridge_sim.util import print_i, print_w, flatten, print_d, round_m, shorten_path, log\n'), ((18167, 18216), 'bridge_sim.util.print_i', 'print_i', (['f"""Wheel (x, z) = ({wheel_x}, {wheel_z})"""'], {}), "(f'Wheel (x, z) = ({wheel_x}, {wheel_z})')\n", (18174, 18216), False, 'from bridge_sim.util import print_i, print_w, flatten, print_d, round_m, shorten_path, log\n'), ((18229, 18261), 'bridge_sim.model.Point', 'Point', ([], {'x': 'wheel_x', 'y': '(0)', 'z': 'wheel_z'}), '(x=wheel_x, y=0, z=wheel_z)\n', (18234, 18261), False, 'from bridge_sim.model import Config, ResponseType, Point, PointLoad, Vehicle, Config as LibConfig, PierSettlement\n'), ((2118, 2143), 'numpy.zeros', 'np.zeros', (['responses.shape'], {}), '(responses.shape)\n', (2126, 2143), True, 'import numpy as np\n'), ((3501, 3554), 'bridge_sim.util.print_i', 'print_i', (['"""Interpolating fem in responses_from_load_d"""'], {}), "('Interpolating fem 
in responses_from_load_d')\n", (3508, 3554), False, 'from bridge_sim.util import print_i, print_w, flatten, print_d, round_m, shorten_path, log\n'), ((4205, 4270), 'bridge_sim.util.print_w', 'print_w', (['f"""Not using fractions of wheel track bins in simulation"""'], {}), "(f'Not using fractions of wheel track bins in simulation')\n", (4212, 4270), False, 'from bridge_sim.util import print_i, print_w, flatten, print_d, round_m, shorten_path, log\n'), ((4424, 4457), 'bridge_sim.util.flatten', 'flatten', (['vehicle_loads', 'PointLoad'], {}), '(vehicle_loads, PointLoad)\n', (4431, 4457), False, 'from bridge_sim.util import print_i, print_w, flatten, print_d, round_m, shorten_path, log\n'), ((6492, 6570), 'bridge_sim.sim.run.load_expt_responses', 'load_expt_responses', ([], {'c': 'c', 'expt_params': 'expt_params', 'response_type': 'response_type'}), '(c=c, expt_params=expt_params, response_type=response_type)\n', (6511, 6570), False, 'from bridge_sim.sim.run import FEMRunner, load_expt_responses, load_fem_responses\n'), ((7859, 7921), 'bridge_sim.util.print_d', 'print_d', (['D', 'f"""x_frac = {x_frac} = load_x_frac = {load_x_frac}"""'], {}), "(D, f'x_frac = {x_frac} = load_x_frac = {load_x_frac}')\n", (7866, 7921), False, 'from bridge_sim.util import print_i, print_w, flatten, print_d, round_m, shorten_path, log\n'), ((9155, 9211), 'bridge_sim.util.shorten_path', 'shorten_path', ([], {'c': 'c', 'bypass_config': '(True)', 'filepath': 'filepath'}), '(c=c, bypass_config=True, filepath=filepath)\n', (9167, 9211), False, 'from bridge_sim.util import print_i, print_w, flatten, print_d, round_m, shorten_path, log\n'), ((9224, 9248), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (9238, 9248), False, 'import os\n'), ((10385, 10428), 'bridge_sim.util.print_i', 'print_i', (['f"""Calculating unit load matrix..."""'], {}), "(f'Calculating unit load matrix...')\n", (10392, 10428), False, 'from bridge_sim.util import print_i, print_w, flatten, print_d, 
round_m, shorten_path, log\n'), ((16415, 16540), 'bridge_sim.sim.run.load_expt_responses', 'load_expt_responses', ([], {'c': 'c', 'expt_params': 'expt_params', 'response_type': 'response_type', 'sim_runner': 'fem_runner', 'run_only': 'run_only'}), '(c=c, expt_params=expt_params, response_type=\n response_type, sim_runner=fem_runner, run_only=run_only)\n', (16434, 16540), False, 'from bridge_sim.sim.run import FEMRunner, load_expt_responses, load_fem_responses\n'), ((16846, 16897), 'bridge_sim.scenarios.transverse_crack', 'transverse_crack', ([], {'at_x': 'crack_x', 'length': 'crack_length'}), '(at_x=crack_x, length=crack_length)\n', (16862, 16897), False, 'from bridge_sim.scenarios import PierSettlementScenario, Scenario, HealthyScenario, transverse_crack, healthy_damage_w_crack_nodes\n'), ((5434, 5496), 'bridge_sim.sim.model.SimParams', 'SimParams', ([], {'ploads': 'point_loads', 'pier_settlement': 'pier_settlement'}), '(ploads=point_loads, pier_settlement=pier_settlement)\n', (5443, 5496), False, 'from bridge_sim.sim.model import SimParams, ManyResponses, Responses\n'), ((8348, 8364), 'bridge_sim.util.round_m', 'round_m', (['wheel_z'], {}), '(wheel_z)\n', (8355, 8364), False, 'from bridge_sim.util import print_i, print_w, flatten, print_d, round_m, shorten_path, log\n'), ((10242, 10307), 'bridge_sim.util.print_i', 'print_i', (['f"""Calculated unit load matrix for wheel track {wheel_z}"""'], {}), "(f'Calculated unit load matrix for wheel track {wheel_z}')\n", (10249, 10307), False, 'from bridge_sim.util import print_i, print_w, flatten, print_d, round_m, shorten_path, log\n'), ((11075, 11103), 'numpy.save', 'np.save', (['f', 'unit_load_matrix'], {}), '(f, unit_load_matrix)\n', (11082, 11103), True, 'import numpy as np\n'), ((12972, 13018), 'bridge_sim.util.print_i', 'print_i', (['f"""Running with {processes} processes"""'], {}), "(f'Running with {processes} processes')\n", (12979, 13018), False, 'from bridge_sim.util import print_i, print_w, flatten, print_d, 
round_m, shorten_path, log\n'), ((3259, 3283), 'bridge_sim.sim.model.SimParams', 'SimParams', ([], {'ploads': 'loads_'}), '(ploads=loads_)\n', (3268, 3283), False, 'from bridge_sim.sim.model import SimParams, ManyResponses, Responses\n'), ((9317, 9327), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (9324, 9327), True, 'import numpy as np\n'), ((12583, 12607), 'collections.deque', 'deque', (['results'], {'maxlen': '(0)'}), '(results, maxlen=0)\n', (12588, 12607), False, 'from collections import deque\n'), ((12912, 12939), 'pathos.multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (12937, 12939), True, 'from pathos import multiprocessing as multiprocessing\n'), ((13036, 13077), 'pathos.multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'processes'}), '(processes=processes)\n', (13056, 13077), True, 'from pathos import multiprocessing as multiprocessing\n'), ((17438, 17449), 'bridge_sim.sim.run.opensees.OSRunner', 'OSRunner', (['c'], {}), '(c)\n', (17446, 17449), False, 'from bridge_sim.sim.run.opensees import OSRunner\n'), ((18381, 18392), 'bridge_sim.sim.run.opensees.OSRunner', 'OSRunner', (['c'], {}), '(c)\n', (18389, 18392), False, 'from bridge_sim.sim.run.opensees import OSRunner\n'), ((18564, 18575), 'bridge_sim.sim.run.opensees.OSRunner', 'OSRunner', (['c'], {}), '(c)\n', (18572, 18575), False, 'from bridge_sim.sim.run.opensees import OSRunner\n'), ((6380, 6413), 'bridge_sim.model.PierSettlement', 'PierSettlement', (['c.pd_unit_disp', 'i'], {}), '(c.pd_unit_disp, i)\n', (6394, 6413), False, 'from bridge_sim.model import Config, ResponseType, Point, PointLoad, Vehicle, Config as LibConfig, PierSettlement\n'), ((11899, 11910), 'copy.deepcopy', 'deepcopy', (['c'], {}), '(c)\n', (11907, 11910), False, 'from copy import deepcopy\n'), ((11984, 12004), 'copy.deepcopy', 'deepcopy', (['sim_runner'], {}), '(sim_runner)\n', (11992, 12004), False, 'from copy import deepcopy\n'), ((17170, 17181), 
'bridge_sim.sim.run.opensees.OSRunner', 'OSRunner', (['c'], {}), '(c)\n', (17178, 17181), False, 'from bridge_sim.sim.run.opensees import OSRunner\n'), ((17212, 17257), 'bridge_sim.scenarios.healthy_damage_w_crack_nodes', 'healthy_damage_w_crack_nodes', ([], {'crack_f': 'crack_f'}), '(crack_f=crack_f)\n', (17240, 17257), False, 'from bridge_sim.scenarios import PierSettlementScenario, Scenario, HealthyScenario, transverse_crack, healthy_damage_w_crack_nodes\n'), ((17757, 17768), 'bridge_sim.sim.run.opensees.OSRunner', 'OSRunner', (['c'], {}), '(c)\n', (17765, 17768), False, 'from bridge_sim.sim.run.opensees import OSRunner\n'), ((18432, 18450), 'bridge_sim.scenarios.transverse_crack', 'transverse_crack', ([], {}), '()\n', (18448, 18450), False, 'from bridge_sim.scenarios import PierSettlementScenario, Scenario, HealthyScenario, transverse_crack, healthy_damage_w_crack_nodes\n'), ((10018, 10084), 'bridge_sim.util.log', 'log', (['c', 'f"""z = {wheel_z}, i = 302, partial[i][j] = {partial[i][j]}"""'], {}), "(c, f'z = {wheel_z}, i = 302, partial[i][j] = {partial[i][j]}')\n", (10021, 10084), False, 'from bridge_sim.util import print_i, print_w, flatten, print_d, round_m, shorten_path, log\n')] |
# -*- coding: UTF-8 -*-
"""
Fazova trajektorie spinani ruznych typu zateze tranzistorem - myslim ze to byl KD503
"""
#pylint: disable=invalid-name
from matplotlib import pyplot as plt
import numpy as np
POINTS = 500
def graf_time(t, u, i, dt, dv, di, title):
    """Plot collector current and collector-emitter voltage versus time.

    Samples are given in oscilloscope screen divisions; dt, dv and di are
    the per-division scale factors.
    """
    times = [dt * sample for sample in t]
    currents = [di * sample for sample in i]
    voltages = [dv * sample for sample in u]
    figure = plt.figure(dpi=100)
    axes = figure.add_subplot()
    axes.set_xlabel("t[S]")
    axes.set_ylabel("")
    axes.plot(times, currents, label='Ic[A]')
    axes.plot(times, voltages, label='Uce[V]')
    axes.grid()
    axes.legend(loc='upper left')
    plt.title(title)
    plt.show()
def graf_xy_soar(u, i, dv, di, ic, uce, pmax, p_u, u_p, title):
    """Plot the measured (Uce, Ic) trajectory against the SOAR limits.

    Args:
        u, i: voltage/current samples in screen divisions.
        dv, di: scale factors (V/div resp. A/div).
        ic: maximum collector current [A].
        uce: maximum collector-emitter voltage [V].
        pmax: maximum power dissipation [W].
        p_u: reduced power limit [W] used above voltage u_p.
        u_p: voltage [V] where the reduced power limit takes over.
        title: legend label for the measured trajectory.
    """
    i_ax = [curr * di for curr in i]
    v_ax = [volt * dv for volt in u]
    # soar
    i_max_ax = np.full((POINTS, 1), ic)
    u_ax = np.linspace(0, uce, POINTS)
    soar = np.zeros([POINTS])
    soar_reduced = np.zeros([POINTS])
    # NOTE(review): u_ax[0] is 0, so these divisions yield 'inf' (plus a
    # NumPy warning) in the first element; it is then clamped to ic below.
    i_pm = pmax / u_ax
    i_pm_reduced = p_u / u_ax
    for idx in range(0, POINTS):
        # The SOAR curve is the current limit until the power hyperbola
        # pmax / Uce drops below it.
        if i_pm[idx] > i_max_ax[idx]:
            soar[idx] = i_max_ax[idx]
            soar_reduced[idx] = i_max_ax[idx]
        else:
            soar[idx] = i_pm[idx]
            # 'idx * uce < POINTS * u_p' approximates u_ax[idx] < u_p —
            # presumably intentional; off by the linspace end-point handling.
            if idx * uce < POINTS * u_p:
                soar_reduced[idx] = i_pm[idx]
            else:
                soar_reduced[idx] = i_pm_reduced[idx]
    fig = plt.figure(dpi=100)
    ax = fig.add_subplot()
    ax.set_xlabel("Uce[V]")
    ax.set_ylabel("Ic[A]")
    ax.plot(v_ax, i_ax, label=title)
    ax.plot(u_ax, soar, label='SOAR')
    ax.plot(u_ax, soar_reduced, label='SOAR@T=100°C')
    ax.legend()
    ax.grid()
    plt.show()
# Transistor SOAR (safe operating area) limits of the device under test.
ic_max = 20 # A, maximum collector current
uce_max = 80 # V, maximum collector-emitter voltage
p_max = 150 # W, maximum power dissipation
p_u = 65 # W, reduced power limit at elevated temperature
u_p = 30 # V, voltage above which the reduced limit applies
# resistive load
# NOTE(review): the section comments and plot titles look inconsistent
# (this section says 'resistive load' but the title reads "inductive load
# with diode compensation") — confirm against the lab notes.
title = 'L zatez s diodovou kompenzaci'
r_t_div = 200E-6
r_v_div = 50 # V/div
r_i_div = 1 # 1A/1V
# Oscilloscope readings in screen divisions: time, voltage, current.
r_t = [0.1, 0.2, 0.2, 1.0, 3.0, 4.0, 5.0, 6.0, 7.0, 7.4, 7.4, 7.5]
r_u = [0.3, 0.3, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.3, 0.3]
r_i = [0.1, 0.1, 0.1, 0.4, 1.3, 1.75, 2.0, 2.33, 2.66, 2.7, 0.1, 0.1]
graf_time(r_t, r_u, r_i, r_t_div, r_v_div, r_i_div, title)
graf_xy_soar(
    r_u, r_i, r_v_div, r_i_div,
    ic_max, uce_max, p_max, p_u, u_p,
    title)
# RL load
title = 'L bez kompenzace'
rl_t_div = 200E-6
rl_v_div = 50 # V/div
rl_i_div = 1 # 1A/1V
rl_t = [0.1, 0.2, 0.2, 1.0, 3.0, 5.0, 7.0, 7.4, 7.5, 7.6, 7.7, 7.8]
rl_u = [0.3, 0.3, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 2.4, 2.2, 0.5, 0.3]
rl_i = [0.1, 0.1, 0.1, 0.33, 1.33, 1.75, 2.8, 3, 1.5, 0.1, 0.1, 0.1]
graf_time(rl_t, rl_u, rl_i, rl_t_div, rl_v_div, rl_i_div, title)
graf_xy_soar(
    rl_u, rl_i, rl_v_div, rl_i_div,
    ic_max, uce_max, p_max, p_u, u_p,
    title)
# RL load with diode
title = 'L s kompenzaci R'
rld_t_div = 200E-6
rld_v_div = 50 # V/div
rld_i_div = 1 # 1A/1V
rld_t = [0.1, 0.2, 0.2, 1.0, 3.0, 5.0, 7.0, 7.4, 7.5, 7.6, 7.7, 7.8, 7.9, 8.0, 8.2]
rld_u = [0.3, 0.3, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 1, 0.8, 0.66, 0.5, 0.45, 0.4, 0.3]
rld_i = [0.1, 0.1, 0.1, 0.33, 1.33, 1.75, 2.8, 3, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
graf_time(rld_t, rld_u, rld_i, rld_t_div, rld_v_div, rld_i_div, title)
graf_xy_soar(
    rld_u, rld_i, rld_v_div, rld_i_div,
    ic_max, uce_max, p_max, p_u, u_p,
    title)
| [
"numpy.full",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.zeros",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((383, 402), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': '(100)'}), '(dpi=100)\n', (393, 402), True, 'from matplotlib import pyplot as plt\n'), ((607, 623), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (616, 623), True, 'from matplotlib import pyplot as plt\n'), ((628, 638), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (636, 638), True, 'from matplotlib import pyplot as plt\n'), ((805, 829), 'numpy.full', 'np.full', (['(POINTS, 1)', 'ic'], {}), '((POINTS, 1), ic)\n', (812, 829), True, 'import numpy as np\n'), ((841, 868), 'numpy.linspace', 'np.linspace', (['(0)', 'uce', 'POINTS'], {}), '(0, uce, POINTS)\n', (852, 868), True, 'import numpy as np\n'), ((880, 898), 'numpy.zeros', 'np.zeros', (['[POINTS]'], {}), '([POINTS])\n', (888, 898), True, 'import numpy as np\n'), ((918, 936), 'numpy.zeros', 'np.zeros', (['[POINTS]'], {}), '([POINTS])\n', (926, 936), True, 'import numpy as np\n'), ((1362, 1381), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': '(100)'}), '(dpi=100)\n', (1372, 1381), True, 'from matplotlib import pyplot as plt\n'), ((1627, 1637), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1635, 1637), True, 'from matplotlib import pyplot as plt\n')] |
import cv2
import numpy as np
from random import randint
animals_net = cv2.ml.ANN_MLP_create()
# Use resilient backpropagation (RPROP) as the training method, updating
# the existing weights on every train() call.
animals_net.setTrainMethod(cv2.ml.ANN_MLP_RPROP | cv2.ml.ANN_MLP_UPDATE_WEIGHTS)
# Use the symmetric sigmoid as the activation function.
animals_net.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM)
# The book used 8 hidden nodes, which performed very poorly, so use 3
# instead (3 inputs, 3 hidden, 4 outputs).
animals_net.setLayerSizes(np.array([3, 3, 4]))
# Termination criteria: stop after 10 iterations or epsilon 1.
animals_net.setTermCriteria((cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1))
"""Input arrays
weight, length, teeth
"""
"""Output arrays
dog, eagle, dolphin and dragon
"""
def dog_sample():
    """Random (weight, length, teeth) feature vector for a dog."""
    weight = randint(10, 20)
    teeth = randint(38, 42)
    return [weight, 1, teeth]


def dog_class():
    """One-hot label for the dog class."""
    return [1, 0, 0, 0]


def condor_sample():
    """Random (weight, length, teeth) feature vector for a condor."""
    weight = randint(3, 10)
    length = randint(3, 5)
    return [weight, length, 0]


def condor_class():
    """One-hot label for the condor (eagle) class."""
    return [0, 1, 0, 0]


def dolphin_sample():
    """Random (weight, length, teeth) feature vector for a dolphin."""
    weight = randint(30, 190)
    length = randint(5, 15)
    teeth = randint(80, 100)
    return [weight, length, teeth]


def dolphin_class():
    """One-hot label for the dolphin class."""
    return [0, 0, 1, 0]


def dragon_sample():
    """Random (weight, length, teeth) feature vector for a dragon."""
    weight = randint(1200, 1800)
    length = randint(30, 40)
    teeth = randint(160, 180)
    return [weight, length, teeth]


def dragon_class():
    """One-hot label for the dragon class."""
    return [0, 0, 0, 1]
SAMPLES = 5000
# Add 5000 training samples for each animal class, training incrementally
# (UPDATE_WEIGHTS) one row at a time.
for x in range(1, SAMPLES + 1):
    if x % 100 == 0:
        print("Samples %d/%d" % (x, SAMPLES))
    animals_net.train(np.array([dog_sample()], dtype=np.float32), cv2.ml.ROW_SAMPLE,
                      np.array([dog_class()], dtype=np.float32))
    animals_net.train(np.array([condor_sample()], dtype=np.float32), cv2.ml.ROW_SAMPLE,
                      np.array([condor_class()], dtype=np.float32))
    animals_net.train(np.array([dolphin_sample()], dtype=np.float32), cv2.ml.ROW_SAMPLE,
                      np.array([dolphin_class()], dtype=np.float32))
    animals_net.train(np.array([dragon_sample()], dtype=np.float32), cv2.ml.ROW_SAMPLE,
                      np.array([dragon_class()], dtype=np.float32))
# Sanity-check: predict one fresh random sample of each class.
print(animals_net.predict(np.array([dog_sample()], dtype=np.float32)))
print(animals_net.predict(np.array([condor_sample()], dtype=np.float32)))
print(animals_net.predict(np.array([dolphin_sample()], dtype=np.float32)))
print(animals_net.predict(np.array([dragon_sample()], dtype=np.float32)))
| [
"numpy.array",
"cv2.ml.ANN_MLP_create",
"random.randint"
] | [((72, 95), 'cv2.ml.ANN_MLP_create', 'cv2.ml.ANN_MLP_create', ([], {}), '()\n', (93, 95), False, 'import cv2\n'), ((343, 362), 'numpy.array', 'np.array', (['[3, 3, 4]'], {}), '([3, 3, 4])\n', (351, 362), True, 'import numpy as np\n'), ((587, 602), 'random.randint', 'randint', (['(10)', '(20)'], {}), '(10, 20)\n', (594, 602), False, 'from random import randint\n'), ((607, 622), 'random.randint', 'randint', (['(38)', '(42)'], {}), '(38, 42)\n', (614, 622), False, 'from random import randint\n'), ((702, 716), 'random.randint', 'randint', (['(3)', '(10)'], {}), '(3, 10)\n', (709, 716), False, 'from random import randint\n'), ((718, 731), 'random.randint', 'randint', (['(3)', '(5)'], {}), '(3, 5)\n', (725, 731), False, 'from random import randint\n'), ((818, 834), 'random.randint', 'randint', (['(30)', '(190)'], {}), '(30, 190)\n', (825, 834), False, 'from random import randint\n'), ((836, 850), 'random.randint', 'randint', (['(5)', '(15)'], {}), '(5, 15)\n', (843, 850), False, 'from random import randint\n'), ((852, 868), 'random.randint', 'randint', (['(80)', '(100)'], {}), '(80, 100)\n', (859, 868), False, 'from random import randint\n'), ((952, 971), 'random.randint', 'randint', (['(1200)', '(1800)'], {}), '(1200, 1800)\n', (959, 971), False, 'from random import randint\n'), ((973, 988), 'random.randint', 'randint', (['(30)', '(40)'], {}), '(30, 40)\n', (980, 988), False, 'from random import randint\n'), ((990, 1007), 'random.randint', 'randint', (['(160)', '(180)'], {}), '(160, 180)\n', (997, 1007), False, 'from random import randint\n')] |
import pickle
import tensorflow as tf
import numpy as np
NUM_HEADING_BIN = 12
NUM_OBJECT_POINT = 512
NUM_SIZE_CLUSTER = 8
# Mapping between object type names and class indices (and the inverse).
g_type2class = {'car': 0, 'Van': 1, 'Truck': 2, 'pedestrian': 3,
                'Person_sitting': 4, 'bicycle': 5, 'Tram': 6, 'Misc': 7}
g_class2type = {g_type2class[t]: t for t in g_type2class}
g_type2onehotclass = {'car': 0, 'pedestrian': 1, 'bicycle': 2}
# Mean bounding-box size per object type — presumably (l, w, h) in meters;
# confirm against the dataset labels.
g_type_mean_size = {'car': np.array([4.76, 1.93, 1.72]),
                    'Van': np.array([5.06763659, 1.9007158, 2.20532825]),
                    'Truck': np.array([10.13586957, 2.58549199, 3.2520595]),
                    'pedestrian': np.array([0.81, 0.77, 1.78]),
                    'Person_sitting': np.array([0.80057803, 0.5983815, 1.27450867]),
                    'bicycle': np.array([1.76, 0.63, 1.44]),
                    'Tram': np.array([16.17150617, 2.53246914, 3.53079012]),
                    'Misc': np.array([3.64300781, 1.54298177, 1.92320313])}
g_mean_size_arr = np.zeros((NUM_SIZE_CLUSTER, 3)) # size clusters, one row per class
for i in range(NUM_SIZE_CLUSTER):
    g_mean_size_arr[i, :] = g_type_mean_size[g_class2type[i]]
def int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def int64_list_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
# def bytes_feature(value):
# return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = value.numpy() # BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def bytes_list_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def float_list_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def rotate_pc_along_y(pc, rot_angle):
'''
Input:
pc: numpy array (N,C), first 3 channels are XYZ
z is facing forward, x is left ward, y is downward
rot_angle: rad scalar
Output:
pc: updated pc with XYZ rotated
'''
cosval = np.cos(rot_angle)
sinval = np.sin(rot_angle)
rotmat = np.array([[cosval, -sinval], [sinval, cosval]])
pc[:, [0, 2]] = np.dot(pc[:, [0, 2]], np.transpose(rotmat))
return pc
def angle2class(angle, num_class):
''' Convert continuous angle to discrete class and residual.
Input:
angle: rad scalar, from 0-2pi (or -pi~pi), class center at
0, 1*(2pi/N), 2*(2pi/N) ... (N-1)*(2pi/N)
num_class: int scalar, number of classes N
Output:
class_id, int, among 0,1,...,N-1
residual_angle: float, a number such that
class*(2pi/N) + residual_angle = angle
'''
angle = angle % (2 * np.pi)
assert (0 <= angle <= 2 * np.pi)
angle_per_class = 2 * np.pi / float(num_class)
shifted_angle = (angle + angle_per_class / 2) % (2 * np.pi)
class_id = int(shifted_angle / angle_per_class)
residual_angle = shifted_angle - \
(class_id * angle_per_class + angle_per_class / 2)
return class_id, residual_angle
def class2angle(pred_cls, residual, num_class, to_label_format=True):
''' Inverse function to angle2class.
If to_label_format, adjust angle to the range as in labels.
'''
angle_per_class = 2 * np.pi / float(num_class)
angle_center = pred_cls * angle_per_class
angle = angle_center + residual
if to_label_format and angle > np.pi:
angle = angle - 2 * np.pi
return angle
def size2class(size, type_name):
""" Convert 3D bounding box size to template class and residuals.
todo (rqi): support multiple size clusters per type.
Input:
size: numpy array of shape (3,) for (l,w,h)
type_name: string
Output:
size_class: int scalar
size_residual: numpy array of shape (3,)
"""
size_class = g_type2class[type_name]
size_residual = size - g_type_mean_size[type_name]
return size_class, size_residual
def class2size(pred_cls, residual):
''' Inverse function to size2class. '''
mean_size = g_type_mean_size[g_class2type[pred_cls]]
return mean_size + residual
class tfrecGen_test(object):
"""Convert the pickle file generated during frustum extraction into tensorflow recond file
inputs:
pickle_path: Path to the test pickle file
tfrec_path: Path to save the tensorflow record file"""
def __init__(self, pickle_path):
self.npoints = 1024
self.random_flip = True
self.random_shift = True
self.rotate_to_center = True
self.one_hot = True
self.from_rgb_detection = False
self.g_type2onehotclass = {'car': 0, 'pedestrian': 1, 'bicycle': 2}
with open(pickle_path, 'rb') as fp:
self.id_list = pickle.load(fp, encoding='latin1')
self.box2d_list = pickle.load(fp, encoding='latin1')
self.input_list = pickle.load(fp, encoding='latin1')
self.type_list = pickle.load(fp, encoding='latin1')
# frustum_angle is clockwise angle from positive x-axis
self.frustum_angle_list = pickle.load(fp, encoding='latin1')
self.prob_list = pickle.load(fp, encoding='latin1')
def get_center_view_rot_angle(self, index):
""" Get the frustum rotation angle, it isshifted by pi/2 so that it
can be directly used to adjust GT heading angle """
return np.pi / 2.0 + self.frustum_angle_list[index]
def get_center_view_point_set(self, index):
""" Frustum rotation of point clouds.
NxC points with first 3 channels as XYZ
z is facing forward, x is left ward, y is downward
"""
# Use np.copy to avoid corrupting original data
point_set = np.copy(self.input_list[index])
return rotate_pc_along_y(point_set, self.get_center_view_rot_angle(index))
def feature_extraction(self, index):
""" Get index-th element from the picked file dataset. """
# ------------------------------ INPUTS ----------------------------
self.class_list = []
for i in range(len(self.type_list)):
self.class_list.append(self.g_type2onehotclass[self.type_list[i]])
rot_angle = self.get_center_view_rot_angle(index)
scene_id = self.id_list[index]
box_2D = self.box2d_list[index]
cls_index = self.class_list[index]
print(cls_index)
# Compute one hot vector
if self.one_hot:
cls_type = self.type_list[index]
assert (cls_type in ['car', 'pedestrian', 'bicycle'])
one_hot_vec = np.zeros(3, dtype=int)
one_hot_vec[self.g_type2onehotclass[cls_type]] = 1
if self.rotate_to_center:
point_set = self.get_center_view_point_set(index)
else:
point_set = self.input_list[index]
# Resample
if point_set.shape[0] < self.npoints:
choice = np.random.choice(point_set.shape[0], self.npoints, replace=True)
else:
choice = np.random.choice(point_set.shape[0], self.npoints, replace=False)
point_set = point_set[choice, :3]
if self.one_hot:
return point_set, rot_angle, self.prob_list[index], one_hot_vec, cls_index, scene_id, box_2D
else:
return point_set, rot_angle, self.prob_list[index]
def serialize_example(self, point_set, rot_angle, prob_value, one_hot_vec, cls_index, scene_id, box_2D):
feature = {'frustum_point_cloud': float_list_feature(point_set.ravel()),
'rot_angle': float_feature(rot_angle),
'one_hot_vec': int64_list_feature(one_hot_vec),
'prob': float_feature(prob_value),
'type_name': int64_feature(cls_index),
'sample_token': bytes_feature(scene_id.encode('utf-8')),
'box_2d': float_list_feature(box_2D.ravel())}
example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
return example_proto
def write_tfrec(self, tfrec_name):
with tf.io.TFRecordWriter(tfrec_name) as tfrw:
for i in range(len(self.id_list)):
point_set, rot_angle, prob, one_hot_vec, cls_index, scene_id, box_2D = self.feature_extraction(i)
tfexample = self.serialize_example(point_set, rot_angle, prob, one_hot_vec, cls_index, scene_id, box_2D)
tfrw.write(tfexample.SerializeToString())
class tfrec_Gen_Train_Val(object):
def __init__(self, pickle_path):
self.npoints = 1024
self.random_flip = True
self.random_shift = True
self.rotate_to_center = True
self.one_hot = True
self.from_rgb_detection = False
with open(pickle_path, 'rb') as fp:
self.id_list = pickle.load(fp, encoding='latin1')
self.box2d_list = pickle.load(fp, encoding='latin1')
self.box3d_list = pickle.load(fp, encoding='latin1')
self.input_list = pickle.load(fp, encoding='latin1')
self.label_list = pickle.load(fp, encoding='latin1')
self.type_list = pickle.load(fp, encoding='latin1')
self.heading_list = pickle.load(fp, encoding='latin1')
self.size_list = pickle.load(fp, encoding='latin1')
# frustum_angle is clockwise angle from positive x-axis
self.frustum_angle_list = pickle.load(fp, encoding='latin1')
print(len(self.id_list))
def get_box3d_center(self, index):
""" Get the center (XYZ) of 3D bounding box. """
box3d_center = (self.box3d_list[index][0, :] + self.box3d_list[index][6, :]) / 2.0
return box3d_center
def get_center_view_box3d_center(self, index):
""" Frustum rotation of 3D bounding box center. """
box3d_center = (self.box3d_list[index][0, :] +
self.box3d_list[index][6, :]) / 2.0
return rotate_pc_along_y(np.expand_dims(box3d_center, 0),
self.get_center_view_rot_angle(index)).squeeze()
def get_center_view_box3d(self, index):
""" Frustum rotation of 3D bounding box corners. """
box3d = self.box3d_list[index]
box3d_center_view = np.copy(box3d)
return rotate_pc_along_y(box3d_center_view, self.get_center_view_rot_angle(index))
def get_center_view_point_set(self, index):
""" Frustum rotation of point clouds.
NxC points with first 3 channels as XYZ
z is facing forward, x is left ward, y is downward
"""
# Use np.copy to avoid corrupting original data
point_set = np.copy(self.input_list[index])
return rotate_pc_along_y(point_set, self.get_center_view_rot_angle(index))
def get_center_view_rot_angle(self, index):
""" Get the frustum rotation angle, it isshifted by pi/2 so that it
can be directly used to adjust GT heading angle """
return np.pi / 2.0 + self.frustum_angle_list[index]
def feature_extraction(self, index):
""" Get index-th element from the picked file dataset. """
# ------------------------------ INPUTS ----------------------------
rot_angle = self.get_center_view_rot_angle(index)
# Compute one hot vector
if self.one_hot:
cls_type = self.type_list[index]
assert (cls_type in ['car', 'pedestrian', 'bicycle'])
one_hot_vec = np.zeros(3, dtype=int)
one_hot_vec[g_type2onehotclass[cls_type]] = 1
# Get point cloud
if self.rotate_to_center:
point_set = self.get_center_view_point_set(index)
else:
point_set = self.input_list[index]
# Resample
if point_set.shape[0] < self.npoints:
choice = np.random.choice(point_set.shape[0], self.npoints, replace=True)
else:
choice = np.random.choice(point_set.shape[0], self.npoints, replace=False)
point_set = point_set[choice, :3]
# ------------------------------ LABELS ----------------------------
seg = self.label_list[index]
seg = seg[choice]
# Get center point of 3D box
if self.rotate_to_center:
box3d_center = self.get_center_view_box3d_center(index)
else:
box3d_center = self.get_box3d_center(index)
# Heading
if self.rotate_to_center:
heading_angle = self.heading_list[index] - rot_angle
else:
heading_angle = self.heading_list[index]
# Size
size_class, size_residual = size2class(self.size_list[index], self.type_list[index])
# Data Augmentation
if self.random_flip:
# note: rot_angle won't be correct if we have random_flip
# so do not use it in case of random flipping.
if np.random.random() > 0.5: # 50% chance flipping
point_set[:, 0] *= -1
box3d_center[0] *= -1
heading_angle = np.pi - heading_angle
if self.random_shift:
dist = np.sqrt(np.sum(box3d_center[0] ** 2 + box3d_center[1] ** 2))
shift = np.clip(np.random.randn() * dist * 0.05, dist * 0.8, dist * 1.2)
point_set[:, 2] += shift
box3d_center[2] += shift
angle_class, angle_residual = angle2class(heading_angle,
NUM_HEADING_BIN)
if self.one_hot:
return point_set, seg, box3d_center, angle_class, angle_residual, \
size_class, size_residual, rot_angle, one_hot_vec
else:
return point_set, seg, box3d_center, angle_class, angle_residual, \
size_class, size_residual, rot_angle
def serialize_example(self, point_set, seg, box3d_center, angle_class, angle_residual, size_class, size_residual,
rot_angle, one_hot_vec):
feature = {'frustum_point_cloud': float_list_feature(point_set.ravel()),
'seg_label': float_list_feature(seg),
'box3d_center': float_list_feature(box3d_center),
'angle_class': int64_feature(angle_class),
'angle_residual': float_feature(angle_residual),
'size_class': int64_feature(size_class),
'size_residual': float_list_feature(size_residual.ravel()),
'rot_angle': float_feature(rot_angle),
'one_hot_vec': float_list_feature(one_hot_vec)}
example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
return example_proto
def write_tfrec(self, tfrec_name):
with tf.io.TFRecordWriter(tfrec_name) as tfrw:
for i in range(len(self.id_list)):
point_set, seg, box3d_center, angle_class, angle_residual, \
size_class, size_residual, rot_angle, one_hot_vec = self.feature_extraction(i)
tfexample = self.serialize_example(point_set, seg, box3d_center, angle_class, angle_residual, size_class,
size_residual,
rot_angle, one_hot_vec)
tfrw.write(tfexample.SerializeToString())
| [
"numpy.copy",
"numpy.random.choice",
"numpy.random.random",
"pickle.load",
"tensorflow.io.TFRecordWriter",
"tensorflow.train.Int64List",
"tensorflow.train.BytesList",
"numpy.array",
"numpy.zeros",
"tensorflow.train.Features",
"tensorflow.constant",
"numpy.cos",
"tensorflow.train.FloatList",
... | [((972, 1003), 'numpy.zeros', 'np.zeros', (['(NUM_SIZE_CLUSTER, 3)'], {}), '((NUM_SIZE_CLUSTER, 3))\n', (980, 1003), True, 'import numpy as np\n'), ((410, 438), 'numpy.array', 'np.array', (['[4.76, 1.93, 1.72]'], {}), '([4.76, 1.93, 1.72])\n', (418, 438), True, 'import numpy as np\n'), ((467, 512), 'numpy.array', 'np.array', (['[5.06763659, 1.9007158, 2.20532825]'], {}), '([5.06763659, 1.9007158, 2.20532825])\n', (475, 512), True, 'import numpy as np\n'), ((543, 589), 'numpy.array', 'np.array', (['[10.13586957, 2.58549199, 3.2520595]'], {}), '([10.13586957, 2.58549199, 3.2520595])\n', (551, 589), True, 'import numpy as np\n'), ((625, 653), 'numpy.array', 'np.array', (['[0.81, 0.77, 1.78]'], {}), '([0.81, 0.77, 1.78])\n', (633, 653), True, 'import numpy as np\n'), ((693, 738), 'numpy.array', 'np.array', (['[0.80057803, 0.5983815, 1.27450867]'], {}), '([0.80057803, 0.5983815, 1.27450867])\n', (701, 738), True, 'import numpy as np\n'), ((771, 799), 'numpy.array', 'np.array', (['[1.76, 0.63, 1.44]'], {}), '([1.76, 0.63, 1.44])\n', (779, 799), True, 'import numpy as np\n'), ((829, 876), 'numpy.array', 'np.array', (['[16.17150617, 2.53246914, 3.53079012]'], {}), '([16.17150617, 2.53246914, 3.53079012])\n', (837, 876), True, 'import numpy as np\n'), ((906, 952), 'numpy.array', 'np.array', (['[3.64300781, 1.54298177, 1.92320313]'], {}), '([3.64300781, 1.54298177, 1.92320313])\n', (914, 952), True, 'import numpy as np\n'), ((2312, 2329), 'numpy.cos', 'np.cos', (['rot_angle'], {}), '(rot_angle)\n', (2318, 2329), True, 'import numpy as np\n'), ((2343, 2360), 'numpy.sin', 'np.sin', (['rot_angle'], {}), '(rot_angle)\n', (2349, 2360), True, 'import numpy as np\n'), ((2374, 2421), 'numpy.array', 'np.array', (['[[cosval, -sinval], [sinval, cosval]]'], {}), '([[cosval, -sinval], [sinval, cosval]])\n', (2382, 2421), True, 'import numpy as np\n'), ((2464, 2484), 'numpy.transpose', 'np.transpose', (['rotmat'], {}), '(rotmat)\n', (2476, 2484), True, 'import numpy as np\n'), 
((6003, 6034), 'numpy.copy', 'np.copy', (['self.input_list[index]'], {}), '(self.input_list[index])\n', (6010, 6034), True, 'import numpy as np\n'), ((10500, 10514), 'numpy.copy', 'np.copy', (['box3d'], {}), '(box3d)\n', (10507, 10514), True, 'import numpy as np\n'), ((10896, 10927), 'numpy.copy', 'np.copy', (['self.input_list[index]'], {}), '(self.input_list[index])\n', (10903, 10927), True, 'import numpy as np\n'), ((1184, 1217), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[value]'}), '(value=[value])\n', (1202, 1217), True, 'import tensorflow as tf\n'), ((1291, 1322), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': 'value'}), '(value=value)\n', (1309, 1322), True, 'import tensorflow as tf\n'), ((1541, 1555), 'tensorflow.constant', 'tf.constant', (['(0)'], {}), '(0)\n', (1552, 1555), True, 'import tensorflow as tf\n'), ((1684, 1717), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (1702, 1717), True, 'import tensorflow as tf\n'), ((1791, 1822), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': 'value'}), '(value=value)\n', (1809, 1822), True, 'import tensorflow as tf\n'), ((1896, 1927), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': 'value'}), '(value=value)\n', (1914, 1927), True, 'import tensorflow as tf\n'), ((1996, 2029), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': '[value]'}), '(value=[value])\n', (2014, 2029), True, 'import tensorflow as tf\n'), ((5034, 5068), 'pickle.load', 'pickle.load', (['fp'], {'encoding': '"""latin1"""'}), "(fp, encoding='latin1')\n", (5045, 5068), False, 'import pickle\n'), ((5099, 5133), 'pickle.load', 'pickle.load', (['fp'], {'encoding': '"""latin1"""'}), "(fp, encoding='latin1')\n", (5110, 5133), False, 'import pickle\n'), ((5164, 5198), 'pickle.load', 'pickle.load', (['fp'], {'encoding': '"""latin1"""'}), "(fp, encoding='latin1')\n", (5175, 5198), False, 'import pickle\n'), 
((5228, 5262), 'pickle.load', 'pickle.load', (['fp'], {'encoding': '"""latin1"""'}), "(fp, encoding='latin1')\n", (5239, 5262), False, 'import pickle\n'), ((5369, 5403), 'pickle.load', 'pickle.load', (['fp'], {'encoding': '"""latin1"""'}), "(fp, encoding='latin1')\n", (5380, 5403), False, 'import pickle\n'), ((5433, 5467), 'pickle.load', 'pickle.load', (['fp'], {'encoding': '"""latin1"""'}), "(fp, encoding='latin1')\n", (5444, 5467), False, 'import pickle\n'), ((6858, 6880), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'int'}), '(3, dtype=int)\n', (6866, 6880), True, 'import numpy as np\n'), ((7189, 7253), 'numpy.random.choice', 'np.random.choice', (['point_set.shape[0]', 'self.npoints'], {'replace': '(True)'}), '(point_set.shape[0], self.npoints, replace=True)\n', (7205, 7253), True, 'import numpy as np\n'), ((7289, 7354), 'numpy.random.choice', 'np.random.choice', (['point_set.shape[0]', 'self.npoints'], {'replace': '(False)'}), '(point_set.shape[0], self.npoints, replace=False)\n', (7305, 7354), True, 'import numpy as np\n'), ((8343, 8375), 'tensorflow.io.TFRecordWriter', 'tf.io.TFRecordWriter', (['tfrec_name'], {}), '(tfrec_name)\n', (8363, 8375), True, 'import tensorflow as tf\n'), ((9068, 9102), 'pickle.load', 'pickle.load', (['fp'], {'encoding': '"""latin1"""'}), "(fp, encoding='latin1')\n", (9079, 9102), False, 'import pickle\n'), ((9133, 9167), 'pickle.load', 'pickle.load', (['fp'], {'encoding': '"""latin1"""'}), "(fp, encoding='latin1')\n", (9144, 9167), False, 'import pickle\n'), ((9198, 9232), 'pickle.load', 'pickle.load', (['fp'], {'encoding': '"""latin1"""'}), "(fp, encoding='latin1')\n", (9209, 9232), False, 'import pickle\n'), ((9263, 9297), 'pickle.load', 'pickle.load', (['fp'], {'encoding': '"""latin1"""'}), "(fp, encoding='latin1')\n", (9274, 9297), False, 'import pickle\n'), ((9328, 9362), 'pickle.load', 'pickle.load', (['fp'], {'encoding': '"""latin1"""'}), "(fp, encoding='latin1')\n", (9339, 9362), False, 'import pickle\n'), ((9392, 9426), 
'pickle.load', 'pickle.load', (['fp'], {'encoding': '"""latin1"""'}), "(fp, encoding='latin1')\n", (9403, 9426), False, 'import pickle\n'), ((9459, 9493), 'pickle.load', 'pickle.load', (['fp'], {'encoding': '"""latin1"""'}), "(fp, encoding='latin1')\n", (9470, 9493), False, 'import pickle\n'), ((9523, 9557), 'pickle.load', 'pickle.load', (['fp'], {'encoding': '"""latin1"""'}), "(fp, encoding='latin1')\n", (9534, 9557), False, 'import pickle\n'), ((9664, 9698), 'pickle.load', 'pickle.load', (['fp'], {'encoding': '"""latin1"""'}), "(fp, encoding='latin1')\n", (9675, 9698), False, 'import pickle\n'), ((11696, 11718), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'int'}), '(3, dtype=int)\n', (11704, 11718), True, 'import numpy as np\n'), ((12048, 12112), 'numpy.random.choice', 'np.random.choice', (['point_set.shape[0]', 'self.npoints'], {'replace': '(True)'}), '(point_set.shape[0], self.npoints, replace=True)\n', (12064, 12112), True, 'import numpy as np\n'), ((12148, 12213), 'numpy.random.choice', 'np.random.choice', (['point_set.shape[0]', 'self.npoints'], {'replace': '(False)'}), '(point_set.shape[0], self.npoints, replace=False)\n', (12164, 12213), True, 'import numpy as np\n'), ((14949, 14981), 'tensorflow.io.TFRecordWriter', 'tf.io.TFRecordWriter', (['tfrec_name'], {}), '(tfrec_name)\n', (14969, 14981), True, 'import tensorflow as tf\n'), ((8225, 8259), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), '(feature=feature)\n', (8242, 8259), True, 'import tensorflow as tf\n'), ((13104, 13122), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (13120, 13122), True, 'import numpy as np\n'), ((13340, 13391), 'numpy.sum', 'np.sum', (['(box3d_center[0] ** 2 + box3d_center[1] ** 2)'], {}), '(box3d_center[0] ** 2 + box3d_center[1] ** 2)\n', (13346, 13391), True, 'import numpy as np\n'), ((14831, 14865), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), '(feature=feature)\n', (14848, 14865), True, 
'import tensorflow as tf\n'), ((10212, 10243), 'numpy.expand_dims', 'np.expand_dims', (['box3d_center', '(0)'], {}), '(box3d_center, 0)\n', (10226, 10243), True, 'import numpy as np\n'), ((13421, 13438), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (13436, 13438), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import sys, json
import netCDF4 as nc
import numpy as np
import pylab as pl
import datetime as dt
#############
# Constants #
#############
Lv = 2.5E6
Rd = 287
Rv = 461
T0 = 273.15
E0 = 6.11
######################################
# Data from Materhorn Field Campaign #
######################################
# materhorn data files
obs = open('observations/efs_sage_iop5_5min.dat','r').read().split()[20::]
lem = open('observations/lems_iop5_10sec.dat','r').read().split()[17::]
# atmospheric data
pa = np.array(obs[5::20]).astype(float)*100.0
ws = np.array(obs[6::20]).astype(float)
wd = np.array(obs[7::20]).astype(float)
pt = np.array(obs[8::20]).astype(float)+T0
rh = np.array(obs[9::20]).astype(float)
net = np.array(obs[10::20]).astype(float)
ntime = len(ws)
# convert relative humidity to mixing ratio
es = E0*np.exp(Lv/Rv * (1/T0 - 1/pt))
e = rh*es/100
sh = e * Rd / (Rv * (pa/100 - e))
qs = sh / (sh + 1)
# soil moisture
sm_02 = np.float(lem[15])
sm_07 = np.float(obs[11])
sm_25 = np.float(lem[13])
sm_00 = sm_02
sm_ob = np.array([sm_00,sm_02,sm_07,sm_25])
z_obm = np.array([0.000,0.025,0.075,0.25])
# soil temperature
st_01 = np.float(obs[12])+T0
st_02 = np.float(obs[13])+T0
st_05 = np.float(obs[14])+T0
st_07 = np.float(obs[15])+T0
st_10 = np.float(obs[16])+T0
st_15 = np.float(obs[17])+T0
st_25 = np.float(obs[18])+T0
st_70 = np.float(obs[19])+T0
st_00 = st_01
st_ob = np.array([st_00,st_01,st_02,st_05,st_07,st_10,st_15,st_25,st_70])
z_obs = np.array([0.000,0.01,0.025,0.050,0.075,0.100,0.150,0.250,0.700])
nsoil = len(z_obs)
# interpolate soil moisture to temperature grid
sm_oi = np.interp(z_obs,z_obm,sm_ob)
# soil type from USDA 11-category + peat
# 1 = sand
# 2 = loamy sand
# 3 = sandy loam
# 4 = silty loam
# 5 = loam
# 6 = sandy clay loam
# 7 = silty clay loam
# 8 = clay loam
# 9 = sandy clay
# 10 = silty clay
# 11 = clay
# 12 = peat
stype = np.full((nsoil),4)
##############################
# Write all time series data #
##############################
metr = {}
metr['time'] = {}
metr['data'] = {}
metr['time']['ntime'] = ntime
metr['time']['tstep'] = 300.0
metr['data']['atm_U'] = ws.tolist()
metr['data']['atm_T'] = pt.tolist()
metr['data']['atm_q'] = qs.tolist()
metr['data']['atm_p'] = pa.tolist()
metr['data']['R_net'] = net.tolist()
with open('inputOffline.json', 'w') as outfile:
json.dump(metr,outfile,indent=4)
########################
# Settings for UtahLSM #
########################
namelist = {}
namelist['time'] = {}
namelist['grid'] = {}
namelist['length'] = {}
namelist['soil'] = {}
namelist['radiation'] = {}
namelist['output'] = {}
# grid section
namelist['time']['step_seb'] = 10
namelist['time']['step_dif'] = 10
# grid section
namelist['grid']['nx'] = 1
namelist['grid']['ny'] = 1
# length scale section
namelist['length']['z_o'] = 0.15
namelist['length']['z_t'] = 0.0015
namelist['length']['z_m'] = 10.0
namelist['length']['z_s'] = 2.0
# soil section
namelist['soil']['nsoil'] = nsoil
namelist['soil']['param'] = 3
namelist['soil']['model'] = 2
namelist['soil']['soil_z'] = z_obs.tolist()
namelist['soil']['soil_type'] = stype.tolist()
namelist['soil']['soil_T'] = st_ob.tolist()
namelist['soil']['soil_q'] = sm_oi.tolist()
# radiation section
namelist['radiation']['utc_start'] = 12.0
namelist['radiation']['comp_rad'] = 0
namelist['radiation']['albedo'] = 0.33
namelist['radiation']['emissivity'] = 0.99
namelist['radiation']['latitude'] = 40.121360
namelist['radiation']['longitude'] = -113.129070
namelist['radiation']['julian_day'] = 133
# output section
namelist['output']['save'] = 1
namelist['output']['fields'] = ['all']
with open('inputLSM.json', 'w') as outfile:
json.dump(namelist,outfile,indent=4) | [
"numpy.float",
"numpy.exp",
"numpy.array",
"numpy.interp",
"numpy.full",
"json.dump"
] | [((983, 1000), 'numpy.float', 'np.float', (['lem[15]'], {}), '(lem[15])\n', (991, 1000), True, 'import numpy as np\n'), ((1009, 1026), 'numpy.float', 'np.float', (['obs[11]'], {}), '(obs[11])\n', (1017, 1026), True, 'import numpy as np\n'), ((1035, 1052), 'numpy.float', 'np.float', (['lem[13]'], {}), '(lem[13])\n', (1043, 1052), True, 'import numpy as np\n'), ((1075, 1113), 'numpy.array', 'np.array', (['[sm_00, sm_02, sm_07, sm_25]'], {}), '([sm_00, sm_02, sm_07, sm_25])\n', (1083, 1113), True, 'import numpy as np\n'), ((1119, 1154), 'numpy.array', 'np.array', (['[0.0, 0.025, 0.075, 0.25]'], {}), '([0.0, 0.025, 0.075, 0.25])\n', (1127, 1154), True, 'import numpy as np\n'), ((1428, 1501), 'numpy.array', 'np.array', (['[st_00, st_01, st_02, st_05, st_07, st_10, st_15, st_25, st_70]'], {}), '([st_00, st_01, st_02, st_05, st_07, st_10, st_15, st_25, st_70])\n', (1436, 1501), True, 'import numpy as np\n'), ((1502, 1565), 'numpy.array', 'np.array', (['[0.0, 0.01, 0.025, 0.05, 0.075, 0.1, 0.15, 0.25, 0.7]'], {}), '([0.0, 0.01, 0.025, 0.05, 0.075, 0.1, 0.15, 0.25, 0.7])\n', (1510, 1565), True, 'import numpy as np\n'), ((1643, 1673), 'numpy.interp', 'np.interp', (['z_obs', 'z_obm', 'sm_ob'], {}), '(z_obs, z_obm, sm_ob)\n', (1652, 1673), True, 'import numpy as np\n'), ((1923, 1940), 'numpy.full', 'np.full', (['nsoil', '(4)'], {}), '(nsoil, 4)\n', (1930, 1940), True, 'import numpy as np\n'), ((860, 895), 'numpy.exp', 'np.exp', (['(Lv / Rv * (1 / T0 - 1 / pt))'], {}), '(Lv / Rv * (1 / T0 - 1 / pt))\n', (866, 895), True, 'import numpy as np\n'), ((1182, 1199), 'numpy.float', 'np.float', (['obs[12]'], {}), '(obs[12])\n', (1190, 1199), True, 'import numpy as np\n'), ((1211, 1228), 'numpy.float', 'np.float', (['obs[13]'], {}), '(obs[13])\n', (1219, 1228), True, 'import numpy as np\n'), ((1240, 1257), 'numpy.float', 'np.float', (['obs[14]'], {}), '(obs[14])\n', (1248, 1257), True, 'import numpy as np\n'), ((1269, 1286), 'numpy.float', 'np.float', (['obs[15]'], {}), 
'(obs[15])\n', (1277, 1286), True, 'import numpy as np\n'), ((1298, 1315), 'numpy.float', 'np.float', (['obs[16]'], {}), '(obs[16])\n', (1306, 1315), True, 'import numpy as np\n'), ((1327, 1344), 'numpy.float', 'np.float', (['obs[17]'], {}), '(obs[17])\n', (1335, 1344), True, 'import numpy as np\n'), ((1356, 1373), 'numpy.float', 'np.float', (['obs[18]'], {}), '(obs[18])\n', (1364, 1373), True, 'import numpy as np\n'), ((1385, 1402), 'numpy.float', 'np.float', (['obs[19]'], {}), '(obs[19])\n', (1393, 1402), True, 'import numpy as np\n'), ((2379, 2413), 'json.dump', 'json.dump', (['metr', 'outfile'], {'indent': '(4)'}), '(metr, outfile, indent=4)\n', (2388, 2413), False, 'import sys, json\n'), ((3735, 3773), 'json.dump', 'json.dump', (['namelist', 'outfile'], {'indent': '(4)'}), '(namelist, outfile, indent=4)\n', (3744, 3773), False, 'import sys, json\n'), ((580, 600), 'numpy.array', 'np.array', (['obs[6::20]'], {}), '(obs[6::20])\n', (588, 600), True, 'import numpy as np\n'), ((623, 643), 'numpy.array', 'np.array', (['obs[7::20]'], {}), '(obs[7::20])\n', (631, 643), True, 'import numpy as np\n'), ((712, 732), 'numpy.array', 'np.array', (['obs[9::20]'], {}), '(obs[9::20])\n', (720, 732), True, 'import numpy as np\n'), ((755, 776), 'numpy.array', 'np.array', (['obs[10::20]'], {}), '(obs[10::20])\n', (763, 776), True, 'import numpy as np\n'), ((531, 551), 'numpy.array', 'np.array', (['obs[5::20]'], {}), '(obs[5::20])\n', (539, 551), True, 'import numpy as np\n'), ((666, 686), 'numpy.array', 'np.array', (['obs[8::20]'], {}), '(obs[8::20])\n', (674, 686), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
'''
#############################################################################
DESCRIPTION DATA.PY
Author: <NAME> (University of Edinburgh)
This module can be used to generate random order data.
Functions:
- generate_orders(): Generate some input data (order information)
for our simulation experiments.
#############################################################################
'''
def generate_orders(seed_no, simulation_period = 120, backlog = 100, interval = 15, arrival_mean = 5, arrival_stddev = 0, arrival_list = None,
min_lead = 30, avg_items_per_order = 1.5, first_due = 30, inter_due = 30,
aisle_no = 10, aisle_length = 45, log_file = 'logging.txt', metrics_file = 'metrics.txt'):
'''
Generate some input data (order information) for our simulation experiments.
Users can specify their desired time horizon, backlog of orders,
arrival process as well as many other parameters and receive a data frame
containing the arrival and departure times, size and location of the
various generated orders.
Inputs:
seed_no (int): random seed for replication purposes
simulation_period (int): time horizon of simulation (in minutes)
backlog (int): number of orders in backlog at t=0
interval (int): interval in which lambda changes (exponential arrival process)
arrival_mean (float): mean of lambda (exponential arrival process)
arrival_stddev (float): standard dev. of lambda (exponential arrival process)
arrival_list (list): list with prespecified lambdas (exponential arrival process);
alternatively to arrival_mean and arrival_stddev
min_lead (int): minimum time between order arrival and departure
avg_items_per_order (int): avg. no. of items (SKUs) included in a single order
first_due (int): departure time of first vehicle (in minutes after t=0)
inter_due (int): time between two vehicle departures
aisle_no (int): number of aisles in considered one-block warehouse
aisle_length (int): length of aisles (in meters) in considered one-block warehouse
log_file (.txt): file for simulation run protocol
metrics_file (.txt): file for simulation run results
Outputs:
df (DataFrame): DataFrame containing information on
- order ID
- order arrivaltime
- order departuretime
- number of items
- size of each item
- location of each item (x,y)
'''
#WRITE HEADER FOR RESULT FILES
for files in [log_file, metrics_file]:
file = open(files, 'a')
file.write(f"" + '\n')
file.write(f"###### DATA INFORMATION ########"+ '\n')
file.write(f"simulation_period: {simulation_period}" + '\n')
file.write(f"backlog: {backlog}" + '\n')
file.write(f"min_lead: {min_lead}" + '\n')
if(arrival_list != None):
file.write(f"arrivals: list = {arrival_list} (interval = {interval})" + '\n')
else:
file.write(f"arrivals: mean = {arrival_mean}, stddev = {arrival_stddev} (interval = {interval})" + '\n')
file.write(f"seed_no: {seed_no}" + '\n')
file.write(f"" + '\n')
file.close()
#Initialize df
cols = ["order_ID", "arrivaltime", "departuretime", "size", "x_coord", "y_coord"]
df = pd.DataFrame(columns = cols)
#Initialize random number generator and other parameters
rng = np.random.default_rng(seed = seed_no)
t = 0
d = 1
order_ID = 0
# GENERATE BACKLOG
for i in range(backlog):
arrivaltime = t
order_ID = order_ID + 1
#Assign order to one of the vehicles and get departuretime
lead = rng.binomial(n = 5, p = 0.1)
departuretime = first_due + lead * inter_due
#Generate no_items included in order
#for each order: generate row entry
no_items = rng.geometric(1/avg_items_per_order)
for k in range(no_items):
size = rng.integers(1,10)
x_coord = rng.integers(0, aisle_no)
y_coord = rng.integers(1, aisle_length+1)
df = df.append({"order_ID": order_ID, "arrivaltime": arrivaltime,
"departuretime": departuretime, "size": size,
"x_coord": x_coord, "y_coord": y_coord}, ignore_index=True)
# GENERATE NEW ARRIVING ORDERS
#Determine arrivalrate
# Option 1: specfic arrival list as input
if(arrival_list != None):
arrivalrate = arrival_list[0]
# Option 2: generate normally distributed arrival rate
else:
arrivalrate = rng.normal(arrival_mean, arrival_stddev)
#Note: arrival rates <= 0 are not valid
if arrivalrate < 0:
arrivalrate = 0.05
# Generate first arrivaltime
t = rng.exponential(1/arrivalrate)
while t < simulation_period:
arrivaltime = t
order_ID = order_ID + 1
#Assign order to one of the vehicles and get departuretime
earliest_departure = t + min_lead
index_earliest_vehicle = max(0,np.ceil((earliest_departure-first_due)/inter_due))
lead = rng.binomial(n = 5, p = 0.1)
departuretime = first_due + (index_earliest_vehicle + lead)*inter_due
#Generate no_items included in order
#for each order: generate row entry
no_items = rng.geometric(1/avg_items_per_order)
for k in range(no_items):
size = rng.integers(1,10)
x_coord = rng.integers(0, aisle_no)
y_coord = rng.integers(1, aisle_length+1)
df = df.append({"order_ID": order_ID, "arrivaltime": arrivaltime,
"departuretime": departuretime, "size": size,
"x_coord": x_coord, "y_coord": y_coord}, ignore_index=True)
#Check if arrivalrate has to be updated
if(arrivaltime > d*interval):
#Update interval_counter
d += 1
#Determine arrivalrate
# Option 1: specfic arrival list as input
if(arrival_list != None):
arrivalrate = arrival_list[d-1]
# Option 2: generate normally distributed arrival rate
else:
arrivalrate = rng.normal(arrival_mean, arrival_stddev)
#Note: arrival rates <= 0 are not valid
if arrivalrate < 0:
arrivalrate = 0.05
#Generate next arrivaltime
t = arrivaltime + rng.exponential(1/arrivalrate)
#Change data types of columns
df = df.astype({"order_ID": int, "arrivaltime": float, "departuretime": float, "size": int, "x_coord": int, "y_coord":int})
return df
| [
"pandas.DataFrame",
"numpy.ceil",
"numpy.random.default_rng"
] | [((3830, 3856), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'cols'}), '(columns=cols)\n', (3842, 3856), True, 'import pandas as pd\n'), ((3931, 3966), 'numpy.random.default_rng', 'np.random.default_rng', ([], {'seed': 'seed_no'}), '(seed=seed_no)\n', (3952, 3966), True, 'import numpy as np\n'), ((5540, 5593), 'numpy.ceil', 'np.ceil', (['((earliest_departure - first_due) / inter_due)'], {}), '((earliest_departure - first_due) / inter_due)\n', (5547, 5593), True, 'import numpy as np\n')] |
# Randomly simulates a determinantally-thinned Poisson point process.
#
# A determinantally-thinned Poisson point process is essentially a discrete
# determinantal point process whose underlying state space is a single
# realization of a Poisson point process defined on some bounded continuous
# space.
#
# For more details, see the paper by Blaszczyszyn and Keeler[1].
#
# This is an updated version of the file in the repository[2].
#
# Author: <NAME>, Inria/ENS, Paris, and University of Melbourne,
# Melbourne, 2018.
#
# References:
# [1] Blaszczyszyn and Keeler, Determinantal thinning of point processes with
#     network learning applications, 2018.
# [2] Keeler, 2018, https://github.com/hpaulkeeler/DetPoisson_Python

# import relevant libraries
import numpy as np
from scipy.linalg import orth
import matplotlib.pyplot as plt

plt.close("all")  # close all figures

# set random seed for reproducibility
# np.random.seed(1)

# START -- Parameters -- START
# Poisson point process parameters
lambda0 = 50  # intensity (ie mean density) of the Poisson process

# choose kernel: 1 for Gaussian (ie squared exponential); 2 for Cauchy
choiceKernel = 1
sigma = 1  # parameter for Gaussian and Cauchy kernel
alpha = 1  # parameter for Cauchy kernel

# Simulation window parameters
xMin = 0; xMax = 1; yMin = 0; yMax = 1
xDelta = xMax - xMin; yDelta = yMax - yMin  # rectangle dimensions
areaTotal = xDelta * yDelta  # area of rectangle
# END -- Parameters -- END

# Simulate a Poisson point process
numbPoints = np.random.poisson(lambda0 * areaTotal)  # Poisson number of points
xx = np.random.uniform(xMin, xMax, numbPoints)  # x coordinates of Poisson points
yy = np.random.uniform(yMin, yMax, numbPoints)  # y coordinates of Poisson points

# START -- CREATE L matrix -- START
sizeL = numbPoints
# Calculate Gaussian or Cauchy kernel based on grid x/y values:
# all squared distances of x/y difference pairs
xxDiff = np.outer(xx, np.ones((numbPoints,))) - np.outer(np.ones((numbPoints,)), xx)
yyDiff = np.outer(yy, np.ones((numbPoints,))) - np.outer(np.ones((numbPoints,)), yy)
rrDiffSquared = (xxDiff**2 + yyDiff**2)

if choiceKernel == 1:
    # Gaussian/squared exponential kernel
    L = lambda0 * np.exp(-(rrDiffSquared) / sigma**2)
elif choiceKernel == 2:
    # Cauchy kernel
    L = lambda0 / (1 + rrDiffSquared / sigma**2)**(alpha + 1/2)
else:
    raise Exception('choiceKernel has to be equal to 1 or 2.')
# END -- CREATE L matrix -- END

# START Simulating/sampling DPP
# Eigen decomposition
eigenValL, eigenVectL = np.linalg.eig(L)
eigenValK = eigenValL / (1 + eigenValL)  # eigenvalues of K
booleEigen = (np.random.rand(sizeL) < eigenValK)  # Bernoulli trials

# number of points in the DPP realization
numbPointsDPP = np.sum(booleEigen)
# retrieve eigenvectors corresponding to successful Bernoulli trials
spaceV = eigenVectL[:, booleEigen]  # subspace V
indexDPP = np.zeros(numbPointsDPP, dtype='int')  # index for final DPP configuration

# Loop through for all points.
# BUG FIX: this loop previously ran `for ii in range(1)`, which sampled only a
# single DPP point and left the remaining entries of indexDPP at zero; the
# projection-DPP sampler must visit all numbPointsDPP points.
for ii in range(numbPointsDPP):
    # Compute selection probabilities for each remaining point
    Prob_i = np.sum(spaceV**2, axis=1)  # sum across rows
    Prob_i = np.cumsum(Prob_i / np.sum(Prob_i))  # normalize into a CDF
    # Choose a new point using PMF Prob_i (inverse-CDF sampling)
    uRand = np.random.rand(1)
    indexCurrent = (uRand <= Prob_i).argmax()
    indexDPP[ii] = indexCurrent
    if ii < numbPointsDPP - 1:
        # Choose a vector to eliminate
        jj = (np.abs(spaceV[indexCurrent, :]) > 0).argmax()
        columnVj = spaceV[:, jj]
        spaceV = np.delete(spaceV, jj, 1)  # remove column
        # Update matrix V by removing Vj component from the space
        spaceV = spaceV - (np.outer(columnVj, (spaceV[indexCurrent, :] / columnVj[indexCurrent])))
        # Orthonormalize (using singular value decomposition - could also use qr)
        spaceV = orth(spaceV)
# Loop finished
indexDPP = np.sort(indexDPP)  # sort points
# END - Simulating/sampling DPP - END

# Plotting
# Plot Poisson point process
plt.scatter(xx, yy, edgecolor='k', facecolor='none')
plt.xlabel("x"); plt.ylabel("y")
# random color vector
vectorColor = np.random.rand(3)
# Plot determinantally-thinned Poisson point process
plt.scatter(xx[indexDPP],yy[indexDPP],edgecolor='none',facecolor=vectorColor); | [
"numpy.abs",
"numpy.linalg.eig",
"numpy.random.poisson",
"matplotlib.pyplot.ylabel",
"numpy.random.rand",
"numpy.ones",
"numpy.sort",
"matplotlib.pyplot.xlabel",
"numpy.delete",
"scipy.linalg.orth",
"matplotlib.pyplot.close",
"numpy.sum",
"numpy.zeros",
"numpy.exp",
"numpy.outer",
"mat... | [((840, 856), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (849, 856), True, 'import matplotlib.pyplot as plt\n'), ((1476, 1514), 'numpy.random.poisson', 'np.random.poisson', (['(lambda0 * areaTotal)'], {}), '(lambda0 * areaTotal)\n', (1493, 1514), True, 'import numpy as np\n'), ((1544, 1585), 'numpy.random.uniform', 'np.random.uniform', (['xMin', 'xMax', 'numbPoints'], {}), '(xMin, xMax, numbPoints)\n', (1561, 1585), True, 'import numpy as np\n'), ((1622, 1663), 'numpy.random.uniform', 'np.random.uniform', (['yMin', 'yMax', 'numbPoints'], {}), '(yMin, yMax, numbPoints)\n', (1639, 1663), True, 'import numpy as np\n'), ((2479, 2495), 'numpy.linalg.eig', 'np.linalg.eig', (['L'], {}), '(L)\n', (2492, 2495), True, 'import numpy as np\n'), ((2676, 2694), 'numpy.sum', 'np.sum', (['booleEigen'], {}), '(booleEigen)\n', (2682, 2694), True, 'import numpy as np\n'), ((2841, 2877), 'numpy.zeros', 'np.zeros', (['numbPointsDPP'], {'dtype': '"""int"""'}), "(numbPointsDPP, dtype='int')\n", (2849, 2877), True, 'import numpy as np\n'), ((3829, 3846), 'numpy.sort', 'np.sort', (['indexDPP'], {}), '(indexDPP)\n', (3836, 3846), True, 'import numpy as np\n'), ((3938, 3990), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xx', 'yy'], {'edgecolor': '"""k"""', 'facecolor': '"""none"""'}), "(xx, yy, edgecolor='k', facecolor='none')\n", (3949, 3990), True, 'import matplotlib.pyplot as plt\n'), ((3991, 4006), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (4001, 4006), True, 'import matplotlib.pyplot as plt\n'), ((4008, 4023), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (4018, 4023), True, 'import matplotlib.pyplot as plt\n'), ((4058, 4075), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (4072, 4075), True, 'import numpy as np\n'), ((4129, 4214), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xx[indexDPP]', 'yy[indexDPP]'], {'edgecolor': '"""none"""', 'facecolor': 'vectorColor'}), 
"(xx[indexDPP], yy[indexDPP], edgecolor='none', facecolor=vectorColor\n )\n", (4140, 4214), True, 'import matplotlib.pyplot as plt\n'), ((2565, 2586), 'numpy.random.rand', 'np.random.rand', (['sizeL'], {}), '(sizeL)\n', (2579, 2586), True, 'import numpy as np\n'), ((3024, 3051), 'numpy.sum', 'np.sum', (['(spaceV ** 2)'], {'axis': '(1)'}), '(spaceV ** 2, axis=1)\n', (3030, 3051), True, 'import numpy as np\n'), ((3188, 3205), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (3202, 3205), True, 'import numpy as np\n'), ((1880, 1902), 'numpy.ones', 'np.ones', (['(numbPoints,)'], {}), '((numbPoints,))\n', (1887, 1902), True, 'import numpy as np\n'), ((1914, 1936), 'numpy.ones', 'np.ones', (['(numbPoints,)'], {}), '((numbPoints,))\n', (1921, 1936), True, 'import numpy as np\n'), ((1962, 1984), 'numpy.ones', 'np.ones', (['(numbPoints,)'], {}), '((numbPoints,))\n', (1969, 1984), True, 'import numpy as np\n'), ((1996, 2018), 'numpy.ones', 'np.ones', (['(numbPoints,)'], {}), '((numbPoints,))\n', (2003, 2018), True, 'import numpy as np\n'), ((2136, 2171), 'numpy.exp', 'np.exp', (['(-rrDiffSquared / sigma ** 2)'], {}), '(-rrDiffSquared / sigma ** 2)\n', (2142, 2171), True, 'import numpy as np\n'), ((3477, 3501), 'numpy.delete', 'np.delete', (['spaceV', 'jj', '(1)'], {}), '(spaceV, jj, 1)\n', (3486, 3501), True, 'import numpy as np\n'), ((3787, 3799), 'scipy.linalg.orth', 'orth', (['spaceV'], {}), '(spaceV)\n', (3791, 3799), False, 'from scipy.linalg import orth\n'), ((3102, 3116), 'numpy.sum', 'np.sum', (['Prob_i'], {}), '(Prob_i)\n', (3108, 3116), True, 'import numpy as np\n'), ((3615, 3683), 'numpy.outer', 'np.outer', (['columnVj', '(spaceV[indexCurrent, :] / columnVj[indexCurrent])'], {}), '(columnVj, spaceV[indexCurrent, :] / columnVj[indexCurrent])\n', (3623, 3683), True, 'import numpy as np\n'), ((3377, 3408), 'numpy.abs', 'np.abs', (['spaceV[indexCurrent, :]'], {}), '(spaceV[indexCurrent, :])\n', (3383, 3408), True, 'import numpy as np\n')] |
__author__ = '<NAME>'
from PyQt4 import QtCore, QtGui
from core.pco_definitions import PixelFly
from threading import Thread
import os, time, sys, pickle
import pyqtgraph as pg
from astropy.io import fits
import numpy as np
import matlab.engine
from queue import Empty
import pygame, os, time, pickle
import win32api
class CameraWidget(QtGui.QWidget):
"""
The CameraWidget class provides the user interface for the PCO PixelFly camera. It bases the connection to the
    camera through the pyPCOPixelFly.pco_definitions module. The basic framework of the class is PyQt4, a wrapper of the
    Qt framework, and the pyqtgraph (http://www.pyqtgraph.org) module is essential for the use of this user interface.
Dependencies:
-- SC2_Cam.dll : the dynamic library that interfaces the camera hardware (please contain it in the same folder as
the file).
-- (Optional) App.ico : the application icon of pco (also needs to be in the same directory).
Basic usage:
Shortcuts:
-- Ctrl + Q : Quits application
-- Ctrl + R :Resets original scale to image
Contact: <NAME>, <EMAIL>
"""
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.path = os.path.dirname(os.path.realpath("__file__"))
self.save_dir = self.path
self.camera = PixelFly(self.path)
self.connected = False
self.alive = False
self.live_view_bool = False
self.u = 1
self.time_unit_dict = dict(us=1, ms=2)
self.save_settings = self.load_settings()
# set background color to dark gray
self.setAutoFillBackground(True)
p = self.palette()
p.setColor(self.backgroundRole(), QtCore.Qt.darkGray)
self.setPalette(p)
    def create_gui(self, MainWindow):
        """
        Creates user interface. Initializes all widgets of the application.

        Builds, in order: the central widget and its layouts, the camera
        control buttons (connection, exposure-time presets, recording), the
        deformable-mirror (DM) and SLM control sections, the image view with
        histogram / ROI / crosscut tools, the indicator row, the menu bar and
        the status bar.
        :param MainWindow: The Main Application Window -> QtGui.MainWindow()
        :return:
        """
        # central widget of the Main Window
        self.central_widget = QtGui.QWidget(MainWindow)
        # set background color to dark gray
        self.central_widget.setAutoFillBackground(True)
        p = self.central_widget.palette()
        p.setColor(self.central_widget.backgroundRole(), QtCore.Qt.darkGray)
        self.central_widget.setPalette(p)
        # Grid layout to place all widgets
        self.widget_layout = QtGui.QGridLayout()
        # Graphics Layout Widget to put the image and histogram
        self.gw = pg.GraphicsLayoutWidget()
        # make margins around image items zero
        self.gw.ci.layout.setContentsMargins(0,0,0,0)
        # Graphics Layout Widget to put the crosscut curve plot
        self.gw_crosscut = pg.GraphicsLayoutWidget()
        MainWindow.setCentralWidget(self.central_widget)
        # the controls_layout contains all controls of the camera (eg. connection, exposure time, recording..)
        self.controls_layout = QtGui.QGridLayout()
        self.controls_layout.setSpacing(20) # set spacing between widgets to 20 pixels
        # indicators_layout contains all indicators of the camera feed
        # The maximum count, the average count in the ROI region, buttons for ROI and crosscut, as well as
        # controls of the gray values if the image.
        self.indicators_layout = QtGui.QGridLayout()
        # ==============================================================================================================
        # CONTROL BUTTONS
        # ==============================================================================================================
        # Button to connect to the camera. Will turn red and display disconnect if it successfully connects.
        self.ConnectBtn = QtGui.QPushButton('CONNECT')
        self.controls_layout.addWidget(self.ConnectBtn, 0, 0)
        # layout for exposure time controls
        self.exsposure_time_layout = QtGui.QGridLayout()
        self.controls_layout.addItem(self.exsposure_time_layout, 2, 0, 4, 5)
        # 6 preset values of exposure time. They will be saved and reloaded through a python pickle file.
        preset_values = self.save_settings['exposure times']
        time_label1 = QtGui.QLabel("1")
        time_label2 = QtGui.QLabel("2")
        time_label3 = QtGui.QLabel("3")
        time_label4 = QtGui.QLabel("4")
        time_label5 = QtGui.QLabel("5")
        time_label6 = QtGui.QLabel("6")
        self.exp_time1 = QtGui.QPushButton(preset_values[0])
        self.exp_time2 = QtGui.QPushButton(preset_values[1])
        self.exp_time3 = QtGui.QPushButton(preset_values[2])
        self.exp_time4 = QtGui.QPushButton(preset_values[3])
        self.exp_time5 = QtGui.QPushButton(preset_values[4])
        self.exp_time6 = QtGui.QPushButton(preset_values[5])
        exposure_frame_title = QtGui.QLabel("Exposure time controls")
        self.exsposure_time_layout.addWidget(exposure_frame_title, 0, 0, 1, 3)
        self.exsposure_time_layout.addWidget(time_label1, 1, 0, 1, 1)
        self.exsposure_time_layout.addWidget(time_label2, 2, 0, 1, 1)
        self.exsposure_time_layout.addWidget(time_label3, 3, 0, 1, 1)
        self.exsposure_time_layout.addWidget(time_label4, 1, 2, 1, 1)
        self.exsposure_time_layout.addWidget(time_label5, 2, 2, 1, 1)
        self.exsposure_time_layout.addWidget(time_label6, 3, 2, 1, 1)
        self.exsposure_time_layout.addWidget(self.exp_time1, 1,1, 1, 1)
        self.exsposure_time_layout.addWidget(self.exp_time2, 2,1, 1, 1)
        self.exsposure_time_layout.addWidget(self.exp_time3, 3,1, 1, 1)
        self.exsposure_time_layout.addWidget(self.exp_time4, 1,3, 1, 1)
        self.exsposure_time_layout.addWidget(self.exp_time5, 2,3, 1, 1)
        self.exsposure_time_layout.addWidget(self.exp_time6, 3,3, 1, 1)
        # Edit line widget to input exposure time. It accepts us and ms units with the option of setting a float for
        # the ms time unit (eg. 1.5 ms)
        self.exp_time_in = QtGui.QLineEdit()
        # time units list
        self.time_units = QtGui.QComboBox()
        # save the time in one of the preset values.
        self.save_time = QtGui.QComboBox()
        self.exsposure_time_layout.addWidget(self.exp_time_in, 4, 2, 1, 3)
        self.exsposure_time_layout.addWidget(self.time_units, 4, 5, 1, 2)
        self.exsposure_time_layout.addWidget(self.save_time, 4, 0, 1, 2)
        # layout to host the recording controls
        self.recording_layout = QtGui.QGridLayout()
        self.controls_layout.addItem(self.recording_layout, 6, 0, 3, 3)
        recording_label = QtGui.QLabel("Recording controls")
        self.recording_layout.addWidget(recording_label, 0, 0, 1, 3)
        # Live button puts the camera in live view. Has to be stopped before exiting.
        self.LiveBtn = QtGui.QPushButton('LIVE')
        # Records the specified number of frames and lets the user name the file while adding 000x at the end
        # of the file name in FITS data format.
        self.RecordBtn = QtGui.QPushButton('RECORD')
        # stops live view/recording and disarms the camera
        self.StopBtn = QtGui.QPushButton('STOP')
        # Label for number of frames to save
        frame_lab = QtGui.QLabel('# frames to record:')
        # Edit line that accepts integers of the number of frames to save.
        self.FramesLab = QtGui.QLineEdit()
        self.recording_layout.addWidget(self.LiveBtn, 1, 0, 1, 1)
        self.recording_layout.addWidget(self.RecordBtn, 1, 1, 1, 1)
        #self.recording_layout.addWidget(self.StopBtn, 2, 0)
        self.recording_layout.addWidget(frame_lab, 2, 0, 1, 1)
        self.recording_layout.addWidget(self.FramesLab, 2, 1)
        # Callbacks for all the control buttons
        # NOTE(review): buttons 1-5 are wired to 'clicked' but button 6 to
        # 'released' -- presumably equivalent for a plain push button; confirm.
        self.exp_time1.clicked.connect(self.exp_time_callback)
        self.exp_time2.clicked.connect(self.exp_time_callback)
        self.exp_time3.clicked.connect(self.exp_time_callback)
        self.exp_time4.clicked.connect(self.exp_time_callback)
        self.exp_time5.clicked.connect(self.exp_time_callback)
        self.exp_time6.released.connect(self.exp_time_callback)
        self.exp_time_list = [self.exp_time1, self.exp_time2, self.exp_time3, self.exp_time4,
                              self.exp_time5, self.exp_time6]
        # Add list options for time unit and save buttons.
        self.time_units.addItem("us")
        self.time_units.addItem("ms")
        self.time_units.activated[str].connect(self.onActivatedUnits)
        self.save_time.addItem("Save in")
        self.save_time.addItem("1")
        self.save_time.addItem("2")
        self.save_time.addItem("3")
        self.save_time.addItem("4")
        self.save_time.addItem("5")
        self.save_time.addItem("6")
        self.save_time.activated[str].connect(self.onActivatedSave)
        # Connect Enter/Return key press with callback for setting the exposure time.
        self.exp_time_in.returnPressed.connect(self.onReturnPress)
        # Connect callbacks for connect, live and stop buttons
        self.ConnectBtn.clicked.connect(self.connect_camera)
        self.ConnectBtn.setStyleSheet("background-color: darkCyan")
        self.FramesLab.setText('20')
        self.LiveBtn.clicked.connect(self.live_callback)
        #self.StopBtn.clicked.connect(self.stop_callback)
        self.RecordBtn.clicked.connect(self.record_callback)
        # layout to host the response matrix m
        self.ReponseMatrix_layout = QtGui.QGridLayout()
        self.controls_layout.addItem(self.ReponseMatrix_layout, 15, 0, 3, 3)
        # to start DM
        self.openDM_Btn = QtGui.QPushButton('open DM')
        # for response matrix
        self.ReponseMatrix_Btn = QtGui.QPushButton('RESPONSE MATRIX')
        # to close the DM after measurement
        self.closeDM_Btn = QtGui.QPushButton('close DM')
        # for the Zernike coefficient
        Enter_ZAmplitude = QtGui.QLabel("Amplitude:")
        self.Zernike_coef = QtGui.QLineEdit()
        self.Zernike_coef.setText("5")
        self.ReponseMatrix_layout.addWidget(Enter_ZAmplitude, 1, 0)
        self.ReponseMatrix_layout.addWidget(self.Zernike_coef, 1, 1)
        self.ReponseMatrix_layout.addWidget(self.ReponseMatrix_Btn, 3, 0)
        self.ReponseMatrix_layout.addWidget(self.closeDM_Btn, 2, 1)
        self.ReponseMatrix_layout.addWidget(self.openDM_Btn, 2, 0)
        self.ReponseMatrix_Btn.clicked.connect(self.Measure_ResponseMatrix)
        self.openDM_Btn.clicked.connect(self.open_DM)
        self.closeDM_Btn.clicked.connect(self.close_DM)
        # layout to host the SLM
        self.SLM_layout = QtGui.QGridLayout()
        self.controls_layout.addItem(self.SLM_layout, 10, 0, 3, 3)
        #to start DM
        self.activateSLM_Btn = QtGui.QPushButton('Initialize SLM')
        # to close the DM after measurement
        # self.closeSLM_Btn = QtGui.QPushButton('close SLM')
        # to create the phase map
        #self.createSLM_Btn = QtGui.QPushButton('create')
        # for the Zernike coefficient
        Enter_IoverD = QtGui.QLabel("file:")
        #self.Enter_IoverD .setText("D:\Xin LU\\real response matrix\\11X11\\Phase_shift_")
        self.file_address = QtGui.QLineEdit()
        # NOTE(review): hard-coded absolute path -- machine-specific default;
        # confirm before deploying on another setup.
        self.file_address .setText("D:\Xin LU\\real response matrix\\9X9\\phase13_")
        self.SLM_layout.addWidget(Enter_IoverD, 1, 0)
        self.SLM_layout.addWidget(self.file_address , 1, 1)
        self.SLM_layout.addWidget(self.activateSLM_Btn, 2, 0)
        self.activateSLM_Btn.clicked.connect(self.activate_SLM)
        #self.closeSLM_Btn.clicked.connect(self.close_SLM)
        # ==============================================================================================================
        # IMAGE OPTIONS AND HANDLES
        # ==============================================================================================================
        # vb is a viewbox that contains the image item.
        self.vb = pg.ViewBox()
        # add the view box to the graphics layout
        self.gw.addItem(self.vb)
        # set the aspect while scaling to be locked, i.e. both axis scale the same.
        self.vb.setAspectLocked(lock=True, ratio=1)
        # invert Y axis -> PyQt <-> Numpy arrays convention
        self.vb.invertY()
        # Image Item is the image displaying item. Has a lot of options and the user can zoom in/out by pressing the
        # right mouse button and moving the mouse up/down. Furthermore by going over the image with the mouse will
        # indicate the coordinates and value.
        self.image = pg.ImageItem()
        self.vb.addItem(self.image)
        # Histogram of the displayed image. User can move the histogram axis and the gray values.
        self.hist = pg.HistogramLUTItem(self.image, fillHistogram=False)
        self.gw.addItem(self.hist)
        # initialize image container variable (camera frame size, 14-bit data)
        self.im = np.zeros((1392, 1040))
        # set image to display
        self.image.setImage(self.im)
        # set initial gray levels
        self.image.setLevels([200, 16383])
        self.hist.setHistogramRange(200, 16383)
        # Region Of Interest(ROI) widget that allows user to define a rectangle of tje image and the average count
        # within this will be displayed.
        #self.save_settings['ROI position']= ()
        self.roi = pg.ROI(pos=self.save_settings['ROI position'], size=self.save_settings['ROI size'])
        self.roi.addScaleHandle([1, 1], [0, 0])
        self.roi.alive = False
        self.vb.addItem(self.roi)
        self.roi.hide()
        # User can define line and place it on the image and the values profile will be plotted on the crosscut
        # graphics layout.
        self.line_roi = pg.LineSegmentROI([[680, 520], [720, 520]], pen='r')
        self.vb.addItem(self.line_roi)
        self.line_roi.hide()
        self.line_roi.alive = False
        # plot item to contain the crosscut curve
        crosscut_plot = pg.PlotItem()
        # crosscut curve that plot the data of the line
        self.crosscut_curve = pg.PlotCurveItem()
        self.gw_crosscut.addItem(crosscut_plot)
        crosscut_plot.addItem(self.crosscut_curve)
        self.gw_crosscut.hide()
        self.gw_crosscut.setFixedWidth(800)
        self.gw_crosscut.setFixedHeight(200)
        # make viewbox accept mouse hover events
        self.vb.acceptHoverEvents()
        # connect mouse moving event to callback
        self.vb.scene().sigMouseMoved.connect(self.mouseMoved)
        self.x, self.y = 0, 0 # mouse position
        # connect Ctrl + R key sequence to resetting the image to its original scale
        shortcut = QtGui.QShortcut(QtGui.QKeySequence('Ctrl+R'), MainWindow)
        shortcut.activated.connect(self.refresh_image)
        reset_btn = QtGui.QPushButton('Reset zoom')
        reset_btn.clicked.connect(self.refresh_image)
        # checkbox enabling log scale
        self.log_scale = QtGui.QCheckBox("Log scale")
        self.log_scale.stateChanged.connect(self.log_scale_callback)
        self.widget_layout.addWidget(self.gw, 0, 0, 6, 8)
        self.widget_layout.addWidget(self.gw_crosscut, 6, 3, 2, 6)
        self.widget_layout.addItem(self.controls_layout, 1, 8)
        self.widget_layout.addItem(self.indicators_layout, 7, 0, 2, 6)
        self.indicators_layout.addWidget(reset_btn, 2, 6, 1, 1)
        self.indicators_layout.addWidget(self.log_scale, 2, 7, 1, 1)
        # Indicator showing maxvalue of image being displayed
        self.max_indicator_lab = QtGui.QLabel('Max value')
        font = QtGui.QFont("Calibri", 18)
        self.max_indicator_lab.setFont(font)
        self.indicators_layout.addWidget(self.max_indicator_lab, 0,0,1,1)
        self.max_indicator = QtGui.QLabel(str(np.max(self.im)))
        self.max_indicator.setFont(font)
        self.indicators_layout.addWidget(self.max_indicator, 0,1,1,1)
        # Indicator showing average value within roi if it's selected
        self.roi_indicator = QtGui.QLabel('-')
        self.roi_indicator.setFont(QtGui.QFont("Calibri", 18))
        roi_indicator_lab = QtGui.QLabel('ROI average counts:')
        roi_indicator_lab.setFont(QtGui.QFont("Calibri", 18))
        self.indicators_layout.addWidget(roi_indicator_lab, 1, 0, 1, 1)
        self.indicators_layout.addWidget(self.roi_indicator, 1, 1, 1, 1)
        # Edit widget that allow setting the gray-levels
        self.gray_max = 16383
        self.gray_min = 200
        self.gray_max_edit = QtGui.QLineEdit(str(self.gray_max))
        self.gray_min_edit = QtGui.QLineEdit(str(self.gray_min))
        self.gray_min_lab = QtGui.QLabel('Min:')
        self.gray_max_lab = QtGui.QLabel('Max:')
        self.gray_min_edit.returnPressed.connect(self.set_gray_min)
        self.gray_max_edit.returnPressed.connect(self.set_gray_max)
        self.indicators_layout.addWidget(self.gray_min_lab, 2, 2, 1, 1)
        self.indicators_layout.addWidget(self.gray_max_lab, 2, 4, 1, 1)
        self.indicators_layout.addWidget(self.gray_min_edit, 2, 3, 1, 1)
        self.indicators_layout.addWidget(self.gray_max_edit, 2, 5, 1, 1)
        # Buttons for ROI and crosscut line
        roi_button = QtGui.QPushButton('ROI')
        crosscut_button = QtGui.QPushButton('Crosscut')
        self.indicators_layout.addWidget(roi_button, 2, 0, 1, 1)
        self.indicators_layout.addWidget(crosscut_button, 2, 1, 1, 1)
        roi_button.clicked.connect(self.roi_clicked)
        crosscut_button.clicked.connect(self.crosscut_clicked)
        #########################################
        self.central_widget.setLayout(self.widget_layout)
        # ==============================================================================================================
        # MENU BAR
        # ==============================================================================================================
        self.menubar = QtGui.QMenuBar(MainWindow)
        #self.menubar.setGeometry(QtCore.QRect(0, 0, 1027, 35))
        filemenu = self.menubar.addMenu('&File')
        exitAction = QtGui.QAction(QtGui.QIcon('exit.png'), '&Exit', self)
        exitAction.setShortcut('Ctrl+Q')
        exitAction.triggered.connect(self.closeEvent)
        filemenu.addAction(exitAction)
        MainWindow.setMenuBar(self.menubar)
        # ==============================================================================================================
        # STATUS BAR
        # ==============================================================================================================
        self.statusbar = QtGui.QStatusBar(MainWindow)
        font2 = QtGui.QFont("Calibri", 15)
        #self.statusbar.setGeometry(QtCore.QRect(0, 600, 1027, 35))
        self.statusbar.setStyleSheet("background-color: darkCyan")
        self.connection_status_lab = QtGui.QLabel('Connection status: ')
        self.connection_status_lab.setFont(font2)
        self.connection_status = QtGui.QLabel('Disconnected ')
        self.connection_status.setFont(font2)
        self.statusbar.addPermanentWidget(self.connection_status_lab)
        self.statusbar.addPermanentWidget(self.connection_status)
        self.display_status_lab = QtGui.QLabel('Display status: ')
        self.display_status_lab.setFont(font2)
        self.display_status = QtGui.QLabel('Idle ')
        self.display_status.setFont(font2)
        self.statusbar.addPermanentWidget(self.display_status_lab)
        self.statusbar.addPermanentWidget(self.display_status)
        self.measurement_status_lab = QtGui.QLabel('Measurement status: ')
        self.measurement_status_lab.setFont(font2)
        self.measurement_status = QtGui.QLabel(' - ')
        self.measurement_status.setFont(font2)
        self.statusbar.addPermanentWidget(self.measurement_status_lab)
        self.statusbar.addPermanentWidget(self.measurement_status)
        self.mouse_pos_lab = QtGui.QLabel('Mouse position: ')
        self.mouse_pos_lab.setFont(font2)
        self.mouse_pos = QtGui.QLabel(' - ')
        self.mouse_pos.setFont(font2)
        self.statusbar.addPermanentWidget(self.mouse_pos_lab)
        self.statusbar.addPermanentWidget(self.mouse_pos)
        MainWindow.setStatusBar(self.statusbar)
def log_scale_callback(self):
if self.log_scale.isChecked():
self.image.setLevels([np.log(200), np.log(16383)])
self.hist.setHistogramRange(np.log(200), np.log(16383))
else:
self.image.setLevels([200, 16383])
self.hist.setHistogramRange(200, 16383)
return
def refresh_image(self):
"""
Shortcut callback. If Ctrl+R is pressed the image scales back to its original range
:return:
"""
self.vb.autoRange()
self.image.update()
return
def load_settings(self):
"""
Load settings from previous session stored in gui_settings.p
:return:
"""
fname = self.path + '\\pco_settings.p'
if os.path.isfile(fname):
return pickle.load(open(fname, 'rb'))
else:
sets = {'ROI position': [696, 520], 'ROI size': 50, 'line position': [[10, 64], [120, 64]],
'exposure times': ['500 us', '800 us', '1 ms', '10 ms', '50 ms', '100 ms']}
return sets
def save_settings_return(self):
"""
Save settings before exiting application
:return:
"""
fname = self.path + '\\pco_settings.p'
times = []
for btn in self.exp_time_list:
times.append(btn.text())
self.save_settings['exposure times'] = times
self.save_settings['ROI position'] = self.roi.pos()
self.save_settings['ROI size'] = self.roi.size()
pickle.dump(self.save_settings, open( fname, "wb" ) )
return
def roi_clicked(self):
"""
Callback to press of the ROI button. A rectangular roi will appear on the image corner.
If active the roi will disappear.
:return:
"""
if self.roi.alive:
self.roi.alive = False
self.roi_indicator.setText('-')
self.roi.hide()
else:
self.roi.alive = True
self.roi.show()
return
def crosscut_clicked(self):
"""
Callback to press of the line crosscut button. A line roi will appear on the image corner.
If active the roi will disappear. The crosscut curve will also appear.
:return:
"""
if self.line_roi.alive:
self.line_roi.alive = False
self.gw_crosscut.hide()
self.line_roi.hide()
else:
self.line_roi.alive = True
self.gw_crosscut.show()
self.line_roi.show()
return
def mouseMoved(self, event):
"""
Mouse move callback. It displays the position and value of the mouse on the image on the statusbar, in
the right corner.
:param event: Mouse move event
:return:
"""
point = self.image.mapFromScene(event)
self.x = int(point.x())
self.y = int(point.y())
# return if position out of image bounds
if self.x < 0 or self.y < 0 or self.x > 1392 or self.y > 1040:
return
try:
val = int(self.im[self.x, self.y])
self.mouse_pos.setText('%i , %i : %i'%(self.x, self.y, val))
except:
pass
return
def roi_value(self):
"""
Get data from ROI region and calculate average. The value will be displayed in the
roi indicator label.
:return:
"""
data = self.roi.getArrayRegion(self.im, self.image)
data_a, data_m, data_std = int(np.average(data)), int(np.max(data)), int(np.std(data))
self.roi_indicator.setText('%i, Max: %i, STD: %i'%(data_a, data_m, data_std))
return
def line_roi_value(self):
"""
Get data from line crosscut and plot them in the crosscut curve.
:return:
"""
data = self.line_roi.getArrayRegion(self.im, self.image)
x_data = np.array(range(len(data)))
self.crosscut_curve.setData(x_data, data)
return
def set_gray_max(self):
"""
Set max value of graylevel. For the 14bit image the value is held up to 16383 counts.
:return:
"""
val = self.gray_max_edit.text()
try:
self.gray_max = int(val)
if self.gray_max > 16383:
self.gray_max = 16383
self.gray_max_edit.setText('16383')
self.image.setLevels([self.gray_min, self.gray_max])
self.hist.setHistogramRange(self.gray_min, self.gray_max)
except ValueError:
pass
return
def set_gray_min(self):
"""
Set min value of graylevel. For the 14bit image the value is held down to 0 counts.
:return:
"""
val = self.gray_min_edit.text()
try:
self.gray_min = int(val)
if self.gray_min < 0:
self.gray_min = 0
self.gray_min_edit.setText('0')
self.image.setLevels([self.gray_min, self.gray_max])
self.hist.setHistogramRange(self.gray_min, self.gray_max)
except ValueError:
pass
return
def closeEvent(self, event):
"""
Callback when exiting application. Ensures that camera is disconnected smoothly.
:return:
"""
if self.live_view_bool or self.alive:
self.stop_callback()
if self.connected:
self.connect_camera()
self.save_settings_return()
QtGui.QApplication.closeAllWindows()
QtGui.QApplication.instance().quit()
return
def onActivatedUnits(self, text):
self.u = self.time_unit_dict[text]
return
def onActivatedSave(self, text):
if text == "Save in":
return
which = int(text[-1])-1
what = str(self.t) + ' ' + self.time_units.currentText()
self.exp_time_list[which].setText(what)
return
def onReturnPress(self):
text = self.exp_time_in.text()
t, u = 0, 0
try:
if '.' in text or ',' in text and self.u == 2:
t = int(float(text)*1000)
u = 1
self.t = float(text)
else:
self.t = int(text)
t = self.t
u = self.u
self.camera.exposure_time(t, u)
except ValueError:
pass
return
    def connect_camera(self):
        """
        Toggle the camera connection.

        If currently connected: close the camera and update the UI to the
        disconnected state.  Otherwise: open the camera, update the UI, and
        (best-effort) read back the current exposure time into the entry
        field and unit combo box.
        :return:
        """
        if self.connected:
            # NOTE(review): err from close_camera() is never checked.
            err = self.camera.close_camera()
            self.connected = False
            self.ConnectBtn.setText('CONNECT')
            self.ConnectBtn.setStyleSheet("background-color: darkCyan")
            self.connection_status.setText('Disconnected')
        else:
            # open_camera() apparently returns a truthy value on success.
            err = self.camera.open_camera()
            if not err:
                self.connection_status.setText('Error with connection')
                return
            self.connected = True
            self.ConnectBtn.setText('DISCONNECT')
            self.ConnectBtn.setStyleSheet("background-color: green")
            self.connection_status.setText('Connected')
            # Best-effort: sync UI with the camera's current exposure time.
            try:
                t, u = self.camera.get_exposure_time()
                self.exp_time_in.setText(str(t))
                index = self.time_units.findText(u)
                if index >= 0:
                    self.u = self.time_unit_dict[u]
                    self.time_units.setCurrentIndex(index)
            except:
                pass
        return
def exp_time_callback(self):
"""
Set exposure time
:param event: button press event
:return:
"""
which = self.sender().text()
t, unit = which.split(sep=' ')
unit_initial = unit
try:
if ('.' in t) or (',' in t) and (unit == 'ms'):
self.t = int(float(t)*1000)
unit = 'us'
else:
self.t = int(t)
unit = self.time_unit_dict[unit]
self.camera.exposure_time(self.t, unit)
self.u = self.time_unit_dict[unit_initial]
self.exp_time_in.setText(str(t))
index = self.time_units.findText(unit_initial)
if index >= 0:
self.time_units.setCurrentIndex(index)
except:
pass
return
    def live_callback(self):
        """
        Toggle the live-view thread.

        If a live view is already running it is stopped; otherwise a daemon
        thread running live_thread_callback() is started and the image
        consumer (update_image) is scheduled via a single-shot timer.
        :return:
        """
        if self.connected:
            if self.alive:
                self.stop_callback()
                self.LiveBtn.setStyleSheet('background-color: lightGray')
                self.LiveBtn.setChecked(False)
            else:
                self.alive = True
                self.LiveBtn.setStyleSheet('background-color: darkCyan')
                # Daemon thread so it does not block interpreter shutdown.
                self.live_thread = Thread(target=self.live_thread_callback)
                self.live_thread.setDaemon(True)
                self.live_thread.start()
                # Give the producer a head start before consuming frames.
                QtCore.QTimer.singleShot(500, self.update_image)
        else:
            self.display_status.setText('Error with live display')
        return
    def live_thread_callback(self):
        """
        Producer side of the live view: arm the camera, start recording,
        and spawn a daemon thread that continuously reads frames from the
        camera buffers into memory.  Any failure triggers stop_callback().
        :return:
        """
        try:
            # Arm camera
            self.camera.arm_camera()
            print('Camera armed')
            # Set recording status to 1
            self.camera.start_recording()
            # Allocate buffers, default=2 buffers
            self.camera.allocate_buffer(3)
            self.camera._prepare_to_record_to_memory()
            self.display_status.setText('Live view.')
            self.record_live_thread = Thread(target=self.camera.record_to_memory_2)
            print('record thread created')
            self.record_live_thread.setDaemon(True)
            self.record_live_thread.start()
            print('record thread started')
            # Flag consumed by update_image() / stop_callback().
            self.live_view_bool = True
            """
            Remember to manage all exceptions here. Look it up in PixelFly() class
            """
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt.
            self.stop_callback()
        return
    def open_DM(self):
        """
        Open the deformable mirror (DM) through a MATLAB engine session for
        the response-matrix measurement.

        On failure (non-zero error code, or an invalid USB pointer in the
        returned driver info) the MATLAB engine is shut down again.
        :return:
        """
        self.eng = matlab.engine.start_matlab()
        # start the DM; openDM returns (error_code, driver_info).
        (self.error_code, self.driver_info) = self.eng.openDM(6.0, nargout=2)
        print(self.driver_info)
        if self.error_code != 0:
            print('DM error')
            self.eng.quit()
        elif self.driver_info['USB_pointer'] <= 0:
            print('USB_pointer is invalid!!')
            self.eng.quit()
    def activate_SLM(self):
        """
        Initialize the SLM on the second monitor: detect the screen, open a
        full-screen pygame surface on it, and load the four pre-generated
        phase maps named '<file_address>0.fits' .. '<file_address>3.fits'.
        :raises UserWarning: if no second screen is connected
        """
        # get path that file is saved in
        # NOTE(review): the literal string "__file__" is passed (not the
        # __file__ variable), so this resolves relative to the CWD — verify.
        self.path = os.path.dirname(os.path.realpath("__file__"))
        # find connected screens
        screens = win32api.EnumDisplayMonitors()
        if len(screens) < 2:
            raise UserWarning('No second screen connected')
        self.hdc = screens[1][0] # handle of second screen PYHANDLE
        self.dim = screens[1][2] # screen dimensions
        self.left = self.dim[0]
        self.top = self.dim[1]
        self.right = self.dim[2]
        self.bottom = self.dim[3]
        self.width = abs(self.right - self.left)
        self.height = abs(self.bottom - self.top)
        # Identify the SLM model from its horizontal resolution.
        if self.width == 1024:
            self.SLM_type = "LC-2012"
        else:
            self.SLM_type = "Pluto"
        self.size = (self.width, self.height)
        self.dimensions = (self.width, self.height, 3)
        # set Windows ENVIRONMENTAL VARIABLE of SDL window position to the top-left corner
        # of the second screen
        os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (self.left, self.top)
        pygame.display.init()
        self.screen = pygame.display.set_mode(self.size)
        # create surface object (all black initially)
        self.surface = pygame.surfarray.make_surface(np.zeros(self.dimensions, dtype=np.uint8))
        self.screen.blit(self.surface, (0, 0))
        pygame.display.flip()
        # read generated phase map
        self.SLM_map = []
        for index in range(4):
            file_name = self.file_address.text()+str(index)+'.fits'
            self.SLM_map.append(self.import_phase_map(file_name))
def import_phase_map(self, file):
"""
Imports an phase map stored in a file and saves it in an
numpy array for use on the SLM
:param file: A .txt or .fits file of a phase map
:return: write an 8bit array of the correct dimensions in maps dict
"""
hdu = fits.open(file)
p = hdu[0].data
m = np.zeros((self.width, self.height, 3), dtype=np.uint8)
m[:, :, 0] = p
m[:, :, 1] = p
m[:, :, 2] = p
return m
def draw(self, p):
"""
Draw array onto second screen.
:param p: p is an uint8 numpy array with the correct dimensions
:return:
"""
surface = pygame.surfarray.make_surface(p)
self.screen.blit(surface, (0, 0))
pygame.display.flip()
return
    def update_image(self):
        """
        Takes images from camera queue and displays them. If roi or crosscut is enabled, it updates the
        respective values/plot. The consumer loop works using the QtCore.QTimer.singleShot() method,
        that fires the function every x ms until it is interrupted.
        :return:
        """
        # Shutdown path: live view was stopped and the camera disarmed.
        if not self.alive and (not self.camera.armed):
            if self.live_view_bool:
                self.live_view_bool = False
                time.sleep(0.1)
                self.record_live_thread.join()
                self.live_thread.join()
                del self.record_live_thread
                del self.live_thread
            self.display_status.setText('Idle')
            return
        try:
            # get newest frame from queue. Transpose it so that is fits the coordinates convention
            im = self.camera.q.get().T
            # check for log scale display
            if self.log_scale.isChecked():
                self.im = np.log(im)
            else:
                self.im = im
            try:
                # get max value from queue (may be empty between frames)
                max_val = self.camera.q_m.get()
                self.max_indicator.setText(str(max_val))
            except Empty:
                pass
            # set new image data, with options autoLevels=False so that it doesn't change the grayvalues
            # autoRange=False so that it stays at the zoom level we want and autoHistogram=False so that it does
            # not interfere with the axis of the histogram
            self.image.setImage(self.im, autoLevels=False, autoRange=False, autoHistogramRange=False,
                                autoDownsample=True)
            # if roi button is clicked
            if self.roi.alive:
                self.roi_value()
            # if crosscut line is clicked
            if self.line_roi.alive:
                self.line_roi_value()
            # mouse position value update (bounds match the sensor size)
            if 0 <= self.x <= 1392 and 0 <= self.y <= 1040:
                val = im[self.x, self.y]
                self.mouse_pos.setText('%i , %i : %.1f' % (self.x, self.y, val))
            # update image. Don't know if this is necessary..
            self.image.update()
        except Empty:
            pass
        # Run single shot timer again (self-rescheduling consumer loop)
        QtCore.QTimer.singleShot(20, self.update_image)
    def Measure_ResponseMatrix(self):
        """
        Record the images of the first 15 Zernike terms with Zernike_coefficient = self.Zernike_coef

        For each Zernike term (1..27) the coefficient is stepped 9 times by
        self.Z_coef; at each step the DM is updated through the MATLAB
        engine and, for two SLM phase shifts, a stack of frames is recorded
        and written to a FITS file whose name encodes step, term and shift.
        :return:
        """
        if not self.connected:
            return
        # check if camera had already been armed
        if self.alive and self.live_view_bool:
            try:
                self.live_view_bool = False
                self.camera.live = False # stop loop that is producing frames
                #time.sleep(1)
                self.record_live_thread.join()
                self.live_thread.join()
                del self.record_live_thread
                del self.live_thread
            except:
                pass
        elif not self.alive:
            self.alive = True
            self.camera.arm_camera()
            self.camera.start_recording()
            self.camera.allocate_buffer()
            self.camera._prepare_to_record_to_memory()
            self.alive = True
        else:
            pass
        try:
            num_of_frames = int(self.FramesLab.text())
            self.Z_coef = int(self.Zernike_coef.text())
        except ValueError:
            return
        # probably there is a conflict between the camera and DM USB connections, so the 'USB_pointer' in the self.driver_info
        # is negative, we need to read the self.driver_info from DM_GUI
        #self.driver_info = np.load('current_DMdriver_info.npy').item()
        RM_filename = QtGui.QFileDialog.getSaveFileName(self, 'Save as..', self.save_dir)
        self.save_dir = os.path.dirname(RM_filename)
        for Zernike_term in range(1,28): # 11 for first 10 Zernike terms
            #Zernike_coefficient = [0] * 15 # initialize coefficient for the first 15 Zernike terms
            # Baseline coefficients that flatten the wavefront (measured offsets).
            Zernike_coefficient =[0, 1.00143624463888, -51.5249359080095, -21.9775999768718, 0.717759222249538, 15.9050351311175, 27.0075244428022, -10.7205195257083, -2.01254461443215, -7.52510423390856, 10.8157068054894, -3.02134384531165, 8.75467126995794, -4.83968407700889, 10.0627952531622, 8.69361225696698, -0.00569977528614984, -0.147583443076208, -2.30484489161088, 0.982991691361985, -0.857744799462948, 4.92455332433157, -10.3405736785842, -0.702115323493294, 0.256143402391469, 2.20035061964973, 7.63689639452587, -2.43586089370370]
            #
            # for 11X11 # initialize coefficient for the first 15 Zernike terms to get a flat image for 11X11 pupil
            # Start the sweep 25 below the baseline for this term.
            Zernike_coefficient[Zernike_term] = Zernike_coefficient[Zernike_term] - 25
            for index_loop in range(0,9):
                Zernike_coefficient[Zernike_term] = Zernike_coefficient[Zernike_term] + self.Z_coef
                # Step label embedded in the output file names.
                vars_str = str(-20+abs(index_loop * self.Z_coef))
                # Skip DM updates whose total stroke would be out of range.
                if -1500 <= sum(Zernike_coefficient) <= 1500:
                    Zernike_coefficientM = matlab.double(Zernike_coefficient)
                    (Z_shape, Zvol_map) = self.eng.apply_zernike(Zernike_coefficientM, nargout=2)
                    # Flatten MATLAB's column vector of 1-element rows.
                    Zvol_map = [one for [one] in Zvol_map]
                    Zvol_map_Mat = matlab.double(Zvol_map)
                    self.error_code = self.eng.updateDM(self.driver_info, Zvol_map_Mat)
                    if self.error_code == 0:
                        time.sleep(0.1)
                    else:
                        return
                    for phase_shift in range(0,2):
                        # phase_shift <4 for phase shifting method
                        # phase_shift <2 for standard Zernike method
                        self.draw(self.SLM_map[phase_shift])
                        time.sleep(0.1)
                        hdu = fits.HDUList() # initialize fits object
                        record_data = []
                        record_data = self.camera.record_to_memory(num_of_frames) / 4 # :4 to make it 14 bit
                        # NOTE(review): if record_to_memory returns a numpy
                        # array, `record_data == []` is an element-wise
                        # comparison and this guard never fires — verify.
                        if record_data == []:
                            self.stop_callback()
                            return
                        hdu.append(fits.PrimaryHDU(data=record_data))
                        # other header details will come in here
                        hdu[0].header['EXPTIME'] = "%i %s" % (self.t, self.time_units.currentText())
                        # Negative steps are prefixed with 'N' instead of '-'.
                        if index_loop<4:
                            hdu.writeto(RM_filename +'N'+ vars_str[1:]+'_Z'+ str(Zernike_term+1) + '_' + str(phase_shift) + 'Pi.fits')
                        else:
                            hdu.writeto(RM_filename + vars_str + '_Z' + str(Zernike_term+1) + '_' + str(phase_shift) + 'Pi.fits')
        self.stop_callback()
        print('DONE!!!')
        return None
def close_DM(self):
"""
close DM after the response Matrix measurement
"""
if self.driver_info != None:
self.error_code = self.eng.closeDM(self.driver_info)
self.eng.quit()
    def record_callback(self):
        """
        Record specified number of frames

        Stops any running live view (or arms the camera if idle), asks for
        a destination file, records self.FramesLab frames to memory and
        writes them to '<filename>.fits' with the exposure time in the
        header, then disarms the camera via stop_callback().
        :return:
        """
        if not self.connected:
            return
        # check if camera had already been armed
        if self.alive and self.live_view_bool:
            try:
                self.live_view_bool = False
                self.camera.live = False # stop loop that is producing frames
                time.sleep(0.1)
                self.record_live_thread.join()
                self.live_thread.join()
                del self.record_live_thread
                del self.live_thread
            except:
                pass
        elif not self.alive:
            self.alive = True
            self.camera.arm_camera()
            self.camera.start_recording()
            self.camera.allocate_buffer()
            self.camera._prepare_to_record_to_memory()
            self.alive = True
        else:
            pass
        hdu = fits.HDUList() # initialize fits object
        filename = QtGui.QFileDialog.getSaveFileName(self, 'Save as..', self.save_dir)
        self.save_dir = os.path.dirname(filename)
        print(filename)
        num_of_frames = 10
        try:
            num_of_frames = int(self.FramesLab.text())
        except ValueError:
            return
        self.measurement_status.setText('Recording %d frames..'%num_of_frames)
        record_data = []
        record_data = self.camera.record_to_memory(num_of_frames)/4 # :4 to make it 14 bit
        # NOTE(review): if record_to_memory returns a numpy array, the
        # `record_data == []` comparison is element-wise and this guard
        # never fires — verify against PixelFly.record_to_memory.
        if record_data == []:
            self.stop_callback()
            return
        self.measurement_status.setText('Exporting to FITS file')
        hdu.append(fits.PrimaryHDU(data=record_data))
        # other header details will come in here
        hdu[0].header['EXPTIME'] = "%i %s" % (self.t, self.time_units.currentText())
        hdu.writeto(filename+'.fits')
        self.measurement_status.setText('Recording finished.')
        self.stop_callback()
        return None
    def stop_callback(self):
        """
        Stops live preview: signals the producer loop to end, joins both
        live-view threads, and disarms the camera.
        :return:
        """
        self.alive = False
        if self.live_view_bool:
            self.live_view_bool = False
            self.camera.live = False # stop loop that is producing frames
            # Give the producer a moment to notice the flag before joining.
            time.sleep(0.1)
            self.record_live_thread.join()
            self.live_thread.join()
            del self.record_live_thread
            del self.live_thread
        # disarm camera
        self.camera.disarm_camera()
        self.display_status.setText('Idle')
        self.measurement_status.setText('')
        return
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, the main window and
    # the camera widget, then enter the event loop.
    app = QtGui.QApplication(sys.argv)
    window = QtGui.QMainWindow()
    window.setWindowTitle('PCO.PixelFly -ETH Zurich- ')
    # Best-effort: the window icon is optional and may be missing.
    try:
        icon = QtGui.QIcon('App.ico')
        window.setWindowIcon(icon)
    except:
        pass
    pco_ui = CameraWidget(parent=None)
    pco_ui.create_gui(window)
    window.show()
    sys.exit(app.exec_())
| [
"pyqtgraph.PlotCurveItem",
"PyQt4.QtGui.QMainWindow",
"pyqtgraph.PlotItem",
"numpy.log",
"PyQt4.QtGui.QPushButton",
"PyQt4.QtGui.QLabel",
"time.sleep",
"PyQt4.QtCore.QTimer.singleShot",
"PyQt4.QtGui.QKeySequence",
"PyQt4.QtGui.QWidget.__init__",
"PyQt4.QtGui.QStatusBar",
"PyQt4.QtGui.QApplicat... | [((44330, 44358), 'PyQt4.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (44348, 44358), False, 'from PyQt4 import QtCore, QtGui\n'), ((44373, 44392), 'PyQt4.QtGui.QMainWindow', 'QtGui.QMainWindow', ([], {}), '()\n', (44390, 44392), False, 'from PyQt4 import QtCore, QtGui\n'), ((1193, 1229), 'PyQt4.QtGui.QWidget.__init__', 'QtGui.QWidget.__init__', (['self', 'parent'], {}), '(self, parent)\n', (1215, 1229), False, 'from PyQt4 import QtCore, QtGui\n'), ((1355, 1374), 'core.pco_definitions.PixelFly', 'PixelFly', (['self.path'], {}), '(self.path)\n', (1363, 1374), False, 'from core.pco_definitions import PixelFly\n'), ((2113, 2138), 'PyQt4.QtGui.QWidget', 'QtGui.QWidget', (['MainWindow'], {}), '(MainWindow)\n', (2126, 2138), False, 'from PyQt4 import QtCore, QtGui\n'), ((2479, 2498), 'PyQt4.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (2496, 2498), False, 'from PyQt4 import QtCore, QtGui\n'), ((2583, 2608), 'pyqtgraph.GraphicsLayoutWidget', 'pg.GraphicsLayoutWidget', ([], {}), '()\n', (2606, 2608), True, 'import pyqtgraph as pg\n'), ((2805, 2830), 'pyqtgraph.GraphicsLayoutWidget', 'pg.GraphicsLayoutWidget', ([], {}), '()\n', (2828, 2830), True, 'import pyqtgraph as pg\n'), ((3035, 3054), 'PyQt4.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (3052, 3054), False, 'from PyQt4 import QtCore, QtGui\n'), ((3411, 3430), 'PyQt4.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (3428, 3430), False, 'from PyQt4 import QtCore, QtGui\n'), ((3841, 3869), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""CONNECT"""'], {}), "('CONNECT')\n", (3858, 3869), False, 'from PyQt4 import QtCore, QtGui\n'), ((4016, 4035), 'PyQt4.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (4033, 4035), False, 'from PyQt4 import QtCore, QtGui\n'), ((4306, 4323), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""1"""'], {}), "('1')\n", (4318, 4323), False, 'from PyQt4 import QtCore, QtGui\n'), ((4347, 
4364), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""2"""'], {}), "('2')\n", (4359, 4364), False, 'from PyQt4 import QtCore, QtGui\n'), ((4388, 4405), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""3"""'], {}), "('3')\n", (4400, 4405), False, 'from PyQt4 import QtCore, QtGui\n'), ((4429, 4446), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""4"""'], {}), "('4')\n", (4441, 4446), False, 'from PyQt4 import QtCore, QtGui\n'), ((4470, 4487), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""5"""'], {}), "('5')\n", (4482, 4487), False, 'from PyQt4 import QtCore, QtGui\n'), ((4511, 4528), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""6"""'], {}), "('6')\n", (4523, 4528), False, 'from PyQt4 import QtCore, QtGui\n'), ((4555, 4590), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['preset_values[0]'], {}), '(preset_values[0])\n', (4572, 4590), False, 'from PyQt4 import QtCore, QtGui\n'), ((4617, 4652), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['preset_values[1]'], {}), '(preset_values[1])\n', (4634, 4652), False, 'from PyQt4 import QtCore, QtGui\n'), ((4679, 4714), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['preset_values[2]'], {}), '(preset_values[2])\n', (4696, 4714), False, 'from PyQt4 import QtCore, QtGui\n'), ((4741, 4776), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['preset_values[3]'], {}), '(preset_values[3])\n', (4758, 4776), False, 'from PyQt4 import QtCore, QtGui\n'), ((4803, 4838), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['preset_values[4]'], {}), '(preset_values[4])\n', (4820, 4838), False, 'from PyQt4 import QtCore, QtGui\n'), ((4865, 4900), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['preset_values[5]'], {}), '(preset_values[5])\n', (4882, 4900), False, 'from PyQt4 import QtCore, QtGui\n'), ((4933, 4971), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""Exposure time controls"""'], {}), "('Exposure time controls')\n", (4945, 4971), False, 'from PyQt4 import QtCore, QtGui\n'), ((6103, 6120), 'PyQt4.QtGui.QLineEdit', 'QtGui.QLineEdit', ([], {}), 
'()\n', (6118, 6120), False, 'from PyQt4 import QtCore, QtGui\n'), ((6175, 6192), 'PyQt4.QtGui.QComboBox', 'QtGui.QComboBox', ([], {}), '()\n', (6190, 6192), False, 'from PyQt4 import QtCore, QtGui\n'), ((6273, 6290), 'PyQt4.QtGui.QComboBox', 'QtGui.QComboBox', ([], {}), '()\n', (6288, 6290), False, 'from PyQt4 import QtCore, QtGui\n'), ((6602, 6621), 'PyQt4.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (6619, 6621), False, 'from PyQt4 import QtCore, QtGui\n'), ((6722, 6756), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""Recording controls"""'], {}), "('Recording controls')\n", (6734, 6756), False, 'from PyQt4 import QtCore, QtGui\n'), ((6938, 6963), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""LIVE"""'], {}), "('LIVE')\n", (6955, 6963), False, 'from PyQt4 import QtCore, QtGui\n'), ((7150, 7177), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""RECORD"""'], {}), "('RECORD')\n", (7167, 7177), False, 'from PyQt4 import QtCore, QtGui\n'), ((7262, 7287), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""STOP"""'], {}), "('STOP')\n", (7279, 7287), False, 'from PyQt4 import QtCore, QtGui\n'), ((7355, 7390), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""# frames to record:"""'], {}), "('# frames to record:')\n", (7367, 7390), False, 'from PyQt4 import QtCore, QtGui\n'), ((7493, 7510), 'PyQt4.QtGui.QLineEdit', 'QtGui.QLineEdit', ([], {}), '()\n', (7508, 7510), False, 'from PyQt4 import QtCore, QtGui\n'), ((9628, 9647), 'PyQt4.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (9645, 9647), False, 'from PyQt4 import QtCore, QtGui\n'), ((9776, 9804), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""open DM"""'], {}), "('open DM')\n", (9793, 9804), False, 'from PyQt4 import QtCore, QtGui\n'), ((9870, 9906), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""RESPONSE MATRIX"""'], {}), "('RESPONSE MATRIX')\n", (9887, 9906), False, 'from PyQt4 import QtCore, QtGui\n'), ((9980, 10009), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', 
(['"""close DM"""'], {}), "('close DM')\n", (9997, 10009), False, 'from PyQt4 import QtCore, QtGui\n'), ((10077, 10103), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""Amplitude:"""'], {}), "('Amplitude:')\n", (10089, 10103), False, 'from PyQt4 import QtCore, QtGui\n'), ((10133, 10150), 'PyQt4.QtGui.QLineEdit', 'QtGui.QLineEdit', ([], {}), '()\n', (10148, 10150), False, 'from PyQt4 import QtCore, QtGui\n'), ((10794, 10813), 'PyQt4.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (10811, 10813), False, 'from PyQt4 import QtCore, QtGui\n'), ((10936, 10971), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Initialize SLM"""'], {}), "('Initialize SLM')\n", (10953, 10971), False, 'from PyQt4 import QtCore, QtGui\n'), ((11235, 11256), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""file:"""'], {}), "('file:')\n", (11247, 11256), False, 'from PyQt4 import QtCore, QtGui\n'), ((11379, 11396), 'PyQt4.QtGui.QLineEdit', 'QtGui.QLineEdit', ([], {}), '()\n', (11394, 11396), False, 'from PyQt4 import QtCore, QtGui\n'), ((12152, 12164), 'pyqtgraph.ViewBox', 'pg.ViewBox', ([], {}), '()\n', (12162, 12164), True, 'import pyqtgraph as pg\n'), ((12779, 12793), 'pyqtgraph.ImageItem', 'pg.ImageItem', ([], {}), '()\n', (12791, 12793), True, 'import pyqtgraph as pg\n'), ((12951, 13003), 'pyqtgraph.HistogramLUTItem', 'pg.HistogramLUTItem', (['self.image'], {'fillHistogram': '(False)'}), '(self.image, fillHistogram=False)\n', (12970, 13003), True, 'import pyqtgraph as pg\n'), ((13106, 13128), 'numpy.zeros', 'np.zeros', (['(1392, 1040)'], {}), '((1392, 1040))\n', (13114, 13128), True, 'import numpy as np\n'), ((13554, 13642), 'pyqtgraph.ROI', 'pg.ROI', ([], {'pos': "self.save_settings['ROI position']", 'size': "self.save_settings['ROI size']"}), "(pos=self.save_settings['ROI position'], size=self.save_settings[\n 'ROI size'])\n", (13560, 13642), True, 'import pyqtgraph as pg\n'), ((13945, 13997), 'pyqtgraph.LineSegmentROI', 'pg.LineSegmentROI', (['[[680, 520], [720, 520]]'], {'pen': 
'"""r"""'}), "([[680, 520], [720, 520]], pen='r')\n", (13962, 13997), True, 'import pyqtgraph as pg\n'), ((14181, 14194), 'pyqtgraph.PlotItem', 'pg.PlotItem', ([], {}), '()\n', (14192, 14194), True, 'import pyqtgraph as pg\n'), ((14283, 14301), 'pyqtgraph.PlotCurveItem', 'pg.PlotCurveItem', ([], {}), '()\n', (14299, 14301), True, 'import pyqtgraph as pg\n'), ((15018, 15049), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Reset zoom"""'], {}), "('Reset zoom')\n", (15035, 15049), False, 'from PyQt4 import QtCore, QtGui\n'), ((15170, 15198), 'PyQt4.QtGui.QCheckBox', 'QtGui.QCheckBox', (['"""Log scale"""'], {}), "('Log scale')\n", (15185, 15198), False, 'from PyQt4 import QtCore, QtGui\n'), ((15766, 15791), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""Max value"""'], {}), "('Max value')\n", (15778, 15791), False, 'from PyQt4 import QtCore, QtGui\n'), ((15808, 15834), 'PyQt4.QtGui.QFont', 'QtGui.QFont', (['"""Calibri"""', '(18)'], {}), "('Calibri', 18)\n", (15819, 15834), False, 'from PyQt4 import QtCore, QtGui\n'), ((16237, 16254), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""-"""'], {}), "('-')\n", (16249, 16254), False, 'from PyQt4 import QtCore, QtGui\n'), ((16348, 16383), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""ROI average counts:"""'], {}), "('ROI average counts:')\n", (16360, 16383), False, 'from PyQt4 import QtCore, QtGui\n'), ((16873, 16893), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""Min:"""'], {}), "('Min:')\n", (16885, 16893), False, 'from PyQt4 import QtCore, QtGui\n'), ((16923, 16943), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""Max:"""'], {}), "('Max:')\n", (16935, 16943), False, 'from PyQt4 import QtCore, QtGui\n'), ((17447, 17471), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""ROI"""'], {}), "('ROI')\n", (17464, 17471), False, 'from PyQt4 import QtCore, QtGui\n'), ((17499, 17528), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Crosscut"""'], {}), "('Crosscut')\n", (17516, 17528), False, 'from PyQt4 import QtCore, QtGui\n'), 
((18182, 18208), 'PyQt4.QtGui.QMenuBar', 'QtGui.QMenuBar', (['MainWindow'], {}), '(MainWindow)\n', (18196, 18208), False, 'from PyQt4 import QtCore, QtGui\n'), ((18876, 18904), 'PyQt4.QtGui.QStatusBar', 'QtGui.QStatusBar', (['MainWindow'], {}), '(MainWindow)\n', (18892, 18904), False, 'from PyQt4 import QtCore, QtGui\n'), ((18922, 18948), 'PyQt4.QtGui.QFont', 'QtGui.QFont', (['"""Calibri"""', '(15)'], {}), "('Calibri', 15)\n", (18933, 18948), False, 'from PyQt4 import QtCore, QtGui\n'), ((19124, 19159), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""Connection status: """'], {}), "('Connection status: ')\n", (19136, 19159), False, 'from PyQt4 import QtCore, QtGui\n'), ((19245, 19274), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""Disconnected """'], {}), "('Disconnected ')\n", (19257, 19274), False, 'from PyQt4 import QtCore, QtGui\n'), ((19495, 19527), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""Display status: """'], {}), "('Display status: ')\n", (19507, 19527), False, 'from PyQt4 import QtCore, QtGui\n'), ((19607, 19628), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""Idle """'], {}), "('Idle ')\n", (19619, 19628), False, 'from PyQt4 import QtCore, QtGui\n'), ((19844, 19880), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""Measurement status: """'], {}), "('Measurement status: ')\n", (19856, 19880), False, 'from PyQt4 import QtCore, QtGui\n'), ((19968, 19987), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['""" - """'], {}), "(' - ')\n", (19980, 19987), False, 'from PyQt4 import QtCore, QtGui\n'), ((20208, 20240), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""Mouse position: """'], {}), "('Mouse position: ')\n", (20220, 20240), False, 'from PyQt4 import QtCore, QtGui\n'), ((20310, 20329), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['""" - """'], {}), "(' - ')\n", (20322, 20329), False, 'from PyQt4 import QtCore, QtGui\n'), ((21331, 21352), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (21345, 21352), False, 'import pygame, os, time, pickle\n'), ((26190, 26226), 
'PyQt4.QtGui.QApplication.closeAllWindows', 'QtGui.QApplication.closeAllWindows', ([], {}), '()\n', (26224, 26226), False, 'from PyQt4 import QtCore, QtGui\n'), ((31922, 31952), 'win32api.EnumDisplayMonitors', 'win32api.EnumDisplayMonitors', ([], {}), '()\n', (31950, 31952), False, 'import win32api\n'), ((32841, 32862), 'pygame.display.init', 'pygame.display.init', ([], {}), '()\n', (32860, 32862), False, 'import pygame, os, time, pickle\n'), ((32886, 32920), 'pygame.display.set_mode', 'pygame.display.set_mode', (['self.size'], {}), '(self.size)\n', (32909, 32920), False, 'import pygame, os, time, pickle\n'), ((33108, 33129), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (33127, 33129), False, 'import pygame, os, time, pickle\n'), ((33684, 33699), 'astropy.io.fits.open', 'fits.open', (['file'], {}), '(file)\n', (33693, 33699), False, 'from astropy.io import fits\n'), ((33738, 33792), 'numpy.zeros', 'np.zeros', (['(self.width, self.height, 3)'], {'dtype': 'np.uint8'}), '((self.width, self.height, 3), dtype=np.uint8)\n', (33746, 33792), True, 'import numpy as np\n'), ((34085, 34117), 'pygame.surfarray.make_surface', 'pygame.surfarray.make_surface', (['p'], {}), '(p)\n', (34114, 34117), False, 'import pygame, os, time, pickle\n'), ((34170, 34191), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (34189, 34191), False, 'import pygame, os, time, pickle\n'), ((36596, 36643), 'PyQt4.QtCore.QTimer.singleShot', 'QtCore.QTimer.singleShot', (['(20)', 'self.update_image'], {}), '(20, self.update_image)\n', (36620, 36643), False, 'from PyQt4 import QtCore, QtGui\n'), ((38158, 38225), 'PyQt4.QtGui.QFileDialog.getSaveFileName', 'QtGui.QFileDialog.getSaveFileName', (['self', '"""Save as.."""', 'self.save_dir'], {}), "(self, 'Save as..', self.save_dir)\n", (38191, 38225), False, 'from PyQt4 import QtCore, QtGui\n'), ((38251, 38279), 'os.path.dirname', 'os.path.dirname', (['RM_filename'], {}), '(RM_filename)\n', (38266, 38279), False, 'import pygame, 
os, time, pickle\n'), ((42595, 42609), 'astropy.io.fits.HDUList', 'fits.HDUList', ([], {}), '()\n', (42607, 42609), False, 'from astropy.io import fits\n'), ((42658, 42725), 'PyQt4.QtGui.QFileDialog.getSaveFileName', 'QtGui.QFileDialog.getSaveFileName', (['self', '"""Save as.."""', 'self.save_dir'], {}), "(self, 'Save as..', self.save_dir)\n", (42691, 42725), False, 'from PyQt4 import QtCore, QtGui\n'), ((42751, 42776), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (42766, 42776), False, 'import pygame, os, time, pickle\n'), ((44495, 44517), 'PyQt4.QtGui.QIcon', 'QtGui.QIcon', (['"""App.ico"""'], {}), "('App.ico')\n", (44506, 44517), False, 'from PyQt4 import QtCore, QtGui\n'), ((1267, 1295), 'os.path.realpath', 'os.path.realpath', (['"""__file__"""'], {}), "('__file__')\n", (1283, 1295), False, 'import pygame, os, time, pickle\n'), ((14899, 14927), 'PyQt4.QtGui.QKeySequence', 'QtGui.QKeySequence', (['"""Ctrl+R"""'], {}), "('Ctrl+R')\n", (14917, 14927), False, 'from PyQt4 import QtCore, QtGui\n'), ((16291, 16317), 'PyQt4.QtGui.QFont', 'QtGui.QFont', (['"""Calibri"""', '(18)'], {}), "('Calibri', 18)\n", (16302, 16317), False, 'from PyQt4 import QtCore, QtGui\n'), ((16419, 16445), 'PyQt4.QtGui.QFont', 'QtGui.QFont', (['"""Calibri"""', '(18)'], {}), "('Calibri', 18)\n", (16430, 16445), False, 'from PyQt4 import QtCore, QtGui\n'), ((18362, 18385), 'PyQt4.QtGui.QIcon', 'QtGui.QIcon', (['"""exit.png"""'], {}), "('exit.png')\n", (18373, 18385), False, 'from PyQt4 import QtCore, QtGui\n'), ((30660, 30705), 'threading.Thread', 'Thread', ([], {'target': 'self.camera.record_to_memory_2'}), '(target=self.camera.record_to_memory_2)\n', (30666, 30705), False, 'from threading import Thread\n'), ((31839, 31867), 'os.path.realpath', 'os.path.realpath', (['"""__file__"""'], {}), "('__file__')\n", (31855, 31867), False, 'import pygame, os, time, pickle\n'), ((33008, 33049), 'numpy.zeros', 'np.zeros', (['self.dimensions'], {'dtype': 'np.uint8'}), 
'(self.dimensions, dtype=np.uint8)\n', (33016, 33049), True, 'import numpy as np\n'), ((43325, 43358), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {'data': 'record_data'}), '(data=record_data)\n', (43340, 43358), False, 'from astropy.io import fits\n'), ((43947, 43962), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (43957, 43962), False, 'import pygame, os, time, pickle\n'), ((16003, 16018), 'numpy.max', 'np.max', (['self.im'], {}), '(self.im)\n', (16009, 16018), True, 'import numpy as np\n'), ((20722, 20733), 'numpy.log', 'np.log', (['(200)'], {}), '(200)\n', (20728, 20733), True, 'import numpy as np\n'), ((20735, 20748), 'numpy.log', 'np.log', (['(16383)'], {}), '(16383)\n', (20741, 20748), True, 'import numpy as np\n'), ((24170, 24186), 'numpy.average', 'np.average', (['data'], {}), '(data)\n', (24180, 24186), True, 'import numpy as np\n'), ((24193, 24205), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (24199, 24205), True, 'import numpy as np\n'), ((24212, 24224), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (24218, 24224), True, 'import numpy as np\n'), ((26236, 26265), 'PyQt4.QtGui.QApplication.instance', 'QtGui.QApplication.instance', ([], {}), '()\n', (26263, 26265), False, 'from PyQt4 import QtCore, QtGui\n'), ((29779, 29819), 'threading.Thread', 'Thread', ([], {'target': 'self.live_thread_callback'}), '(target=self.live_thread_callback)\n', (29785, 29819), False, 'from threading import Thread\n'), ((29929, 29977), 'PyQt4.QtCore.QTimer.singleShot', 'QtCore.QTimer.singleShot', (['(500)', 'self.update_image'], {}), '(500, self.update_image)\n', (29953, 29977), False, 'from PyQt4 import QtCore, QtGui\n'), ((34714, 34729), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (34724, 34729), False, 'import pygame, os, time, pickle\n'), ((35239, 35249), 'numpy.log', 'np.log', (['im'], {}), '(im)\n', (35245, 35249), True, 'import numpy as np\n'), ((42042, 42057), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (42052, 
42057), False, 'import pygame, os, time, pickle\n'), ((20652, 20663), 'numpy.log', 'np.log', (['(200)'], {}), '(200)\n', (20658, 20663), True, 'import numpy as np\n'), ((20665, 20678), 'numpy.log', 'np.log', (['(16383)'], {}), '(16383)\n', (20671, 20678), True, 'import numpy as np\n'), ((39977, 39992), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (39987, 39992), False, 'import pygame, os, time, pickle\n'), ((40331, 40346), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (40341, 40346), False, 'import pygame, os, time, pickle\n'), ((40378, 40392), 'astropy.io.fits.HDUList', 'fits.HDUList', ([], {}), '()\n', (40390, 40392), False, 'from astropy.io import fits\n'), ((40745, 40778), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {'data': 'record_data'}), '(data=record_data)\n', (40760, 40778), False, 'from astropy.io import fits\n')] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import glob
import shutil
from collections import defaultdict
import numpy as np
import simplejson as json
import argparse
def create_dataset(number: int, directory: str) -> None:
# delete old dataset and create new dataset
if os.path.isdir(directory):
shutil.rmtree(directory)
os.makedirs(directory)
# select images randomly
fns = glob.glob('val2017/*')
fns = [
os.path.basename(x) for x in fns
if x.endswith(('.png', '.jpg'))
]
np.random.shuffle(fns)
fns = sorted(fns[:number])
# convert COCO annotations to the format of `object_detection_metrics`
with open('annotations/instances_val2017.json', 'rt') as rf:
orig_anns = json.load(fp=rf)
bboxes = defaultdict(list)
for ann in orig_anns['annotations']:
image_id = '%012d' % ann['image_id']
bboxes[image_id].append({
'category_id': ann['category_id'],
'bbox': [float(x) for x in ann['bbox']],
})
# copy images and dump annotations
new_anns = list()
for fn in fns:
shutil.copy(f'val2017/{fn}', f'{directory}/{fn}')
image_id = os.path.splitext(fn)[0]
new_anns.append({
'image_id': image_id,
'bboxes': bboxes[image_id]
})
with open(f'{directory}/ground_truths.jsonl', 'wt') as wf:
for ann in new_anns:
wf.write(json.dumps(ann) + '\n')
return
if __name__ == '__main__':
if not (os.path.isdir('val2017') and os.path.isdir('annotations')):
raise SystemError('run `download_coco_val2017.sh` first')
parser = argparse.ArgumentParser(
description='create small dataset from COCO val2017 dataset'
)
parser.add_argument(
'--number', '-n', type=int, default=10,
help='number of images (default: 10)'
)
parser.add_argument(
'--directory', '-d', type=str, default='sample_dataset',
help='directory name (defalt: "sample_dataset")'
)
args = parser.parse_args()
create_dataset(**vars(args))
| [
"os.makedirs",
"argparse.ArgumentParser",
"simplejson.dumps",
"os.path.splitext",
"simplejson.load",
"os.path.isdir",
"collections.defaultdict",
"os.path.basename",
"shutil.copy",
"shutil.rmtree",
"glob.glob",
"numpy.random.shuffle"
] | [((292, 316), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (305, 316), False, 'import os\n'), ((355, 377), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (366, 377), False, 'import os\n'), ((417, 439), 'glob.glob', 'glob.glob', (['"""val2017/*"""'], {}), "('val2017/*')\n", (426, 439), False, 'import glob\n'), ((543, 565), 'numpy.random.shuffle', 'np.random.shuffle', (['fns'], {}), '(fns)\n', (560, 565), True, 'import numpy as np\n'), ((787, 804), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (798, 804), False, 'from collections import defaultdict\n'), ((1655, 1745), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""create small dataset from COCO val2017 dataset"""'}), "(description=\n 'create small dataset from COCO val2017 dataset')\n", (1678, 1745), False, 'import argparse\n'), ((326, 350), 'shutil.rmtree', 'shutil.rmtree', (['directory'], {}), '(directory)\n', (339, 350), False, 'import shutil\n'), ((460, 479), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (476, 479), False, 'import os\n'), ((757, 773), 'simplejson.load', 'json.load', ([], {'fp': 'rf'}), '(fp=rf)\n', (766, 773), True, 'import simplejson as json\n'), ((1124, 1173), 'shutil.copy', 'shutil.copy', (['f"""val2017/{fn}"""', 'f"""{directory}/{fn}"""'], {}), "(f'val2017/{fn}', f'{directory}/{fn}')\n", (1135, 1173), False, 'import shutil\n'), ((1193, 1213), 'os.path.splitext', 'os.path.splitext', (['fn'], {}), '(fn)\n', (1209, 1213), False, 'import os\n'), ((1516, 1540), 'os.path.isdir', 'os.path.isdir', (['"""val2017"""'], {}), "('val2017')\n", (1529, 1540), False, 'import os\n'), ((1545, 1573), 'os.path.isdir', 'os.path.isdir', (['"""annotations"""'], {}), "('annotations')\n", (1558, 1573), False, 'import os\n'), ((1440, 1455), 'simplejson.dumps', 'json.dumps', (['ann'], {}), '(ann)\n', (1450, 1455), True, 'import simplejson as json\n')] |
""" Here the plots and visualizations
"""
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
def plt_arima_forecast(y, forecasts, length_for_training=None,
conf_int=False,
title='Country name here',
y_label='Deaths',
x=None,
save_here='arima_case.png',
show_plot = False):
"""
:param y: real vualues
:param forecasts: predicted values
:param length_for_training: like 90% lenght of y
:param save_here: str where to save.
:return:
"""
if not (isinstance(length_for_training, int) | isinstance(length_for_training, float)):
length_for_training = forecasts.__len__()
print("WARNING: please use an int or float for forecasting length. Setting this to:", length_for_training)
plt.clf()
if x is None:
x = np.arange(y.shape[0])
plt.plot(x, y, 'b*--', label='Real')
plt.plot(x[length_for_training:], forecasts, 'go--', label='Forecast')
plt.xlabel('Date')
plt.title(title)
plt.ylabel(y_label)
if conf_int is not False:
plt.fill_between(x[length_for_training:],
conf_int[:, 0], conf_int[:, 1],
alpha=0.1, color='b')
plt.legend(loc='upper left')
plt.xticks(rotation=90)
plt.tight_layout()
plt.savefig(save_here)
if show_plot:
plt.show()
else:
plt.clf()
return plt
def plt_arima_forecast_outsample(y, forecasts, lenght_for_forecast=None, conf_int=False,
title='Country name here',
y_label='Deaths',
x=None,
save_here='arima_case.png',
show_plot = False):
"""
:param y: real vualues
:param forecasts: predicted values
:param lenght_for_forecast: like 10% length of y
:param save_here: str where to save.
:return:
"""
if not (isinstance(lenght_for_forecast, int) | isinstance(lenght_for_forecast, float)):
lenght_for_forecast = forecasts.__len__()
print("WARNING: please use an int or float for forecasting length. Setting this to:", lenght_for_forecast)
plt.clf()
if x is None:
x = np.arange(y.shape[0])
plt.plot(x[:y.__len__()], y, 'b*--', label='Real')
plt.plot(x[-lenght_for_forecast:], forecasts, 'go--', label='Forecast')
plt.xlabel('Date')
plt.title(title)
plt.ylabel(y_label)
if conf_int is not False:
plt.fill_between(x[-lenght_for_forecast:],
conf_int[:, 0], conf_int[:, 1],
alpha=0.1, color='b')
plt.legend(loc='upper left')
plt.xticks(rotation=90)
plt.tight_layout()
plt.savefig(save_here, dpi=1000)
if show_plot:
plt.show()
else:
plt.clf()
return None
def render_pic_in_notebook(location_file = '../outputs/arima/forecast_next_3days_Spain_Deaths.png',
set_size_inches=(19,9)):
"""making notebook more visual"""
img = mpimg.imread(location_file)
# from now on you can use img as an image, but make sure you know what you are doing!
plt.imshow(img) # used to be assigned to imgplot=
plt.gcf().set_size_inches(set_size_inches[0], set_size_inches[1])
plt.axis('off')
plt.show() | [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.plot",
"matplotlib.image.imread",
"matplotlib.pyplot.axis",
"matplot... | [((898, 907), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (905, 907), True, 'import matplotlib.pyplot as plt\n'), ((964, 1000), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""b*--"""'], {'label': '"""Real"""'}), "(x, y, 'b*--', label='Real')\n", (972, 1000), True, 'import matplotlib.pyplot as plt\n'), ((1005, 1075), 'matplotlib.pyplot.plot', 'plt.plot', (['x[length_for_training:]', 'forecasts', '"""go--"""'], {'label': '"""Forecast"""'}), "(x[length_for_training:], forecasts, 'go--', label='Forecast')\n", (1013, 1075), True, 'import matplotlib.pyplot as plt\n'), ((1080, 1098), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (1090, 1098), True, 'import matplotlib.pyplot as plt\n'), ((1103, 1119), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1112, 1119), True, 'import matplotlib.pyplot as plt\n'), ((1124, 1143), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (1134, 1143), True, 'import matplotlib.pyplot as plt\n'), ((1332, 1360), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (1342, 1360), True, 'import matplotlib.pyplot as plt\n'), ((1365, 1388), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (1375, 1388), True, 'import matplotlib.pyplot as plt\n'), ((1393, 1411), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1409, 1411), True, 'import matplotlib.pyplot as plt\n'), ((1416, 1438), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_here'], {}), '(save_here)\n', (1427, 1438), True, 'import matplotlib.pyplot as plt\n'), ((2276, 2285), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2283, 2285), True, 'import matplotlib.pyplot as plt\n'), ((2397, 2468), 'matplotlib.pyplot.plot', 'plt.plot', (['x[-lenght_for_forecast:]', 'forecasts', '"""go--"""'], {'label': '"""Forecast"""'}), "(x[-lenght_for_forecast:], forecasts, 'go--', 
label='Forecast')\n", (2405, 2468), True, 'import matplotlib.pyplot as plt\n'), ((2473, 2491), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (2483, 2491), True, 'import matplotlib.pyplot as plt\n'), ((2496, 2512), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2505, 2512), True, 'import matplotlib.pyplot as plt\n'), ((2517, 2536), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (2527, 2536), True, 'import matplotlib.pyplot as plt\n'), ((2726, 2754), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (2736, 2754), True, 'import matplotlib.pyplot as plt\n'), ((2759, 2782), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (2769, 2782), True, 'import matplotlib.pyplot as plt\n'), ((2787, 2805), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2803, 2805), True, 'import matplotlib.pyplot as plt\n'), ((2810, 2842), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_here'], {'dpi': '(1000)'}), '(save_here, dpi=1000)\n', (2821, 2842), True, 'import matplotlib.pyplot as plt\n'), ((3125, 3152), 'matplotlib.image.imread', 'mpimg.imread', (['location_file'], {}), '(location_file)\n', (3137, 3152), True, 'import matplotlib.image as mpimg\n'), ((3247, 3262), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (3257, 3262), True, 'import matplotlib.pyplot as plt\n'), ((3372, 3387), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3380, 3387), True, 'import matplotlib.pyplot as plt\n'), ((3392, 3402), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3400, 3402), True, 'import matplotlib.pyplot as plt\n'), ((938, 959), 'numpy.arange', 'np.arange', (['y.shape[0]'], {}), '(y.shape[0])\n', (947, 959), True, 'import numpy as np\n'), ((1182, 1281), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x[length_for_training:]', 'conf_int[:, 
0]', 'conf_int[:, 1]'], {'alpha': '(0.1)', 'color': '"""b"""'}), "(x[length_for_training:], conf_int[:, 0], conf_int[:, 1],\n alpha=0.1, color='b')\n", (1198, 1281), True, 'import matplotlib.pyplot as plt\n'), ((1465, 1475), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1473, 1475), True, 'import matplotlib.pyplot as plt\n'), ((1494, 1503), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1501, 1503), True, 'import matplotlib.pyplot as plt\n'), ((2316, 2337), 'numpy.arange', 'np.arange', (['y.shape[0]'], {}), '(y.shape[0])\n', (2325, 2337), True, 'import numpy as np\n'), ((2575, 2675), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x[-lenght_for_forecast:]', 'conf_int[:, 0]', 'conf_int[:, 1]'], {'alpha': '(0.1)', 'color': '"""b"""'}), "(x[-lenght_for_forecast:], conf_int[:, 0], conf_int[:, 1],\n alpha=0.1, color='b')\n", (2591, 2675), True, 'import matplotlib.pyplot as plt\n'), ((2869, 2879), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2877, 2879), True, 'import matplotlib.pyplot as plt\n'), ((2898, 2907), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2905, 2907), True, 'import matplotlib.pyplot as plt\n'), ((3302, 3311), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3309, 3311), True, 'import matplotlib.pyplot as plt\n')] |
from functools import partial
import numpy as np
from thequickmath.field import read_field, write_field
def localise_random_field(random_field_path, output_field_path, a_x=30., b_x=10., a_z=10., b_z=10., c=1.):
"""
Makes a localised turbulent spot out of a homogeneously random field located in file random_field_path and
saves it as a Field object in file output_field_path
:param random_field_path: path to the file with a random field
:param output_field_path: path to the output file where the localised random field will be saved
:param a_x: length of the turbulent spot along coordinate x
:param b_x: `length` of the laminar-turbulent interface along coordinate x
:param a_z: length of the turbulent spot along coordinate z
:param b_z: `length` of the laminar-turbulent interface along coordinate z
:return: output_field_path
"""
f, attr = read_field(random_field_path)
window_x = partial(window, a_x, b_x, attr['Lx'])
window_z = partial(window, a_z, b_z, attr['Lz'])
for i, x_ in enumerate(f.space.x):
w_x = window_x(x_)
for k, z_ in enumerate(f.space.z):
w_z = window_z(z_)
f.u[i, :, k] *= c * w_x * w_z
f.v[i, :, k] *= c * w_x * w_z
f.w[i, :, k] *= c * w_x * w_z
write_field(f, attr, output_field_path)
return output_field_path
def window(a, b, L, xi):
"""
Returns the value of the windowing function for coordinate xi of a turbulent spot located in the middle of the
domain
:param a: length of the turbulent spot along coordinate xi
:param b: `length` of the laminar-turbulent interface along coordinate xi
:param xi: value of the coordinate xi
:param L: length of the domain along coordinate xi
"""
return 1./4 * (1. + np.tanh(6.*(a - xi + L/2.)/b + 3.)) * (1. + np.tanh(6.*(a + xi - L/2.)/b + 3.))
| [
"thequickmath.field.write_field",
"numpy.tanh",
"functools.partial",
"thequickmath.field.read_field"
] | [((898, 927), 'thequickmath.field.read_field', 'read_field', (['random_field_path'], {}), '(random_field_path)\n', (908, 927), False, 'from thequickmath.field import read_field, write_field\n'), ((943, 980), 'functools.partial', 'partial', (['window', 'a_x', 'b_x', "attr['Lx']"], {}), "(window, a_x, b_x, attr['Lx'])\n", (950, 980), False, 'from functools import partial\n'), ((996, 1033), 'functools.partial', 'partial', (['window', 'a_z', 'b_z', "attr['Lz']"], {}), "(window, a_z, b_z, attr['Lz'])\n", (1003, 1033), False, 'from functools import partial\n'), ((1304, 1343), 'thequickmath.field.write_field', 'write_field', (['f', 'attr', 'output_field_path'], {}), '(f, attr, output_field_path)\n', (1315, 1343), False, 'from thequickmath.field import read_field, write_field\n'), ((1848, 1891), 'numpy.tanh', 'np.tanh', (['(6.0 * (a + xi - L / 2.0) / b + 3.0)'], {}), '(6.0 * (a + xi - L / 2.0) / b + 3.0)\n', (1855, 1891), True, 'import numpy as np\n'), ((1804, 1847), 'numpy.tanh', 'np.tanh', (['(6.0 * (a - xi + L / 2.0) / b + 3.0)'], {}), '(6.0 * (a - xi + L / 2.0) / b + 3.0)\n', (1811, 1847), True, 'import numpy as np\n')] |
import numpy as np
from mgcpy.benchmarks.simulations import (circle_sim, cub_sim, exp_sim,
joint_sim, linear_sim, log_sim,
multi_indep_sim, multi_noise_sim,
quad_sim, root_sim, sin_sim,
spiral_sim, square_sim, step_sim,
two_parab_sim, ubern_sim, w_sim)
def gen_data(data_dir="./mgcpy/independence_tests/unit_tests/mgc/data/input/"):
NUM_SAMPLES = 50
NUM_DIMS = 1
def sin_sim_16(x, y): return sin_sim(x, y, period=16*np.pi)
def ellipsis_sim(x, y): return circle_sim(x, y, radius=5)
def square_sim_(x, y): return square_sim(x, y, period=-np.pi/4, indep=True)
simulations = [linear_sim, exp_sim, cub_sim, joint_sim, step_sim,
quad_sim, w_sim, spiral_sim, ubern_sim, log_sim,
root_sim, sin_sim, sin_sim_16, square_sim, two_parab_sim,
circle_sim, ellipsis_sim, square_sim_, multi_noise_sim, multi_indep_sim]
for simulation in simulations:
x, y = simulation(NUM_SAMPLES, NUM_DIMS)
np.savetxt(data_dir + str(simulation.__name__) + "_x.csv", x, delimiter=",")
np.savetxt(data_dir + str(simulation.__name__) + "_y.csv", y, delimiter=",")
def load_results(file_name, results_dir="./mgcpy/independence_tests/unit_tests/mgc/data/mgc/"):
mgc_results = np.genfromtxt(results_dir + file_name, delimiter=',')[1:]
pMGC = mgc_results[:, 0][0]
statMGC = mgc_results[:, 1][0]
pLocalCorr = mgc_results[:, 2:52]
localCorr = mgc_results[:, 52:102]
optimalScale = mgc_results[:, 102:104][0]
return (pMGC, statMGC, pLocalCorr, localCorr, optimalScale)
if __name__ == '__main__':
gen_data()
# print(load_results("linear_sim_res.csv"))
| [
"mgcpy.benchmarks.simulations.square_sim",
"numpy.genfromtxt",
"mgcpy.benchmarks.simulations.sin_sim",
"mgcpy.benchmarks.simulations.circle_sim"
] | [((617, 649), 'mgcpy.benchmarks.simulations.sin_sim', 'sin_sim', (['x', 'y'], {'period': '(16 * np.pi)'}), '(x, y, period=16 * np.pi)\n', (624, 649), False, 'from mgcpy.benchmarks.simulations import circle_sim, cub_sim, exp_sim, joint_sim, linear_sim, log_sim, multi_indep_sim, multi_noise_sim, quad_sim, root_sim, sin_sim, spiral_sim, square_sim, step_sim, two_parab_sim, ubern_sim, w_sim\n'), ((684, 710), 'mgcpy.benchmarks.simulations.circle_sim', 'circle_sim', (['x', 'y'], {'radius': '(5)'}), '(x, y, radius=5)\n', (694, 710), False, 'from mgcpy.benchmarks.simulations import circle_sim, cub_sim, exp_sim, joint_sim, linear_sim, log_sim, multi_indep_sim, multi_noise_sim, quad_sim, root_sim, sin_sim, spiral_sim, square_sim, step_sim, two_parab_sim, ubern_sim, w_sim\n'), ((746, 793), 'mgcpy.benchmarks.simulations.square_sim', 'square_sim', (['x', 'y'], {'period': '(-np.pi / 4)', 'indep': '(True)'}), '(x, y, period=-np.pi / 4, indep=True)\n', (756, 793), False, 'from mgcpy.benchmarks.simulations import circle_sim, cub_sim, exp_sim, joint_sim, linear_sim, log_sim, multi_indep_sim, multi_noise_sim, quad_sim, root_sim, sin_sim, spiral_sim, square_sim, step_sim, two_parab_sim, ubern_sim, w_sim\n'), ((1471, 1524), 'numpy.genfromtxt', 'np.genfromtxt', (['(results_dir + file_name)'], {'delimiter': '""","""'}), "(results_dir + file_name, delimiter=',')\n", (1484, 1524), True, 'import numpy as np\n')] |
import numpy as np
import argparse
def patch_generator(img, patch_size, stride):
h, w, _ = img.shape
patch_t = []
for i in range(0, h-patch_size+1, stride):
for j in range(0, w-patch_size+1, stride):
patch_t.append(img[i: i + patch_size, j: j + patch_size, :])
return patch_t
def data_aug(img, mode=0): # img: W*H
if mode == 0:
return img
elif mode == 1:
return np.flipud(img)
elif mode == 2:
return np.rot90(img, axes=(0, 1))
elif mode == 3:
return np.flipud(np.rot90(img, axes=(0, 1)))
elif mode == 4:
return np.rot90(img, k=2, axes=(0, 1))
elif mode == 5:
return np.flipud(np.rot90(img, k=2, axes=(0, 1)))
elif mode == 6:
return np.rot90(img, k=3, axes=(0, 1))
elif mode == 7:
return np.flipud(np.rot90(img, k=3, axes=(0, 1)))
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y1', '1', 'TRUE'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0', 'FALSE'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
| [
"numpy.rot90",
"argparse.ArgumentTypeError",
"numpy.flipud"
] | [((427, 441), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (436, 441), True, 'import numpy as np\n'), ((1119, 1172), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (1145, 1172), False, 'import argparse\n'), ((477, 503), 'numpy.rot90', 'np.rot90', (['img'], {'axes': '(0, 1)'}), '(img, axes=(0, 1))\n', (485, 503), True, 'import numpy as np\n'), ((549, 575), 'numpy.rot90', 'np.rot90', (['img'], {'axes': '(0, 1)'}), '(img, axes=(0, 1))\n', (557, 575), True, 'import numpy as np\n'), ((612, 643), 'numpy.rot90', 'np.rot90', (['img'], {'k': '(2)', 'axes': '(0, 1)'}), '(img, k=2, axes=(0, 1))\n', (620, 643), True, 'import numpy as np\n'), ((689, 720), 'numpy.rot90', 'np.rot90', (['img'], {'k': '(2)', 'axes': '(0, 1)'}), '(img, k=2, axes=(0, 1))\n', (697, 720), True, 'import numpy as np\n'), ((757, 788), 'numpy.rot90', 'np.rot90', (['img'], {'k': '(3)', 'axes': '(0, 1)'}), '(img, k=3, axes=(0, 1))\n', (765, 788), True, 'import numpy as np\n'), ((834, 865), 'numpy.rot90', 'np.rot90', (['img'], {'k': '(3)', 'axes': '(0, 1)'}), '(img, k=3, axes=(0, 1))\n', (842, 865), True, 'import numpy as np\n')] |
import pdb
import pickle
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
def batch_to_epoch(arr, num, ignore_first=False):
if len(arr) == 0:
return []
i = 0
epoch_vals = []
while i + num <= len(arr):
if ignore_first:
if i == 0:
epoch_vals.append(max(arr[i+1: i+num]))
else:
epoch_vals.append(max(arr[i: i+num]))
else:
epoch_vals.append(max(arr[i: i+num]))
i = i + num
return epoch_vals
def batch_to_epoch_avg(arr, num, ignore_first=False):
if len(arr) == 0:
return []
i = 0
epoch_vals = []
while i + num <= len(arr):
if ignore_first:
if i == 0:
epoch_vals.append(sum(arr[i + 1: i+num]) / (num - 1))
else:
epoch_vals.append(sum(arr[i + 1: i+num]) / num)
else:
epoch_vals.append(sum(arr[i + 1: i+num]) / num)
i = i + num
return epoch_vals
def batch_to_epoch_min(arr, n):
i = 0
epoch_vals = []
while i + n <= len(arr):
epoch_vals.append(min(arr[i: i+n]))
i = i + n
return epoch_vals
def get_values(lrs, num_users=20, epochs=100, model = "mlp", dataset = "mnist", local_bs=10, numbers = [1], index=4):
frac = "1.0"
iid = 1
topk = 0.001
topk_d = 0.001
all_experiments = []
experiments = []
for number in numbers:
file_name = '../save/{}-{}/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, dataset, model, epochs, num_users, frac, iid,
local_bs, "sparsetopk", lrs[0], 0, topk, topk_d, number)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file))
xi_values = np.mean(np.array(experiments)[:, index], axis=0)
all_experiments.append(xi_values)
experiments = []
for number in numbers:
file_name = '../save/{}-{}/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, dataset, model, epochs, num_users, frac, iid,
local_bs, "sparsetopk", lrs[1], 1, topk, topk_d, number)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file))
xi_values = np.mean(np.array(experiments)[:, index], axis=0)
all_experiments.append(xi_values)
epochs = max(len(all_experiments[0]), len(all_experiments[1]))
return epochs, all_experiments
def get_downlink(lrs, num_users=20, epochs=100, model = "mlp", dataset = "mnist", local_bs=10, numbers = [1], index=4, ignore_first=False, topk_d=0.001, directory=".."):
frac = "1.0"
iid = 1
topk = 0.001
experiments = []
for number in numbers:
file_name = '{}/save/{}-{}/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(directory, dataset, model, dataset, model, epochs, num_users, frac, iid,
local_bs, "sparsetopk", lrs[1], 1, topk, topk_d, number)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file))
xi_values = np.mean(np.array(experiments)[:, index], axis=0)
d = len(xi_values) // 100
xi_values = batch_to_epoch(xi_values, d, ignore_first)
epochs = len(xi_values)
return epochs, xi_values
def get_downlink_batches(lrs, num_users=20, epochs=100, model = "mlp", dataset = "mnist", local_bs=10, numbers = [1], index=4, ignore_first=False, topk_d=0.001):
frac = "1.0"
iid = 1
topk = 0.001
experiments = []
for number in numbers:
file_name = '../save/{}-{}/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, dataset, model, epochs, num_users, frac, iid,
local_bs, "sparsetopk", lrs[1], 1, topk, topk_d, number)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file))
xi_values = np.mean(np.array(experiments)[:, index], axis=0)
# all_experiments.append(xi_values)
epochs = len(xi_values)
return epochs, xi_values
def get_xi_values(lrs, num_users=20, epochs=100, model = "mlp", dataset = "mnist", local_bs=10, numbers = [1], index=4, ignore_first=False, topk_d=0.001, iid = 1):
frac = "1.0"
topk = 0.001
all_experiments = []
experiments = []
for number in numbers:
file_name = '../save/{}-{}/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, dataset, model, epochs, num_users, frac, iid,
local_bs, "sparsetopk", lrs[0], 0, topk, topk_d, number)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file))
xi_values = np.mean(np.array(experiments)[:, index], axis=0)
d = len(xi_values) // 100
xi_values = batch_to_epoch(xi_values, d, ignore_first)
all_experiments.append(xi_values)
experiments = []
for number in numbers:
file_name = '../save/{}-{}/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, dataset, model, epochs, num_users, frac, iid,
local_bs, "sparsetopk", lrs[1], 1, topk, topk_d, number)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file))
xi_values = np.mean(np.array(experiments)[:, index], axis=0)
d = len(xi_values) // 100
xi_values = batch_to_epoch(xi_values, d, ignore_first)
all_experiments.append(xi_values)
epochs = len(all_experiments[0])
return epochs, all_experiments
def get_side_values(lr, num_users=20, epochs=100, model = "mlp", dataset = "mnist", local_bs=10, numbers = [1], direction=0, iid = 1):
frac = "1.0"
topk = 0.001
topk_d = 0.001
all_experiments = []
experiments = []
for number in numbers:
file_name = '../save/{}-{}/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, dataset, model, epochs, num_users, frac, iid,
local_bs, "sparsetopk", lr, direction, topk, topk_d, number)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file))
xi_values_lhs = np.mean(np.array(experiments)[:, 9], axis=0)
d = len(xi_values_lhs) // 100
xi_values_lhs = batch_to_epoch(xi_values_lhs, d)
all_experiments.append(xi_values_lhs)
xi_values_rhs = np.mean(np.array(experiments)[:, 10], axis=0)
d = len(xi_values_rhs) // 100
xi_values_rhs = batch_to_epoch_min(xi_values_rhs, d)
all_experiments.append(xi_values_rhs)
epochs = len(all_experiments[0])
return epochs, all_experiments
def get_distance(lrs, num_users=20, epochs=100, model = "mlp", dataset = "mnist", local_bs=10, numbers=[4], exception=False):
frac = "1.0"
iid = 1
topk = 0.001
topk_d = 0.001
all_experiments = []
experiments = []
for number in numbers:
file_name = '../save/{}-{}/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, dataset, model, epochs, num_users, frac, iid,
local_bs, "sparsetopk", lrs[1], 1, topk, topk_d, number)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file))
lhs_values = np.mean(np.array(experiments)[:, 12], axis=0)
gradient = np.mean(np.array(experiments)[:, 10], axis=0)
a = lhs_values/gradient
d = len(a) // 100
xi_values = batch_to_epoch(a, d)
all_experiments.append(xi_values)
batches = len(all_experiments[0])
return all_experiments
def get_side_values_batches(lr, num_users=20, epochs=100, model = "mlp", dataset = "mnist", local_bs=10, numbers = [1], direction=0):
frac = "1.0"
iid = 1
topk = 0.001
topk_d = 0.001
all_experiments = []
experiments = []
for number in numbers:
file_name = '../save/{}-{}/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, dataset, model, epochs, num_users, frac, iid,
local_bs, "sparsetopk", lr, direction, topk, topk_d, number)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file))
xi_values_lhs = np.mean(np.array(experiments)[:, 9], axis=0)
all_experiments.append(xi_values_lhs)
xi_values_rhs = np.mean(np.array(experiments)[:, 10], axis=0)
all_experiments.append(xi_values_rhs)
epochs = len(all_experiments[0])
return epochs, all_experiments
def get_model_results(lrs, num_users=20, epochs=100, model = "mlp", dataset = "mnist", local_bs=10, numbers = [1], index_comparison=3, sgddir=0):
frac = "1.0"
iid = 1
topk = 0.001
topk_d = 0.001
all_experiments = []
experiments = []
for num in [1]:
file_name = '../save/{}-{}-tuning/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, dataset, model, epochs, num_users, frac, iid,
local_bs, "sparsetopk", lrs[0], 0, topk, topk_d, num)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file)[index_comparison])
all_experiments.append(np.average(np.array(experiments), axis=0))
experiments = []
for num in [1]:
file_name = '../save/{}-{}-tuning/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, dataset, model, epochs, num_users, frac, iid,
local_bs, "sparsetopk", lrs[1], 1, topk, topk_d, num)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file)[index_comparison])
all_experiments.append(np.average(np.array(experiments), axis=0))
experiments = []
for num in [1]:
file_name = '../save/{}-{}-tuning/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, dataset, model, epochs, num_users, frac, iid,
local_bs, "sgd", lrs[2], sgddir, topk, topk_d, num)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file)[index_comparison])
all_experiments.append(np.average(np.array(experiments), axis=0))
epochs = len(all_experiments[0])
return epochs, all_experiments
| [
"numpy.array",
"pickle.load"
] | [((1897, 1918), 'numpy.array', 'np.array', (['experiments'], {}), '(experiments)\n', (1905, 1918), True, 'import numpy as np\n'), ((2460, 2481), 'numpy.array', 'np.array', (['experiments'], {}), '(experiments)\n', (2468, 2481), True, 'import numpy as np\n'), ((3358, 3379), 'numpy.array', 'np.array', (['experiments'], {}), '(experiments)\n', (3366, 3379), True, 'import numpy as np\n'), ((4242, 4263), 'numpy.array', 'np.array', (['experiments'], {}), '(experiments)\n', (4250, 4263), True, 'import numpy as np\n'), ((5096, 5117), 'numpy.array', 'np.array', (['experiments'], {}), '(experiments)\n', (5104, 5117), True, 'import numpy as np\n'), ((5748, 5769), 'numpy.array', 'np.array', (['experiments'], {}), '(experiments)\n', (5756, 5769), True, 'import numpy as np\n'), ((6700, 6721), 'numpy.array', 'np.array', (['experiments'], {}), '(experiments)\n', (6708, 6721), True, 'import numpy as np\n'), ((6896, 6917), 'numpy.array', 'np.array', (['experiments'], {}), '(experiments)\n', (6904, 6917), True, 'import numpy as np\n'), ((7845, 7866), 'numpy.array', 'np.array', (['experiments'], {}), '(experiments)\n', (7853, 7866), True, 'import numpy as np\n'), ((7906, 7927), 'numpy.array', 'np.array', (['experiments'], {}), '(experiments)\n', (7914, 7927), True, 'import numpy as np\n'), ((8856, 8877), 'numpy.array', 'np.array', (['experiments'], {}), '(experiments)\n', (8864, 8877), True, 'import numpy as np\n'), ((8965, 8986), 'numpy.array', 'np.array', (['experiments'], {}), '(experiments)\n', (8973, 8986), True, 'import numpy as np\n'), ((9871, 9892), 'numpy.array', 'np.array', (['experiments'], {}), '(experiments)\n', (9879, 9892), True, 'import numpy as np\n'), ((10416, 10437), 'numpy.array', 'np.array', (['experiments'], {}), '(experiments)\n', (10424, 10437), True, 'import numpy as np\n'), ((10960, 10981), 'numpy.array', 'np.array', (['experiments'], {}), '(experiments)\n', (10968, 10981), True, 'import numpy as np\n'), ((1847, 1871), 'pickle.load', 'pickle.load', 
(['pickle_file'], {}), '(pickle_file)\n', (1858, 1871), False, 'import pickle\n'), ((2409, 2433), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (2420, 2433), False, 'import pickle\n'), ((3307, 3331), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (3318, 3331), False, 'import pickle\n'), ((4191, 4215), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (4202, 4215), False, 'import pickle\n'), ((5046, 5070), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (5057, 5070), False, 'import pickle\n'), ((5697, 5721), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (5708, 5721), False, 'import pickle\n'), ((6646, 6670), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (6657, 6670), False, 'import pickle\n'), ((7793, 7817), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (7804, 7817), False, 'import pickle\n'), ((8802, 8826), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (8813, 8826), False, 'import pickle\n'), ((9788, 9812), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (9799, 9812), False, 'import pickle\n'), ((10333, 10357), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (10344, 10357), False, 'import pickle\n'), ((10877, 10901), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (10888, 10901), False, 'import pickle\n')] |
"""
Ancillary functions on numpy arrays
"""
import numpy as np
def unique_rows(ar: np.ndarray, return_counts: bool = False):
    """unique_rows

    Return the unique rows of a (possibly multidimensional) array.

    :param ar: array_like
        Input array.
    :param return_counts:
        If True, also return how often each unique row occurs in `ar`.
    :return unique_x: ndarray
        The sorted unique rows.
    :return counts: ndarray, optional
        Occurrence count of each unique row.
    """
    if ar.ndim == 1:
        return np.unique(ar, return_counts=return_counts)
    # Collapse each row into one opaque (void) scalar so np.unique can
    # compare whole rows at once.
    row_dtype = np.dtype((np.void, ar.dtype.itemsize * ar.shape[1]))
    collapsed = np.ascontiguousarray(ar).view(row_dtype)
    _, first_idx, row_counts = np.unique(collapsed, return_index=True, return_counts=True)
    uniques = ar[first_idx, :]
    return (uniques, row_counts) if return_counts else uniques
def glue_xyz(x, y, z):
    """glue_xyz

    Return an array with `x`, `y` and `z` as columns.

    If `z` is None, `y` is appended as a third column only when `x` is a
    2-D array that already has exactly two columns; in every other case
    `x` is returned unchanged.

    :param x: array_like
    :param y: array_like or None
    :param z: array_like or None
    :return xyz: ndarray
        Array with `x`, `y` and `z` as columns.
    """
    if y is not None and z is not None:
        xyz = np.column_stack((x, y, z))
    # Check ndim before shape[1]: a 1-D `x` has no second axis, and the
    # previous `x.shape[1]` access raised IndexError instead of falling
    # through to the `else` branch.
    elif y is not None and z is None and x.ndim == 2 and x.shape[1] == 2:
        xyz = np.column_stack((x, y))
    else:
        xyz = x
    return xyz
def get_frequencies(x):
    """Build a (joint) frequency table over the rows of `x`.

    For a 1-D input, returns the raw occurrence counts of each unique
    value.  For a 2-D input, builds a dense contingency table indexed by
    the per-column symbol sets and normalizes it to sum to one.
    """
    row_values, row_counts = unique_rows(x, return_counts=True)
    if x.ndim == 1:
        return row_counts
    n_cols = x.shape[1]
    # Map each distinct symbol of every column to a dense axis index.
    codebooks = [{sym: pos for pos, sym in enumerate(np.unique(x[:, col]))}
                 for col in range(n_cols)]
    table = np.zeros([len(cb) for cb in codebooks])
    for row, count in zip(row_values, row_counts):
        cell = tuple(codebooks[col][sym] for col, sym in enumerate(row))
        table[cell] = count
    return table / table.sum()
| [
"numpy.unique",
"numpy.column_stack",
"numpy.ascontiguousarray",
"numpy.zeros",
"numpy.dtype"
] | [((560, 602), 'numpy.unique', 'np.unique', (['ar'], {'return_counts': 'return_counts'}), '(ar, return_counts=return_counts)\n', (569, 602), True, 'import numpy as np\n'), ((629, 681), 'numpy.dtype', 'np.dtype', (['(np.void, ar.dtype.itemsize * ar.shape[1])'], {}), '((np.void, ar.dtype.itemsize * ar.shape[1]))\n', (637, 681), True, 'import numpy as np\n'), ((756, 807), 'numpy.unique', 'np.unique', (['y'], {'return_index': '(True)', 'return_counts': '(True)'}), '(y, return_index=True, return_counts=True)\n', (765, 807), True, 'import numpy as np\n'), ((1225, 1251), 'numpy.column_stack', 'np.column_stack', (['(x, y, z)'], {}), '((x, y, z))\n', (1240, 1251), True, 'import numpy as np\n'), ((1729, 1744), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (1737, 1744), True, 'import numpy as np\n'), ((1324, 1347), 'numpy.column_stack', 'np.column_stack', (['(x, y)'], {}), '((x, y))\n', (1339, 1347), True, 'import numpy as np\n'), ((694, 718), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['ar'], {}), '(ar)\n', (714, 718), True, 'import numpy as np\n'), ((1603, 1621), 'numpy.unique', 'np.unique', (['x[:, i]'], {}), '(x[:, i])\n', (1612, 1621), True, 'import numpy as np\n')] |
from gym_torcs import TorcsEnv
import numpy as np
import random
import argparse
from keras.models import model_from_json, Model
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.optimizers import Adam
import tensorflow as tf
from keras.engine.training import collect_trainable_weights
import json
from ReplayBuffer import ReplayBuffer
from ActorNetworkPreTraining import ActorNetwork # use a modified class
from CriticNetwork import CriticNetwork
from OU import OU
import timeit
import signal
import sys
import time
# Constant used to (de)normalize steering angles in radians.
PI= 3.14159265359
# NOTE(review): this rebinds the class name `OU` to an instance, so the OU
# class itself is unreachable in this module after this line. Renaming the
# instance would change the module's public surface, so it is left as-is.
OU = OU() #Ornstein-Uhlenbeck Process
class DriverExample(object):
    '''Rule-based reference driver: what the driver is intending to do
    (i.e. send to the server). Composes something like this for the server:
    (accel 1)(brake 0)(gear 1)(steer 0)(clutch 0)(focus 0)(meta 0) or
    (accel 1)(brake 0)(gear 1)(steer 0)(clutch 0)(focus -90 -45 0 45 90)(meta 0)'''

    def __init__(self):
        # `str()` replaces the Python-2-only `unicode()` builtin, which raises
        # NameError under Python 3 (the rest of the file already uses py3-style
        # print()); the attribute is not read anywhere in this class.
        self.actionstr = str()

        # "R" is the request dictionary sent to the server.
        self.R = {'accel': 0.2,
                  'brake': 0,
                  'clutch': 0,
                  'gear': 1,
                  'steer': 0,
                  'focus': [-90, -45, 0, 45, 90],
                  'meta': 0
                  }

    def action(self, s_t):
        '''Compute a [steer, accel, brake] action from a flat observation.

        This is only an example. It will get around the track but the
        correct thing to do is write your own `drive()` function.

        Observation layout assumed (from gym_torcs make_observation — TODO
        confirm against caller): [0]=angle/pi, [20]=trackPos,
        [21]=speedX/300, [24:28]=wheelSpinVel.

        :param s_t: flat observation vector of length state_dim (29).
        :return: list [steer, accel, brake], clipped to valid ranges.
        '''
        target_speed = 100

        # S: angle, track (19), trackPos, speedX, speedY, speedZ, wheelSpinVel/100.0 (4), rpm
        S = {}
        # Values are normalized in gym_torcs.py/make_observation, while the
        # snakeoil3_gym.py-style controller below expects raw units, so the
        # scaling is reverted here.
        S['angle'] = s_t[0] * 3.1416
        S['trackPos'] = s_t[20]
        S['speedX'] = s_t[21] * 300.
        S['wheelSpinVel'] = s_t[24:28]

        # Steer To Corner
        self.R['steer'] = S['angle'] * 10 / PI
        # Steer To Center
        self.R['steer'] -= S['trackPos'] * .10

        # Throttle Control: aim for target_speed, reduced while steering hard
        if S['speedX'] < target_speed - (self.R['steer'] * 50):
            self.R['accel'] += .01
        else:
            self.R['accel'] -= .01
        if S['speedX'] < 10:
            # strong kick when nearly stopped
            self.R['accel'] += 1 / (S['speedX'] + .1)

        # Traction Control System: cut throttle when the rear wheels spin
        # noticeably faster than the front wheels
        if ((S['wheelSpinVel'][2] + S['wheelSpinVel'][3]) -
                (S['wheelSpinVel'][0] + S['wheelSpinVel'][1]) > 5):
            self.R['accel'] -= .2

        self.clip_to_limits()  # get rid of absurd values

        print("------------------------------------------")
        print("angle: ", S['angle'], "speedX: ", S['speedX'], "trackPos: ", S['trackPos'])
        print("steer: ", self.R['steer'], "accel: ", self.R['accel'], "brake: ", self.R['brake'])

        return [self.R['steer'], self.R['accel'], self.R['brake']]

    def clip(self, v, lo, hi):
        '''Clamp `v` to the closed interval [lo, hi].'''
        if v < lo:
            return lo
        elif v > hi:
            return hi
        else:
            return v

    def clip_to_limits(self):
        '''Clamp steer to [-1, 1] and accel/brake to [0, 1], in place.'''
        self.R['steer'] = self.clip(self.R['steer'], -1, 1)
        self.R['brake'] = self.clip(self.R['brake'], 0, 1)
        self.R['accel'] = self.clip(self.R['accel'], 0, 1)
def preTrain(): # train the NN of actor and ciritc using existing rules
    """Pre-train the DDPG actor and critic by imitating the rule-based driver.

    Runs episodes in TORCS where actions come from DriverExample (not the
    actor); the actor is then fit to reproduce those actions (behavioral
    cloning) and the critic is fit to the bootstrapped Q targets.
    Weights are periodically saved to pre_actormodel.h5 / pre_criticmodel.h5.
    """
    BUFFER_SIZE = 100000
    BATCH_SIZE = 32
    GAMMA = 0.99
    TAU = 0.001 #Target Network HyperParameters
    LRA = 0.0001 #Learning rate for Actor
    LRC = 0.001 #Lerning rate for Critic
    action_dim = 3 #Steering/Acceleration/Brake
    state_dim = 29 #of sensors input
    np.random.seed(1337)
    vision = False
    episode_count = 2000
    max_steps = 100000
    reward = 0
    done = False
    step = 0
    #Tensorflow GPU optimization
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    from keras import backend as K
    K.set_session(sess)
    actor = ActorNetwork(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRA)
    critic = CriticNetwork(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRC)
    buff = ReplayBuffer(BUFFER_SIZE) #Create replay buffer
    # Generate a Torcs environment
    env = TorcsEnv(vision=vision, throttle=True,gear_change=False)
    # Generate a driver
    driver = DriverExample()
    #Now load the weight
    print("Now we load the weight")
    try:
        actor.model.load_weights("pre_actormodel.h5")
        critic.model.load_weights("pre_criticmodel.h5")
        actor.target_model.load_weights("pre_actormodel.h5")
        critic.target_model.load_weights("pre_criticmodel.h5")
        print("Weight load successfully")
    # NOTE(review): bare except also hides real errors (corrupt file,
    # architecture mismatch), not just a missing checkpoint.
    except:
        print("Cannot find the weight")
    print("TORCS Experiment Start.")
    for i in range(episode_count):
        print("Episode : " + str(i) + " Replay Buffer " + str(buff.count()))
        if np.mod(i, 3) == 0:
            ob = env.reset(relaunch=True) #relaunch TORCS every 3 episode because of the memory leak error
        else:
            ob = env.reset()
        # Flatten the observation into the 29-dim state vector expected by the
        # networks and by DriverExample.action.
        s_t = np.hstack((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ, ob.wheelSpinVel/100.0, ob.rpm))
        total_reward = 0.
        for j in range(max_steps):
            loss_actor = 0
            loss_critic = 0
            a_t = np.zeros([1,action_dim])
            # the driver produce the actions
            a_t = driver.action(s_t.reshape(state_dim, ))
            ob, r_t, done, info = env.step(a_t)
            s_t1 = np.hstack((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ, ob.wheelSpinVel/100.0, ob.rpm))
            buff.add(s_t, a_t, r_t, s_t1, done) #Add replay buffer
            #Do the batch update
            batch = buff.getBatch(BATCH_SIZE)
            states = np.asarray([e[0] for e in batch])
            actions = np.asarray([e[1] for e in batch])
            rewards = np.asarray([e[2] for e in batch])
            new_states = np.asarray([e[3] for e in batch])
            dones = np.asarray([e[4] for e in batch])
            # NOTE(review): y_t is initialized from the actions only as a shape
            # template; every row is overwritten in the loop below (the scalar
            # target broadcasts across the action_dim columns).
            y_t = np.asarray([e[1] for e in batch])
            target_q_values = critic.target_model.predict([new_states, actor.target_model.predict(new_states)])
            for k in range(len(batch)):
                if dones[k]:
                    y_t[k] = rewards[k]
                else:
                    y_t[k] = rewards[k] + GAMMA*target_q_values[k]
            """
            if (train_indicator == 1):
                loss += critic.model.train_on_batch([states,actions], y_t)
                a_for_grad = actor.model.predict(states)
                grads = critic.gradients(states, a_for_grad)
                actor.train(states, grads)
                actor.target_train()
                critic.target_train()
            """
            # Behavioral cloning: the actor regresses onto the rule-based
            # driver's actions rather than being trained by policy gradients.
            loss_actor += actor.model.train_on_batch(states, actions) # train actor
            loss_critic += critic.model.train_on_batch([states,actions], y_t) # train critic
            actor.target_train()
            critic.target_train()
            total_reward += r_t
            s_t = s_t1
            print("Episode", i, "Step", step, ": ")
            print("Action", a_t, "Reward", r_t)
            print("loss_actor", loss_actor, "loss_critic", loss_critic)
            step += 1
            # Checkpoint every 100 global steps.
            if np.mod(step, 100) == 0:
                print("Now we save model")
                actor.model.save_weights("pre_actormodel.h5", overwrite=True)
                with open("pre_actormodel.json", "w") as outfile:
                    json.dump(actor.model.to_json(), outfile)
                critic.model.save_weights("pre_criticmodel.h5", overwrite=True)
                with open("pre_criticmodel.json", "w") as outfile:
                    json.dump(critic.model.to_json(), outfile)
            if done:
                break
        print("TOTAL REWARD @ " + str(i) +"-th Episode  : Reward " + str(total_reward))
        print("Total Step: " + str(step))
        print("")
    env.end()  # This is for shutting down TORCS
    print("Finish.")
def signal_handler(signal, frame):
    """SIGINT callback: announce the interrupt, shut TORCS down, and exit."""
    print('You pressed Ctrl+C!')
    # Spin up a throwaway Torcs environment purely so its end() hook can
    # terminate the external simulator process, then leave cleanly.
    TorcsEnv(vision=False, throttle=True, gear_change=False).end()
    sys.exit(0)
if __name__ == "__main__":
    # if ctrl c is pressed, close env too
    # Install the SIGINT handler before training so an interrupted run still
    # shuts down the external TORCS process cleanly.
    signal.signal(signal.SIGINT, signal_handler)
    preTrain()
| [
"signal.signal",
"numpy.hstack",
"tensorflow.Session",
"keras.backend.set_session",
"numpy.asarray",
"ActorNetworkPreTraining.ActorNetwork",
"CriticNetwork.CriticNetwork",
"numpy.zeros",
"gym_torcs.TorcsEnv",
"numpy.random.seed",
"sys.exit",
"tensorflow.ConfigProto",
"ReplayBuffer.ReplayBuff... | [((606, 610), 'OU.OU', 'OU', ([], {}), '()\n', (608, 610), False, 'from OU import OU\n'), ((3609, 3629), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (3623, 3629), True, 'import numpy as np\n'), ((3790, 3806), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (3804, 3806), True, 'import tensorflow as tf\n'), ((3861, 3886), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (3871, 3886), True, 'import tensorflow as tf\n'), ((3926, 3945), 'keras.backend.set_session', 'K.set_session', (['sess'], {}), '(sess)\n', (3939, 3945), True, 'from keras import backend as K\n'), ((3959, 4022), 'ActorNetworkPreTraining.ActorNetwork', 'ActorNetwork', (['sess', 'state_dim', 'action_dim', 'BATCH_SIZE', 'TAU', 'LRA'], {}), '(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRA)\n', (3971, 4022), False, 'from ActorNetworkPreTraining import ActorNetwork\n'), ((4036, 4100), 'CriticNetwork.CriticNetwork', 'CriticNetwork', (['sess', 'state_dim', 'action_dim', 'BATCH_SIZE', 'TAU', 'LRC'], {}), '(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRC)\n', (4049, 4100), False, 'from CriticNetwork import CriticNetwork\n'), ((4112, 4137), 'ReplayBuffer.ReplayBuffer', 'ReplayBuffer', (['BUFFER_SIZE'], {}), '(BUFFER_SIZE)\n', (4124, 4137), False, 'from ReplayBuffer import ReplayBuffer\n'), ((4209, 4266), 'gym_torcs.TorcsEnv', 'TorcsEnv', ([], {'vision': 'vision', 'throttle': '(True)', 'gear_change': '(False)'}), '(vision=vision, throttle=True, gear_change=False)\n', (4217, 4266), False, 'from gym_torcs import TorcsEnv\n'), ((8251, 8307), 'gym_torcs.TorcsEnv', 'TorcsEnv', ([], {'vision': '(False)', 'throttle': '(True)', 'gear_change': '(False)'}), '(vision=False, throttle=True, gear_change=False)\n', (8259, 8307), False, 'from gym_torcs import TorcsEnv\n'), ((8326, 8337), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (8334, 8337), False, 'import sys\n'), ((8412, 8456), 'signal.signal', 'signal.signal', 
(['signal.SIGINT', 'signal_handler'], {}), '(signal.SIGINT, signal_handler)\n', (8425, 8456), False, 'import signal\n'), ((5067, 5181), 'numpy.hstack', 'np.hstack', (['(ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ, ob.\n wheelSpinVel / 100.0, ob.rpm)'], {}), '((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ,\n ob.wheelSpinVel / 100.0, ob.rpm))\n', (5076, 5181), True, 'import numpy as np\n'), ((4881, 4893), 'numpy.mod', 'np.mod', (['i', '(3)'], {}), '(i, 3)\n', (4887, 4893), True, 'import numpy as np\n'), ((5317, 5342), 'numpy.zeros', 'np.zeros', (['[1, action_dim]'], {}), '([1, action_dim])\n', (5325, 5342), True, 'import numpy as np\n'), ((5527, 5641), 'numpy.hstack', 'np.hstack', (['(ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ, ob.\n wheelSpinVel / 100.0, ob.rpm)'], {}), '((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ,\n ob.wheelSpinVel / 100.0, ob.rpm))\n', (5536, 5641), True, 'import numpy as np\n'), ((5830, 5863), 'numpy.asarray', 'np.asarray', (['[e[0] for e in batch]'], {}), '([e[0] for e in batch])\n', (5840, 5863), True, 'import numpy as np\n'), ((5886, 5919), 'numpy.asarray', 'np.asarray', (['[e[1] for e in batch]'], {}), '([e[1] for e in batch])\n', (5896, 5919), True, 'import numpy as np\n'), ((5942, 5975), 'numpy.asarray', 'np.asarray', (['[e[2] for e in batch]'], {}), '([e[2] for e in batch])\n', (5952, 5975), True, 'import numpy as np\n'), ((6001, 6034), 'numpy.asarray', 'np.asarray', (['[e[3] for e in batch]'], {}), '([e[3] for e in batch])\n', (6011, 6034), True, 'import numpy as np\n'), ((6055, 6088), 'numpy.asarray', 'np.asarray', (['[e[4] for e in batch]'], {}), '([e[4] for e in batch])\n', (6065, 6088), True, 'import numpy as np\n'), ((6107, 6140), 'numpy.asarray', 'np.asarray', (['[e[1] for e in batch]'], {}), '([e[1] for e in batch])\n', (6117, 6140), True, 'import numpy as np\n'), ((7377, 7394), 'numpy.mod', 'np.mod', (['step', '(100)'], {}), '(step, 100)\n', 
(7383, 7394), True, 'import numpy as np\n')] |
import itertools
import numpy as np
import sys
from collections import defaultdict, namedtuple
#Credit:
#https://github.com/dennybritz/reinforcement-learning
# Per-run bookkeeping: two parallel numpy arrays with one entry per episode.
EpisodeStats = namedtuple("Stats",["episode_lengths", "episode_rewards"])
def make_epsilon_greedy_policy(Q, epsilon, decay, nA):
"""
Creates an epsilon-greedy policy based on a given Q-function and epsilon.
Args:
Q: A dictionary that maps from state -> action-values.
Each value is a numpy array of length nA (see below)
epsilon: The probability to select a random action . float between 0 and 1.
nA: Number of actions in the environment.
Returns:
A function that takes the observation as an argument and returns
the probabilities for each action in the form of a numpy array of length nA.
"""
def policy_fn(observation, episode):
e_prime = epsilon * decay ** episode
A = np.ones(nA, dtype=float) * e_prime / nA
if np.all(np.isclose(Q[observation], np.zeros(nA))):
best_action = np.random.randint(nA)
else:
best_action = np.argmax(Q[observation])
A[best_action] += (1.0 - e_prime)
return A
return policy_fn
def make_exploration_function(Rplus, Ne):
    """
    Creates an "exploratory" value function (Exploration Function, AIMA ch. 21).

    Args:
        Rplus: Optimistic reward value reported for under-tried actions.
        Ne: Minimum number of times each action should be tried per state.

    Returns:
        A vectorized function f(u, n) that returns Rplus wherever the visit
        count n is below Ne, and the plain utility estimate u otherwise.
    """
    def exploration_fn(u, n):
        # Optimism in the face of uncertainty: pretend rarely-tried actions
        # are worth Rplus until they have been taken Ne times.
        return Rplus if n < Ne else u
    return np.vectorize(exploration_fn)
def q_learning(env, method, num_episodes, discount_factor=1.0, alpha=0.5, epsilon=0.1, decay=1.0, Rplus=None, Ne=None):
    """
    Q-Learning algorithm: Off-policy TD control. Finds the optimal greedy policy
    while following an epsilon-greedy (or exploration-function) behavior policy.

    Args:
        env: OpenAI environment. Assumed discrete: must expose
            `action_space.n`, `reward_range`, and (for the final policy
            table) integer `nS`/`nA` attributes — TODO confirm at call site.
        method: ['greedy', 'explore'] whether to use an epsilon-greedy or an
            explorative (AIMA exploration-function) behavior policy.
        num_episodes: Number of episodes to run for.
        discount_factor: Gamma discount factor.
        alpha: TD learning rate.
        epsilon: Chance the sample a random action. Float betwen 0 and 1.
        decay: exponential decay rate for epsilon
        Rplus: Optimistic reward given to unexplored states
            (defaults to max(env.reward_range) when falsy).
        Ne: Minimum number of times that each action will be taken at each
            state (defaults to 100 when falsy).

    Returns:
        A tuple (Q, stats, Nsa, final_policy).
        Q is the learned action-value function, a dict state -> action values.
        stats is an EpisodeStats object with per-episode lengths and rewards.
        Nsa maps state -> per-action visit counts.
        final_policy is a dense (nS, nA) array copy of Q.
    """
    # The final action-value function.
    # A nested dictionary that maps state -> (action -> action-value).
    Q = defaultdict(lambda: np.zeros(env.action_space.n))
    # Keeps track of useful statistics
    stats = EpisodeStats(
        episode_lengths=np.zeros(num_episodes),
        episode_rewards=np.zeros(num_episodes))
    # Keeps track of how many times we've taken action a in state s
    Nsa = defaultdict(lambda: np.zeros(env.action_space.n))
    # The policy we're following
    if method == 'greedy':
        policy = make_epsilon_greedy_policy(Q, epsilon, decay, env.action_space.n)
        def get_next_action(state_, episode):
            action_probs = policy(state_, episode)
            return np.random.choice(np.arange(len(action_probs)), p=action_probs)
    elif method == 'explore':
        # NOTE(review): `if not Rplus` also treats an explicit Rplus=0 (or
        # Ne=0) as "unset" and replaces it with the default.
        if not Rplus:
            Rplus = max(env.reward_range)
        if not Ne:
            Ne = 100
        exploration_fn = make_exploration_function(Rplus, Ne)
        done_exploring = False
        def get_next_action(state_, episode):
            exploration_values = exploration_fn(Q[state_], Nsa[state_])
            # All values equal (e.g. everything still optimistic): pick at
            # random; otherwise act greedily on the optimistic values.
            if np.allclose(exploration_values, exploration_values[0]):
                return np.random.randint(env.nA)
            else:
                return np.argmax(exploration_values)
    else:
        raise ValueError('Unsupported method type')
    for i_episode in range(num_episodes):
        # Print out which episode we're on, useful for debugging.
        if (i_episode + 1) % 100 == 0:
            print("\rEpisode {}/{}.".format(i_episode + 1, num_episodes), end="")
            sys.stdout.flush()
        # Reset the environment and pick the first action
        state = env.reset()
        # One step in the environment
        # total_reward = 0.0
        for t in itertools.count():
            t += 1
            # Get an action based on the exploration function
            action = get_next_action(state, i_episode)
            next_state, reward, done, _ = env.step(action)
            # Update statistics
            stats.episode_rewards[i_episode] += reward
            stats.episode_lengths[i_episode] = t
            Nsa[state][action] += 1
            # TD Update. The bootstrap term is added even on terminal steps;
            # this is safe because Q[terminal] is never updated and stays 0.
            best_next_action = np.argmax(Q[next_state])
            td_target = reward + discount_factor * Q[next_state][best_next_action]
            td_delta = td_target - Q[state][action]
            Q[state][action] += alpha * td_delta
            if method == 'explore' and not done_exploring:
                # for/else: the else fires only when no break occurred, i.e.
                # every recorded state has all actions tried >= Ne times.
                for arr in Nsa.values():
                    if not np.all(arr >= Ne):
                        break
                else:
                    done_exploring = True
                    print('All done with exploration at episode %i, step %i' % (i_episode, t))
            if done:
                break
            state = next_state
    # Densify Q into an (nS, nA) table; unvisited states stay all-zero rows.
    final_policy = np.zeros((env.nS, env.nA))
    for state in range(env.nS):
        final_policy[state] = Q[state]
    return Q, stats, Nsa, final_policy
"numpy.all",
"collections.namedtuple",
"numpy.allclose",
"numpy.ones",
"numpy.argmax",
"numpy.zeros",
"numpy.random.randint",
"itertools.count",
"sys.stdout.flush",
"numpy.vectorize"
] | [((175, 234), 'collections.namedtuple', 'namedtuple', (['"""Stats"""', "['episode_lengths', 'episode_rewards']"], {}), "('Stats', ['episode_lengths', 'episode_rewards'])\n", (185, 234), False, 'from collections import defaultdict, namedtuple\n'), ((2122, 2150), 'numpy.vectorize', 'np.vectorize', (['exploration_fn'], {}), '(exploration_fn)\n', (2134, 2150), True, 'import numpy as np\n'), ((6089, 6115), 'numpy.zeros', 'np.zeros', (['(env.nS, env.nA)'], {}), '((env.nS, env.nA))\n', (6097, 6115), True, 'import numpy as np\n'), ((5006, 5023), 'itertools.count', 'itertools.count', ([], {}), '()\n', (5021, 5023), False, 'import itertools\n'), ((1056, 1077), 'numpy.random.randint', 'np.random.randint', (['nA'], {}), '(nA)\n', (1073, 1077), True, 'import numpy as np\n'), ((1118, 1143), 'numpy.argmax', 'np.argmax', (['Q[observation]'], {}), '(Q[observation])\n', (1127, 1143), True, 'import numpy as np\n'), ((3332, 3360), 'numpy.zeros', 'np.zeros', (['env.action_space.n'], {}), '(env.action_space.n)\n', (3340, 3360), True, 'import numpy as np\n'), ((3452, 3474), 'numpy.zeros', 'np.zeros', (['num_episodes'], {}), '(num_episodes)\n', (3460, 3474), True, 'import numpy as np\n'), ((3500, 3522), 'numpy.zeros', 'np.zeros', (['num_episodes'], {}), '(num_episodes)\n', (3508, 3522), True, 'import numpy as np\n'), ((3622, 3650), 'numpy.zeros', 'np.zeros', (['env.action_space.n'], {}), '(env.action_space.n)\n', (3630, 3650), True, 'import numpy as np\n'), ((4815, 4833), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4831, 4833), False, 'import sys\n'), ((5449, 5473), 'numpy.argmax', 'np.argmax', (['Q[next_state]'], {}), '(Q[next_state])\n', (5458, 5473), True, 'import numpy as np\n'), ((929, 953), 'numpy.ones', 'np.ones', (['nA'], {'dtype': 'float'}), '(nA, dtype=float)\n', (936, 953), True, 'import numpy as np\n'), ((1014, 1026), 'numpy.zeros', 'np.zeros', (['nA'], {}), '(nA)\n', (1022, 1026), True, 'import numpy as np\n'), ((4335, 4389), 'numpy.allclose', 
'np.allclose', (['exploration_values', 'exploration_values[0]'], {}), '(exploration_values, exploration_values[0])\n', (4346, 4389), True, 'import numpy as np\n'), ((4414, 4439), 'numpy.random.randint', 'np.random.randint', (['env.nA'], {}), '(env.nA)\n', (4431, 4439), True, 'import numpy as np\n'), ((4481, 4510), 'numpy.argmax', 'np.argmax', (['exploration_values'], {}), '(exploration_values)\n', (4490, 4510), True, 'import numpy as np\n'), ((5786, 5803), 'numpy.all', 'np.all', (['(arr >= Ne)'], {}), '(arr >= Ne)\n', (5792, 5803), True, 'import numpy as np\n')] |
import sys
import matplotlib as mpl
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
from torch import nn
from torch.autograd import Variable
from tqdm import trange
import gaussian
import util
from util import sparsemm
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from argparse import ArgumentParser
import networkx as nx
import math
import torch
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
"""
Graph convolution experiment: learn a similarity graph for a given dataset end-to-end using simplified graph
convolutions.
Abandoned. Let me know if you get it to work.
"""
def clean(axes=None):
    """Strip the spines and all ticks/labels from a matplotlib Axes.

    :param axes: Axes to clean; defaults to the current Axes (plt.gca()).
    """
    axes = plt.gca() if axes is None else axes
    for spine in axes.spines.values():
        spine.set_visible(False)
    axes.tick_params(top=False, bottom=False, left=False, right=False,
                     labelbottom=False, labelleft=False)
def densities(points, means, sigmas):
    """
    Evaluate the unnormalized Gaussian densities of every point under every
    MVN component (each MVN has a diagonal covariance given by `sigmas`).

    :param points: (batch, n, rank) tensor of points.
    :param means: (batch, k, rank) tensor of component means.
    :param sigmas: (batch, k, rank) tensor of per-dimension scales.
    :return: (batch, n, k) tensor of unnormalized densities.
    """
    # n: number of points, k: number of components, rank: dimensionality
    b, n, rank = points.size()
    b, k, rank = means.size()

    # Broadcast every point against every component: (b, n, k, rank).
    points = points.unsqueeze(2).expand(b, n, k, rank)
    means = means.unsqueeze(1).expand_as(points)
    sigmas = sigmas.unsqueeze(1).expand_as(points)

    # Per-dimension inverse scale (EPSILON guards the division).
    inv_scale = torch.sqrt(1.0 / (gaussian.EPSILON + sigmas))

    diffs = points - means
    diffs = diffs * inv_scale

    # Squared distances via a batched dot product: fold (b, n, k) into the
    # batch dimension of bmm, one row vector per point/component pair.
    diffs = diffs.view(-1, 1, rank)
    squared = torch.bmm(diffs, diffs.transpose(1, 2))
    squared = squared.view(b, n, k)

    return torch.exp(-0.5 * squared)
class MatrixHyperlayer(nn.Module):
    """
    Constrained version of the matrix hyperlayer: a learned sparse
    (out_num, in_num) matrix where each output row gets exactly k inputs,
    with the k column positions and their values produced by `hyper()`.
    """
    def duplicates(self, tuples):
        """
        Takes a list of tuples, and for each tuple that occurs multiple times
        marks all but one of the occurrences (in the mask that is returned).
        :param tuples: A size (batch, k, rank) tensor of integer tuples
        :return: A size (batch, k) mask indicating the duplicates
        """
        b, k, r = tuples.size()
        primes = self.primes[:r]
        primes = primes.unsqueeze(0).unsqueeze(0).expand(b, k, r)
        # Encode each tuple as prod((t_i + 1) ** prime_i): equal tuples get
        # equal codes, so duplicates can be found by sorting the codes.
        unique = ((tuples+1) ** primes).prod(dim=2) # unique identifier for each tuple
        # NOTE(review): `sorted` shadows the builtin; left unchanged here.
        sorted, sort_idx = torch.sort(unique, dim=1)
        _, unsort_idx = torch.sort(sort_idx, dim=1)
        # In sorted order a duplicate equals its left neighbor; pad the first
        # column with zeros so the first occurrence is never marked.
        mask = sorted[:, 1:] == sorted[:, :-1]
        zs = torch.zeros(b, 1, dtype=torch.uint8, device='cuda' if self.use_cuda else 'cpu')
        mask = torch.cat([zs, mask], dim=1)
        # Scatter the mask back to the original (unsorted) tuple order.
        return torch.gather(mask, 1, unsort_idx)
    def cuda(self, device_id=None):
        # Remember the move so later ad-hoc tensor constructions pick CUDA.
        self.use_cuda = True
        super().cuda(device_id)
    def __init__(self, in_num, out_num, k, radditional=0, gadditional=0, region=(128,),
                 sigma_scale=0.2, min_sigma=0.0, fix_value=False):
        """Build the hyperlayer.

        :param in_num: number of input units (columns of the sparse matrix).
        :param out_num: number of output units (rows).
        :param k: number of learned connections per output.
        :param radditional: extra samples drawn near each mean (local region).
        :param gadditional: extra samples drawn over the whole index range.
        :param region: size of the local sampling region.
        :param sigma_scale: multiplier applied to the produced sigmas.
        :param min_sigma: lower bound added to the scaled sigmas.
        :param fix_value: if True, all connection values are fixed to 1/k.
        """
        super().__init__()
        self.min_sigma = min_sigma
        self.use_cuda = False
        self.in_num = in_num
        self.out_num = out_num
        self.k = k
        self.radditional = radditional
        self.region = region
        self.gadditional = gadditional
        self.sigma_scale = sigma_scale
        self.fix_value = fix_value
        self.weights_rank = 2 # implied rank of W
        # One (mean, sigma, value) triple per connection, k per output row.
        self.params = Parameter(torch.randn(k * out_num, 3))
        # Precomputed row indices: one copy per sample drawn in training
        # (2 nearest + radditional local + gadditional global per connection)...
        outs = torch.arange(out_num).unsqueeze(1).expand(out_num, k * (2 + radditional + gadditional)).contiguous().view(-1, 1)
        self.register_buffer('outs', outs.long())
        # ...and one copy per connection for inference (no extra samples).
        outs_inf = torch.arange(out_num).unsqueeze(1).expand(out_num, k).contiguous().view(-1, 1)
        self.register_buffer('outs_inf', outs_inf.long())
        # Prime table used by duplicates() to encode integer tuples.
        self.register_buffer('primes', torch.tensor(util.PRIMES))
    def size(self):
        """Shape of the implied sparse weight matrix: (out_num, in_num)."""
        return (self.out_num, self.in_num)
    def generate_integer_tuples(self, means,rng=None, use_cuda=False):
        """Sample integer column indices around the continuous means.

        Per connection: its two nearest integers, `radditional` uniform
        samples from a local region, and `gadditional` uniform samples from
        the whole range. Returns a (c, k*(2+r+g), 1) long tensor.
        """
        dv = 'cuda' if use_cuda else 'cpu'
        c, k, rank = means.size()
        assert rank == 1
        # In the following, we cut the first dimension up into chunks of size self.k (for which the row index)
        # is the same. This then functions as a kind of 'batch' dimension, allowing us to use the code from
        # globalsampling without much adaptation
        """
        Sample the 2 nearest points
        """
        floor_mask = torch.tensor([1, 0], device=dv, dtype=torch.uint8)
        fm = floor_mask.unsqueeze(0).unsqueeze(2).expand(c, k, 2, 1)
        neighbor_ints = means.data.unsqueeze(2).expand(c, k, 2, 1).contiguous()
        # First copy is floored, second is ceiled: the two integer neighbors.
        # NOTE(review): uint8 mask indexing is deprecated in newer torch.
        neighbor_ints[fm] = neighbor_ints[fm].floor()
        neighbor_ints[~fm] = neighbor_ints[~fm].ceil()
        neighbor_ints = neighbor_ints.long()
        """
        Sample uniformly from a small range around the given index tuple
        """
        rr_ints = torch.cuda.FloatTensor(c, k, self.radditional, 1) if use_cuda else torch.FloatTensor(c, k, self.radditional, 1)
        rr_ints.uniform_()
        rr_ints *= (1.0 - gaussian.EPSILON)
        rng = torch.cuda.FloatTensor(rng) if use_cuda else torch.FloatTensor(rng)
        rngxp = rng.unsqueeze(0).unsqueeze(0).unsqueeze(0).expand_as(rr_ints) # bounds of the tensor
        rrng = torch.cuda.FloatTensor(self.region) if use_cuda else torch.FloatTensor(self.region) # bounds of the range from which to sample
        rrng = rrng.unsqueeze(0).unsqueeze(0).unsqueeze(0).expand_as(rr_ints)
        mns_expand = means.round().unsqueeze(2).expand_as(rr_ints)
        # upper and lower bounds
        lower = mns_expand - rrng * 0.5
        upper = mns_expand + rrng * 0.5
        # check for any ranges that are out of bounds: clamp the window so it
        # stays inside [0, rng) while keeping its width.
        idxs = lower < 0.0
        lower[idxs] = 0.0
        idxs = upper > rngxp
        lower[idxs] = rngxp[idxs] - rrng[idxs]
        rr_ints = (rr_ints * rrng + lower).long()
        """
        Sample uniformly from all index tuples
        """
        g_ints = torch.cuda.FloatTensor(c, k, self.gadditional, 1) if use_cuda else torch.FloatTensor(c, k, self.gadditional, 1)
        rngxp = rng.unsqueeze(0).unsqueeze(0).unsqueeze(0).expand_as(g_ints) # bounds of the tensor
        g_ints.uniform_()
        g_ints *= (1.0 - gaussian.EPSILON) * rngxp
        g_ints = g_ints.long()
        ints = torch.cat([neighbor_ints, rr_ints, g_ints], dim=2)
        return ints.view(c, -1, rank)
    def forward(self, input, train=True):
        """Multiply `input` (shape (in_num, ...)) by the learned sparse matrix.

        In training mode the integer indices are sampled around the
        continuous means and the values distributed proportionally to the
        Gaussian densities; at inference the means are simply rounded.
        """
        ### Compute and unpack output of hypernetwork
        means, sigmas, values = self.hyper(input)
        nm = means.size(0)
        c = nm // self.k
        means = means.view(c, self.k, 1)
        sigmas = sigmas.view(c, self.k, 1)
        values = values.view(c, self.k)
        rng = (self.in_num, )
        assert input.size(0) == self.in_num
        if train:
            indices = self.generate_integer_tuples(means, rng=rng, use_cuda=self.use_cuda)
            indfl = indices.float()
            # Mask for duplicate indices
            dups = self.duplicates(indices)
            props = densities(indfl, means, sigmas).clone() # result has size (c, indices.size(1), means.size(1))
            props[dups] = 0
            props = props / props.sum(dim=1, keepdim=True)
            # Spread each connection's value over the sampled indices in
            # proportion to its density there.
            values = values.unsqueeze(1).expand(c, indices.size(1), means.size(1))
            values = props * values
            values = values.sum(dim=2)
            # unroll the batch dimension
            indices = indices.view(-1, 1)
            values = values.view(-1)
            indices = torch.cat([self.outs, indices.long()], dim=1)
        else:
            indices = means.round().long().view(-1, 1)
            values = values.squeeze().view(-1)
            indices = torch.cat([self.outs_inf, indices.long()], dim=1)
        if self.use_cuda:
            indices = indices.cuda()
        # Kill anything on the diagonal
        values[indices[:, 0] == indices[:, 1]] = 0.0
        # if self.symmetric:
        #     # Add reverse direction automatically
        #     flipped_indices = torch.cat([indices[:, 1].unsqueeze(1), indices[:, 0].unsqueeze(1)], dim=1)
        #     indices         = torch.cat([indices, flipped_indices], dim=0)
        #     values          = torch.cat([values, values], dim=0)
        ### Create the sparse weight tensor
        # Prevent segfault
        assert not util.contains_nan(values.data)
        vindices = Variable(indices.t())
        sz = Variable(torch.tensor((self.out_num, self.in_num)))
        spmm = sparsemm(self.use_cuda)
        output = spmm(vindices, values, sz, input)
        return output
    def hyper(self, input=None):
        """
        Evaluates hypernetwork: maps self.params to (means, sigmas, values).
        Means are squashed to [0, in_num - 1]; sigmas are softplus-activated,
        scaled by in_num and sigma_scale, and floored at min_sigma.
        """
        k, width = self.params.size()
        means = F.sigmoid(self.params[:, 0:1])
        # Limits for each of the w_rank indices
        # and scales for the sigmas
        # NOTE(review): FloatTensor((in_num,)) builds a 1-element tensor whose
        # VALUE is in_num (data constructor, not a shape).
        s = torch.cuda.FloatTensor((self.in_num,)) if self.use_cuda else torch.FloatTensor((self.in_num,))
        s = Variable(s.contiguous())
        ss = s.unsqueeze(0)
        sm = s - 1
        sm = sm.unsqueeze(0)
        means = means * sm.expand_as(means)
        sigmas = nn.functional.softplus(self.params[:, 1:2] + gaussian.SIGMA_BOOST) + gaussian.EPSILON
        values = self.params[:, 2:] # * 0.0 + 1.0
        sigmas = sigmas.expand_as(means)
        sigmas = sigmas * ss.expand_as(sigmas)
        sigmas = sigmas * self.sigma_scale + self.min_sigma
        # The conditional applies only to the third element: values are fixed
        # to 1/k when fix_value is set, otherwise passed through unchanged.
        return means, sigmas, values * 0.0 + 1.0/self.k if self.fix_value else values
class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907.
    Code adapted from pyGCN, see https://github.com/tkipf/pygcn.

    The adjacency is passed to forward() as a callable rather than a matrix.
    """

    def __init__(self, in_features, out_features, bias=True, has_weight=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # The weight can be disabled, reducing the layer to pure propagation.
        if has_weight:
            self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        else:
            self.weight = None
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init for the weight; zero init for the bias."""
        if self.weight is not None:
            bound = 1. / math.sqrt(self.weight.size(1))
            self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.zero_()  # deliberately zero, unlike the pyGCN default

    def forward(self, input, adj, train=True):
        """Propagate the (optionally weighted) input through the adjacency callable."""
        if input is None:
            # Identity input: the support is the weight matrix itself.
            support = self.weight
        else:
            support = input if self.weight is None else torch.mm(input, self.weight)
        propagated = adj(support, train=train)
        return propagated if self.bias is None else propagated + self.bias
class ConvModel(nn.Module):
    """
    MNIST autoencoder whose latent codes are propagated through a learned
    sparse adjacency matrix (MatrixHyperlayer). ``forward`` decodes every
    intermediate propagation step back to image space.
    """
    def __init__(self, data_size, k, emb_size = 16, radd=32, gadd=32, range=128, min_sigma=0.0, directed=True, fix_value=False, encoder=False):
        # NOTE(review): `range` shadows the builtin; it is the local sampling
        # region forwarded to MatrixHyperlayer.
        super().__init__()
        self.data_shape = data_size
        n, c, h, w = data_size
        # - channel sizes
        c1, c2, c3 = 16, 32, 64
        h1, h2, h3 = 256, 128, 64
        # upmode = 'bilinear'
        # self.decoder_conv = nn.Sequential(
        #     nn.Linear(h3, 4 * 4 * c3), nn.ReLU(),
        #     util.Reshape((c3, 4, 4)),
        #     nn.ConvTranspose2d(c3, c3, (3, 3), padding=1), nn.ReLU(),
        #     nn.ConvTranspose2d(c3, c3, (3, 3), padding=1), nn.ReLU(),
        #     nn.ConvTranspose2d(c3, c2, (3, 3), padding=1), nn.ReLU(),
        #     nn.Upsample(scale_factor=3, mode=upmode),
        #     nn.ConvTranspose2d(c2, c2, (3, 3), padding=1), nn.ReLU(),
        #     nn.ConvTranspose2d(c2, c2, (3, 3), padding=1), nn.ReLU(),
        #     nn.ConvTranspose2d(c2, c1, (3, 3), padding=1), nn.ReLU(),
        #     nn.Upsample(scale_factor=2, mode=upmode),
        #     nn.ConvTranspose2d(c1, c1, (5, 5), padding=0), nn.ReLU(),
        #     nn.ConvTranspose2d(c1, c1, (3, 3), padding=1), nn.ReLU(),
        #     nn.ConvTranspose2d(c1, 1, (3, 3), padding=1), nn.Sigmoid(),
        #     # util.Debug(lambda x : print(x.size()))
        # )
        #
        # self.decoder_lin = nn.Sequential(
        #     nn.Linear(emb_size, h3), nn.ReLU(),
        #     nn.Linear(h3, h2), nn.ReLU(),
        #     nn.Linear(h2, h3),
        # )
        #
        # self.decoder = nn.Sequential(
        #     self.decoder_lin,
        #     self.decoder_conv
        # )
        # Encoder is only used during pretraining
        self.encoder = nn.Sequential(
            util.Flatten(),
            nn.Linear(28*28, h2), nn.ReLU(),
            nn.Linear(h2, h3), nn.ReLU(),
            nn.Linear(h3, emb_size * 2)
        )
        self.decoder = nn.Sequential(
            nn.Linear(emb_size, h3), nn.ReLU(),
            nn.Linear(h3, h2), nn.ReLU(),
            nn.Linear(h2, h3), nn.ReLU(),
            nn.Linear(h3, 28*28), nn.Sigmoid(),
            util.Reshape((1, 28, 28))
        )
        # self.encoder = None
        # if encoder:
        # self.encoder_conv = nn.Sequential(
        #     nn.Conv2d(1, c1, (3, 3), padding=1), nn.ReLU(),
        #     nn.Conv2d(c1, c1, (3, 3), padding=1), nn.ReLU(),
        #     nn.Conv2d(c1, c1, (3, 3), padding=1), nn.ReLU(),
        #     nn.MaxPool2d((2, 2)),
        #     nn.Conv2d(c1, c2, (3, 3), padding=1), nn.ReLU(),
        #     nn.Conv2d(c2, c2, (3, 3), padding=1), nn.ReLU(),
        #     nn.Conv2d(c2, c2, (3, 3), padding=1), nn.ReLU(),
        #     nn.MaxPool2d((2, 2)),
        #     nn.Conv2d(c2, c3, (3, 3), padding=1), nn.ReLU(),
        #     nn.Conv2d(c3, c3, (3, 3), padding=1), nn.ReLU(),
        #     nn.Conv2d(c3, c3, (3, 3), padding=1), nn.ReLU(),
        #     nn.MaxPool2d((2, 2)),
        #     util.Flatten(),
        #     nn.Linear(9 * c3, h1)
        # )
        #
        # self.encoder_lin = nn.Sequential(
        #     util.Flatten(),
        #     nn.Linear(h1, h2), nn.ReLU(),
        #     nn.Linear(h2, h3), nn.ReLU(),
        #     nn.Linear(h3, emb_size * 2),
        # )
        #
        # self.encoder = nn.Sequential(
        #     self.encoder_conv,
        #     self.encoder_lin
        # )
        #
        # Learned sparse n-by-n adjacency over the data points.
        self.adj = MatrixHyperlayer(n, n, k, radditional=radd, gadditional=gadd, region=(range,),
                                    min_sigma=min_sigma, fix_value=fix_value)
        # One free embedding vector per data point (used when no encoder).
        self.embedding = Parameter(torch.randn(n, emb_size))
        self.emb_size = emb_size
        # self.embedding_conv = GraphConvolution(n, emb_size, bias=False)
        # self.weightless_conv = GraphConvolution(emb_size, emb_size, has_weight=False, bias=False)
    def freeze(self):
        # NOTE(review): encoder_conv/decoder_conv only exist in the
        # commented-out convolutional variant above; calling this as-is would
        # raise AttributeError — verify before use.
        for param in self.encoder_conv.parameters():
            param.requires_grad = False
        for param in self.decoder_conv.parameters():
            param.requires_grad = False
    def forward(self, depth=1, train=True, data=None): #, reg=util.kl_batch
        """
        Propagates latent codes `depth` times through the adjacency and
        decodes every intermediate state.

        Returns:
            (decoded, None) where decoded is a list of `depth` image batches.
        """
        # x0 = self.embedding_conv.weight
        # x = self.embedding_conv(input=None, adj=self.adj, train=train) # identity matrix input
        # results = [x0, x]
        # for i in range(1, depth):
        #     x = self.weightless_conv(input=x, adj=self.adj, train=train)
        #     results.append(x)
        reg_losses = []
        if self.encoder is None:
            x = self.embedding
        else:
            # VAE-style encoding: first half of the output is the mean,
            # second half the (log) sigma.
            xraw = self.encoder(data)
            xmean, xsig = xraw[:, :self.emb_size], xraw[:, self.emb_size:]
            reg_losses.append(util.kl_loss(xmean, xsig)[:, None])
            x = util.vae_sample(xmean, xsig)
        n, e = x.size()
        results = [x]
        # reg_losses.append(reg(x))
        for _ in range(1, depth):
            x = self.adj(x, train=train)
            results.append(x)
            # reg_losses.append(reg(x))
        # if self.encoder is None:
        #     return [self.decoder(r) for r in results]
        # else:
        return [self.decoder(r) for r in results], None #, reg_losses
    def cuda(self):
        # Move the model and the (non-Module) hyperlayer tensors to the GPU.
        super().cuda()
        self.adj.apply(lambda t: t.cuda())
    def train_decoder(self, data, epochs=1000, lr=0.0001,
                      batch_size=256, cuda=torch.cuda.is_available()):
        """
        Pretrains encoder/decoder as a plain VAE, then freezes the decoder and
        initializes the per-point embeddings from the encoder means.

        NOTE(review): the `cuda` default is evaluated once at definition time.
        """
        n, c, h, w = data.size()
        params = list(self.encoder.parameters()) + list(self.decoder.parameters())
        opt = torch.optim.Adam(params, lr=lr)
        for e in trange(epochs):
            for fr in range(0, n, batch_size):
                to = min(fr + batch_size, n)
                batch, b = data[fr:to], to - fr
                if cuda:
                    batch = batch.cuda()
                batch = Variable(batch)
                opt.zero_grad()
                # forward
                z = self.encoder(batch)
                kl = util.kl_loss(z[:, :self.emb_size], z[:, self.emb_size:])
                z = util.vae_sample(z[:, :self.emb_size], z[:, self.emb_size:])
                rec = self.decoder(z)
                # backward
                loss = F.binary_cross_entropy(rec, batch, reduce=False).view(b, -1).sum(dim=1) + kl
                loss.mean().backward()
                opt.step()
        # Freeze: embeddings take the encoder means; decoder stops training.
        self.embedding.data = self.encoder(data).data[:, :self.emb_size]
        self.embedding.requires_grad = False
        for p in self.decoder.parameters():
            p.requires_grad = False
#
class ConvModelFlat(nn.Module):
def __init__(self, data_size, k, radd=128, gadd=128, range=16, min_sigma=0.0, fix_value=False):
super().__init__()
n, c, h, w = data_size
self.adj = MatrixHyperlayer(n, n, k, radditional=radd, gadditional=gadd, min_sigma=min_sigma,
region=(range,), fix_value=fix_value)
def forward(self, data, depth=1, train=True):
n = data.size(0)
x = data.view(n, -1)
results =[]
for _ in range(depth):
x = self.adj(x, train=train)
results.append(x)
return [r.view(data.size()).clamp(0, 1) for r in results]
def cuda(self):
super().cuda()
self.adj.apply(lambda t: t.cuda())
PLOT_MAX = 2000 # max number of data points for the latent space plot
def go(arg):
    """
    Runs the full experiment: loads MNIST, pretrains the autoencoder, trains
    the sparse adjacency, and periodically plots inputs, reconstructions,
    graph components, latent space and (optionally) the learned graph.

    Args:
        arg: parsed command-line namespace (see the ArgumentParser below).
    """
    MARGIN = 0.1
    util.makedirs('./conv/')
    torch.manual_seed(arg.seed)
    writer = SummaryWriter()
    mnist = torchvision.datasets.MNIST(root=arg.data, train=True, download=True, transform=transforms.ToTensor())
    data = util.totensor(mnist, shuffle=True, maxclass=None)
    assert data.min() == 0 and data.max() == 1.0
    if arg.limit is not None:
        data = data[:arg.limit]
    model = ConvModel(data.size(), k=arg.k, emb_size=arg.emb_size,
                      gadd=arg.gadditional, radd=arg.radditional, range=arg.range,
                      min_sigma=arg.min_sigma, fix_value=arg.fix_value, encoder=arg.encoder)
    # model = ConvModelFlat(data.size(), k=arg.k,
    #                   gadd=arg.gadditional, radd=arg.radditional, range=arg.range,
    #                   min_sigma=arg.min_sigma, fix_value=arg.fix_value)
    if arg.cuda:
        model.cuda()
        data = data.cuda()
    data, target = Variable(data), Variable(data)
    optimizer = optim.Adam(
        list(model.parameters()), lr=arg.lr)
    n, c, h, w = data.size()
    print('pretraining')
    model.train_decoder(data, epochs=arg.pretrain_epochs, cuda=arg.cuda)
    print('training')
    for epoch in trange(arg.epochs):
        optimizer.zero_grad()
        outputs, _ = model(depth=arg.depth, data=data)
        # reg=lambda x: util.kl_batch(x)[None, None].expand(n, 1) * 10000
        # reg = lambda x: x.norm(dim=1, keepdim=True)
        # reg=lambda x: F.relu(x.norm(dim=1, keepdim=True) - 1.0) * 1000
        # reg=lambda x : torch.zeros(x.size(0), 1)
        # Reconstruction loss per depth step (summed over pixels, kept per-row
        # so they can be concatenated below).
        rec_losses = []
        for i, o in enumerate(outputs):
            rec_losses.append( F.binary_cross_entropy(o, target, reduce=False).view(n, -1).sum(dim=1, keepdim=True) )
        #
        # losses = torch.cat(rec_losses + reg_losses, dim=1).mean(dim=0)
        # regularize sigmas
        _, sigmas, _ = model.adj.hyper()
        reg = sigmas.norm().mean()
        loss = torch.cat(rec_losses, dim=1).sum()
        # print(loss, reg)
        # sys.exit()
        tloss = loss + arg.regweight * reg
        tloss.backward()
        optimizer.step()
        writer.add_scalar('conv/train-loss', loss.item(), epoch)
        if epoch % arg.plot_every == 0:
            # Plot a sample of the raw input images.
            plt.figure(figsize=(8, 2))
            # print(losses)
            # if arg.depth > 1:
            #     print(' adj', model.adj.params.grad.mean().item())
            # # print(' lin', next(model.decoder.parameters()).grad.mean().item())
            plt.cla()
            plt.imshow(np.transpose(torchvision.utils.make_grid(data.data[:16, :]).cpu().numpy(), (1, 2, 0)),
                       interpolation='nearest')
            plt.savefig('./conv/inp.{:05d}.png'.format(epoch))
            # Plot the results
            with torch.no_grad():
                outputs, _ = model(depth=arg.depth, train=False, data=data)
                for d, o in enumerate(outputs):
                    plt.cla()
                    plt.imshow(np.transpose(torchvision.utils.make_grid(o.data[:16, :]).cpu().numpy(), (1, 2, 0)),
                               interpolation='nearest')
                    plt.savefig('./conv/rec.{:05d}.{:02d}.png'.format(epoch, d))
                plt.figure(figsize=(8, 8))
                means, sigmas, values = model.adj.hyper()
                means, sigmas, values = means.data, sigmas.data, values.data
                # Prepend the fixed output (row) indices to the learned means.
                means = torch.cat([model.adj.outs_inf.data.float(), means], dim=1)
                if arg.draw_matrix:
                    plt.cla()
                    s = model.adj.size()
                    util.plot1d(means, sigmas, values.squeeze(), shape=s)
                    plt.xlim((-MARGIN * (s[0] - 1), (s[0] - 1) * (1.0 + MARGIN)))
                    plt.ylim((-MARGIN * (s[0] - 1), (s[0] - 1) * (1.0 + MARGIN)))
                    plt.savefig('./conv/means.{:05}.pdf'.format(epoch))
                # Dump the discretized graph (edge indices + weights) to CSV.
                graph = np.concatenate([means.round().long().cpu().numpy(), values.cpu().numpy()], axis=1)
                np.savetxt('graph.{:05}.csv'.format(epoch), graph)
                """
                Plot the data, reconstructions and components
                """
                w, h = 24, 1 + arg.depth + arg.k
                mround = means.round().long()
                plt.figure(figsize=(w, h))
                # NOTE(review): `map` and `norm` shadow builtins below.
                norm = mpl.colors.Normalize(vmin=-1.0,
                                            vmax=1.0)  # doing this manually, the nx code produces very strange results
                map = mpl.cm.ScalarMappable(norm=norm, cmap=plt.cm.RdYlBu)
                for i in range(w):
                    # plot the image
                    ax = plt.subplot(h, w, i + 1)
                    im = np.transpose(data[i, :, :, :].cpu().numpy(), (1, 2, 0))
                    im = np.squeeze(im)
                    ax.imshow(im, interpolation='nearest', origin='upper', cmap='gray_r')
                    if i == 0:
                        ax.set_ylabel('image')
                    clean(ax)
                    # plot the reconstructions
                    for r, output in enumerate(outputs):
                        ax = plt.subplot(h, w, w*(r+1) +(i + 1))
                        im = np.transpose(output[i, :, :, :].cpu().numpy(), (1, 2, 0))
                        im = np.squeeze(im)
                        ax.imshow(im, interpolation='nearest', origin='upper', cmap='gray_r')
                        if i == 0:
                            ax.set_ylabel('rec. {}'.format(r))
                        clean(ax)
                    # plot the components
                    for c in range(arg.k):
                        ax = plt.subplot(h, w, w*(c+1+len(outputs)) +(i + 1))
                        comp = mround.view(-1, arg.k, 2)[i, c, 1]
                        mult = values.view(-1, arg.k)[i, c]
                        # Tint the neighboring image by its edge weight.
                        color = np.asarray(map.to_rgba(mult))[:3]
                        im = np.transpose(data[comp, :, :, :].cpu().numpy(), (1, 2, 0))
                        im = im * (1.0 - color)
                        im = 1.0 - im
                        ax.imshow(im,
                                  interpolation='nearest',
                                  origin='upper')
                        clean(ax)
                        if i == 0:
                            ax.set_ylabel('comp. {}'.format(c+1))
                plt.subplots_adjust(wspace=None, hspace=None)
                #fig.tight_layout()
                plt.savefig('./conv/examples{:03}.pdf'.format(epoch), dpi=72)
                """
                Plot the embeddings
                """
                if arg.depth == 2:
                    map = None
                else:
                    norm = mpl.colors.Normalize(vmin=1.0, vmax=arg.depth)
                    map = mpl.cm.ScalarMappable(norm=norm, cmap=plt.cm.tab10)
                latents = model.encoder(data) if arg.encoder else model.embedding.data
                images = data.data.cpu().permute(0, 2, 3, 1).numpy()[:PLOT_MAX, :]
                ax = None
                size = None
                for d in range(arg.depth):
                    if d == 0:
                        color = None
                    elif map is None:
                        color = np.asarray([0.0, 0.0, 1.0, 1.0])
                    else:
                        color = map.to_rgba(d)
                    l2 = latents[:PLOT_MAX, :2]
                    ax, size = util.scatter_imgs(l2.cpu().numpy(), images, ax=ax, color=color, size=size)
                    if d < arg.depth - 1:
                        latents = model.adj(latents, train=False)
                util.clean(ax)
                plt.savefig('./conv/latent-space.{:05}.pdf'.format(epoch), dpi=600)
                """
                Plot the graph (reasonable results for small datasets)
                """
                if arg.draw_graph:
                    # Plot the graph
                    outputs = model(depth=arg.depth, train=False)
                    g = nx.MultiDiGraph()
                    g.add_nodes_from(range(data.size(0)))
                    print('Drawing graph at ', epoch, 'epochs')
                    for i in range(means.size(0)):
                        m = means[i, :].round().long()
                        v = values[i]
                        g.add_edge(m[1].item(), m[0].item(), weight=v.item() )
                        # print(m[1].item(), m[0].item(), v.item())
                    plt.figure(figsize=(8,8))
                    ax = plt.subplot(111)
                    pos = nx.spring_layout(g, iterations=100, k=5/math.sqrt(data.size(0)))
                    # pos = nx.circular_layout(g)
                    nx.draw_networkx_nodes(g, pos, node_size=30, node_color='w', node_shape='s', axes=ax)
                    # edges = nx.draw_networkx_edges(g, pos, edge_color=values.data.view(-1), edge_vmin=0.0, edge_vmax=1.0, cmap='bone')
                    weights = [d['weight'] for (_, _, d) in g.edges(data=True)]
                    colors = map.to_rgba(weights)
                    nx.draw_networkx_edges(g, pos, width=1.0, edge_color=colors, axes=ax)
                    # Draw each datapoint's image at its node position.
                    ims = 0.03
                    xmin, xmax = float('inf'), float('-inf')
                    ymin, ymax = float('inf'), float('-inf')
                    out0 = outputs[1].data
                    # out1 = outputs[1].data
                    for i, coords in pos.items():
                        extent = (coords[0] - ims, coords[0] + ims, coords[1] - ims, coords[1] + ims)
                        # extent0 = (coords[0] - ims, coords[0] + ims, coords[1] + ims, coords[1] + 3 * ims)
                        # extent1 = (coords[0] - ims, coords[0] + ims, coords[1] + 3 * ims, coords[1] + 5 * ims)
                        ax.imshow(data[i].cpu().squeeze(), cmap='gray_r', extent=extent, zorder=100, alpha=1)
                        # ax.imshow(out0[i].cpu().squeeze(), cmap='pink_r', extent=extent0, zorder=100, alpha=0.85)
                        # ax.imshow(out1[i].cpu().squeeze(), cmap='pink_r', extent=extent1, zorder=100)
                        xmin, xmax = min(coords[0], xmin), max(coords[0], xmax)
                        ymin, ymax = min(coords[1], ymin), max(coords[1], ymax)
                    # NOTE(review): rebinds the outer MARGIN for the axis limits.
                    MARGIN = 0.3
                    ax.set_xlim(xmin-MARGIN, xmax+MARGIN)
                    ax.set_ylim(ymin-MARGIN, ymax+MARGIN)
                    plt.axis('off')
                    plt.savefig('./conv/graph.{:05}.pdf'.format(epoch), dpi=300)
    print('Finished Training.')
def test():
    """
    Poor man's unit test

    Compares the gradients of the custom sparse matrix multiplication
    (util.SparseMultCPU) against a dense torch.mm baseline and against
    finite-difference estimates for both the values and the input.
    """
    indices = Variable(torch.tensor([[0,1],[1,0],[2,1]]), requires_grad=True)
    values = Variable(torch.tensor([1.0, 2.0, 3.0]), requires_grad=True)
    size = Variable(torch.tensor([3, 2]))
    wsparse = torch.sparse.FloatTensor(indices.t(), values, (3,2))
    # Dense equivalent of the sparse matrix above.
    wdense = Variable(torch.tensor([[0.0,1.0],[2.0,0.0],[0.0, 3.0]]), requires_grad=True)
    x = Variable(torch.randn(2, 4), requires_grad=True)
    #
    # print(wsparse)
    # print(wdense)
    # print(x)
    # dense version
    mul = torch.mm(wdense, x)
    loss = mul.norm()
    loss.backward()
    print('dw', wdense.grad)
    print('dx', x.grad)
    del loss
    # spmm version
    # mul = torch.mm(wsparse, x)
    # loss = mul.norm()
    # loss.backward()
    #
    # print('dw', values.grad)
    # print('dx', x.grad)
    x.grad = None
    values.grad = None
    mul = util.SparseMultCPU.apply(indices.t(), values, size, x)
    loss = mul.norm()
    loss.backward()
    print('dw', values.grad)
    print('dx', x.grad)
    # Finite elements approach for w
    for h in [1e-4, 1e-5, 1e-6]:
        grad = torch.zeros(values.size(0))
        for i in range(values.size(0)):
            # Perturb a single value by h and take the forward difference.
            nvalues = values.clone()
            nvalues[i] = nvalues[i] + h
            mul = util.SparseMultCPU.apply(indices.t(), values, size, x)
            loss0 = mul.norm()
            mul = util.SparseMultCPU.apply(indices.t(), nvalues, size, x)
            loss1 = mul.norm()
            grad[i] = (loss1-loss0)/h
        print('hw', h, grad)
    # Finite elements approach for x
    for h in [1e-4, 1e-5, 1e-6]:
        grad = torch.zeros(x.size())
        for i in range(x.size(0)):
            for j in range(x.size(1)):
                # Perturb a single input entry by h.
                nx = x.clone()
                nx[i, j] = x[i, j] + h
                mul = util.SparseMultCPU.apply(indices.t(), values, size, x)
                loss0 = mul.norm()
                mul = util.SparseMultCPU.apply(indices.t(), values, size, nx)
                loss1 = mul.norm()
                grad[i, j] = (loss1-loss0)/h
        print('hx', h, grad)
# Command-line entry point: parse hyperparameters, optionally run the unit
# test, then launch the experiment.
if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--test", dest="test",
                        help="Run the unit tests.",
                        action="store_true")
    parser.add_argument("-e", "--epochs",
                        dest="epochs",
                        help="Number of epochs",
                        default=250, type=int)
    parser.add_argument("-P", "--pretrain-epochs",
                        dest="pretrain_epochs",
                        help="Number of epochs spent optimizing the embeddings. After this cutoff, the embeddings are frozen, and the adjacency matrix is trained.",
                        default=1000, type=int)
    parser.add_argument("-E", "--emb_size",
                        dest="emb_size",
                        help="Size of the node embeddings.",
                        default=16, type=int)
    parser.add_argument("-k", "--num-points",
                        dest="k",
                        help="Number of index tuples",
                        default=3, type=int)
    parser.add_argument("-L", "--limit",
                        dest="limit",
                        help="Number of data points",
                        default=None, type=int)
    parser.add_argument("-a", "--gadditional",
                        dest="gadditional",
                        help="Number of additional points sampled globally per index-tuple",
                        default=32, type=int)
    parser.add_argument("-A", "--radditional",
                        dest="radditional",
                        help="Number of additional points sampled locally per index-tuple",
                        default=16, type=int)
    parser.add_argument("-R", "--range",
                        dest="range",
                        help="Range in which the local points are sampled",
                        default=128, type=int)
    parser.add_argument("-d", "--depth",
                        dest="depth",
                        help="Number of graph convolutions",
                        default=5, type=int)
    parser.add_argument("-p", "--plot-every",
                        dest="plot_every",
                        help="Numer of epochs to wait between plotting",
                        default=100, type=int)
    parser.add_argument("-l", "--learn-rate",
                        dest="lr",
                        help="Learning rate",
                        default=0.01, type=float)
    parser.add_argument("-r", "--seed",
                        dest="seed",
                        help="Random seed",
                        default=4, type=int)
    parser.add_argument("-c", "--cuda", dest="cuda",
                        help="Whether to use cuda.",
                        action="store_true")
    #
    # parser.add_argument("-S", "--undirected", dest="undirected",
    #                     help="Use an undirected graph",
    #                     action="store_true")
    parser.add_argument("-J", "--draw-matrix", dest="draw_matrix",
                        help="Draw the adjacency matrix",
                        action="store_true")
    parser.add_argument("-G", "--draw-graph", dest="draw_graph",
                        help="Draw the graph",
                        action="store_true")
    parser.add_argument("-F", "--fix-value", dest="fix_value",
                        help="Fix the values of the matrix to 1/k",
                        action="store_true")
    parser.add_argument("-N", "--use-encoder", dest="encoder",
                        help="Whether to use an encoder",
                        action="store_true")
    parser.add_argument("-D", "--data", dest="data",
                        help="Data directory",
                        default='./data')
    parser.add_argument("-M", "--min-sigma",
                        dest="min_sigma",
                        help="Minimal sigma value",
                        default=0.0, type=float)
    parser.add_argument("-Q", "--regularization-weight",
                        dest="regweight",
                        help="Regularization weight (the bigger this is, the faster the sigma's converge).",
                        default=0.0, type=float)
    args = parser.parse_args()
    # --test short-circuits into the gradient check and exits.
    if args.test:
        test()
        print('Tests completed succesfully.')
        sys.exit()
    print('OPTIONS', args)
    go(args)
| [
"util.totensor",
"torch.nn.ReLU",
"util.contains_nan",
"util.kl_loss",
"torch.sqrt",
"torch.exp",
"torch.nn.functional.sigmoid",
"networkx.draw_networkx_nodes",
"torch.cuda.is_available",
"sys.exit",
"torchvision.utils.make_grid",
"util.Flatten",
"torch.arange",
"networkx.draw_networkx_edg... | [((336, 350), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (343, 350), True, 'import matplotlib as mpl\n'), ((1620, 1665), 'torch.sqrt', 'torch.sqrt', (['(1.0 / (gaussian.EPSILON + sigmas))'], {}), '(1.0 / (gaussian.EPSILON + sigmas))\n', (1630, 1665), False, 'import torch\n'), ((2008, 2034), 'torch.exp', 'torch.exp', (['(-0.5 * products)'], {}), '(-0.5 * products)\n', (2017, 2034), False, 'import torch\n'), ((18872, 18896), 'util.makedirs', 'util.makedirs', (['"""./conv/"""'], {}), "('./conv/')\n", (18885, 18896), False, 'import util\n'), ((18901, 18928), 'torch.manual_seed', 'torch.manual_seed', (['arg.seed'], {}), '(arg.seed)\n', (18918, 18928), False, 'import torch\n'), ((18943, 18958), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (18956, 18958), False, 'from tensorboardX import SummaryWriter\n'), ((19086, 19135), 'util.totensor', 'util.totensor', (['mnist'], {'shuffle': '(True)', 'maxclass': 'None'}), '(mnist, shuffle=True, maxclass=None)\n', (19099, 19135), False, 'import util\n'), ((20063, 20081), 'tqdm.trange', 'trange', (['arg.epochs'], {}), '(arg.epochs)\n', (20069, 20081), False, 'from tqdm import trange\n'), ((29992, 30011), 'torch.mm', 'torch.mm', (['wdense', 'x'], {}), '(wdense, x)\n', (30000, 30011), False, 'import torch\n'), ((31597, 31613), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (31611, 31613), False, 'from argparse import ArgumentParser\n'), ((815, 824), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (822, 824), True, 'import matplotlib.pyplot as plt\n'), ((2786, 2811), 'torch.sort', 'torch.sort', (['unique'], {'dim': '(1)'}), '(unique, dim=1)\n', (2796, 2811), False, 'import torch\n'), ((2836, 2863), 'torch.sort', 'torch.sort', (['sort_idx'], {'dim': '(1)'}), '(sort_idx, dim=1)\n', (2846, 2863), False, 'import torch\n'), ((2926, 3005), 'torch.zeros', 'torch.zeros', (['b', '(1)'], {'dtype': 'torch.uint8', 'device': "('cuda' if self.use_cuda else 
'cpu')"}), "(b, 1, dtype=torch.uint8, device='cuda' if self.use_cuda else 'cpu')\n", (2937, 3005), False, 'import torch\n'), ((3021, 3049), 'torch.cat', 'torch.cat', (['[zs, mask]'], {'dim': '(1)'}), '([zs, mask], dim=1)\n', (3030, 3049), False, 'import torch\n'), ((3066, 3099), 'torch.gather', 'torch.gather', (['mask', '(1)', 'unsort_idx'], {}), '(mask, 1, unsort_idx)\n', (3078, 3099), False, 'import torch\n'), ((4817, 4867), 'torch.tensor', 'torch.tensor', (['[1, 0]'], {'device': 'dv', 'dtype': 'torch.uint8'}), '([1, 0], device=dv, dtype=torch.uint8)\n', (4829, 4867), False, 'import torch\n'), ((6726, 6776), 'torch.cat', 'torch.cat', (['[neighbor_ints, rr_ints, g_ints]'], {'dim': '(2)'}), '([neighbor_ints, rr_ints, g_ints], dim=2)\n', (6735, 6776), False, 'import torch\n'), ((8930, 8953), 'util.sparsemm', 'sparsemm', (['self.use_cuda'], {}), '(self.use_cuda)\n', (8938, 8953), False, 'from util import sparsemm\n'), ((9173, 9203), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['self.params[:, 0:1]'], {}), '(self.params[:, 0:1])\n', (9182, 9203), True, 'import torch.nn.functional as F\n'), ((16847, 16872), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (16870, 16872), False, 'import torch\n'), ((17007, 17038), 'torch.optim.Adam', 'torch.optim.Adam', (['params'], {'lr': 'lr'}), '(params, lr=lr)\n', (17023, 17038), False, 'import torch\n'), ((17057, 17071), 'tqdm.trange', 'trange', (['epochs'], {}), '(epochs)\n', (17063, 17071), False, 'from tqdm import trange\n'), ((19789, 19803), 'torch.autograd.Variable', 'Variable', (['data'], {}), '(data)\n', (19797, 19803), False, 'from torch.autograd import Variable\n'), ((19805, 19819), 'torch.autograd.Variable', 'Variable', (['data'], {}), '(data)\n', (19813, 19819), False, 'from torch.autograd import Variable\n'), ((29514, 29552), 'torch.tensor', 'torch.tensor', (['[[0, 1], [1, 0], [2, 1]]'], {}), '([[0, 1], [1, 0], [2, 1]])\n', (29526, 29552), False, 'import torch\n'), ((29591, 29620), 
'torch.tensor', 'torch.tensor', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (29603, 29620), False, 'import torch\n'), ((29662, 29682), 'torch.tensor', 'torch.tensor', (['[3, 2]'], {}), '([3, 2])\n', (29674, 29682), False, 'import torch\n'), ((29775, 29825), 'torch.tensor', 'torch.tensor', (['[[0.0, 1.0], [2.0, 0.0], [0.0, 3.0]]'], {}), '([[0.0, 1.0], [2.0, 0.0], [0.0, 3.0]])\n', (29787, 29825), False, 'import torch\n'), ((29860, 29877), 'torch.randn', 'torch.randn', (['(2)', '(4)'], {}), '(2, 4)\n', (29871, 29877), False, 'import torch\n'), ((35924, 35934), 'sys.exit', 'sys.exit', ([], {}), '()\n', (35932, 35934), False, 'import sys\n'), ((3792, 3819), 'torch.randn', 'torch.randn', (['(k * out_num)', '(3)'], {}), '(k * out_num, 3)\n', (3803, 3819), False, 'import torch\n'), ((4197, 4222), 'torch.tensor', 'torch.tensor', (['util.PRIMES'], {}), '(util.PRIMES)\n', (4209, 4222), False, 'import torch\n'), ((5290, 5339), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['c', 'k', 'self.radditional', '(1)'], {}), '(c, k, self.radditional, 1)\n', (5312, 5339), False, 'import torch\n'), ((5357, 5401), 'torch.FloatTensor', 'torch.FloatTensor', (['c', 'k', 'self.radditional', '(1)'], {}), '(c, k, self.radditional, 1)\n', (5374, 5401), False, 'import torch\n'), ((5489, 5516), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['rng'], {}), '(rng)\n', (5511, 5516), False, 'import torch\n'), ((5534, 5556), 'torch.FloatTensor', 'torch.FloatTensor', (['rng'], {}), '(rng)\n', (5551, 5556), False, 'import torch\n'), ((5675, 5710), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['self.region'], {}), '(self.region)\n', (5697, 5710), False, 'import torch\n'), ((5728, 5758), 'torch.FloatTensor', 'torch.FloatTensor', (['self.region'], {}), '(self.region)\n', (5745, 5758), False, 'import torch\n'), ((6388, 6437), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['c', 'k', 'self.gadditional', '(1)'], {}), '(c, k, self.gadditional, 1)\n', (6410, 6437), False, 'import 
torch\n'), ((6455, 6499), 'torch.FloatTensor', 'torch.FloatTensor', (['c', 'k', 'self.gadditional', '(1)'], {}), '(c, k, self.gadditional, 1)\n', (6472, 6499), False, 'import torch\n'), ((8776, 8806), 'util.contains_nan', 'util.contains_nan', (['values.data'], {}), '(values.data)\n', (8793, 8806), False, 'import util\n'), ((8871, 8912), 'torch.tensor', 'torch.tensor', (['(self.out_num, self.in_num)'], {}), '((self.out_num, self.in_num))\n', (8883, 8912), False, 'import torch\n'), ((9301, 9339), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['(self.in_num,)'], {}), '((self.in_num,))\n', (9323, 9339), False, 'import torch\n'), ((9362, 9395), 'torch.FloatTensor', 'torch.FloatTensor', (['(self.in_num,)'], {}), '((self.in_num,))\n', (9379, 9395), False, 'import torch\n'), ((9573, 9639), 'torch.nn.functional.softplus', 'nn.functional.softplus', (['(self.params[:, 1:2] + gaussian.SIGMA_BOOST)'], {}), '(self.params[:, 1:2] + gaussian.SIGMA_BOOST)\n', (9595, 9639), False, 'from torch import nn\n'), ((13120, 13134), 'util.Flatten', 'util.Flatten', ([], {}), '()\n', (13132, 13134), False, 'import util\n'), ((13148, 13170), 'torch.nn.Linear', 'nn.Linear', (['(28 * 28)', 'h2'], {}), '(28 * 28, h2)\n', (13157, 13170), False, 'from torch import nn\n'), ((13173, 13182), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (13180, 13182), False, 'from torch import nn\n'), ((13196, 13213), 'torch.nn.Linear', 'nn.Linear', (['h2', 'h3'], {}), '(h2, h3)\n', (13205, 13213), False, 'from torch import nn\n'), ((13221, 13230), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (13228, 13230), False, 'from torch import nn\n'), ((13244, 13271), 'torch.nn.Linear', 'nn.Linear', (['h3', '(emb_size * 2)'], {}), '(h3, emb_size * 2)\n', (13253, 13271), False, 'from torch import nn\n'), ((13333, 13356), 'torch.nn.Linear', 'nn.Linear', (['emb_size', 'h3'], {}), '(emb_size, h3)\n', (13342, 13356), False, 'from torch import nn\n'), ((13358, 13367), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (13365, 
13367), False, 'from torch import nn\n'), ((13381, 13398), 'torch.nn.Linear', 'nn.Linear', (['h3', 'h2'], {}), '(h3, h2)\n', (13390, 13398), False, 'from torch import nn\n'), ((13406, 13415), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (13413, 13415), False, 'from torch import nn\n'), ((13429, 13446), 'torch.nn.Linear', 'nn.Linear', (['h2', 'h3'], {}), '(h2, h3)\n', (13438, 13446), False, 'from torch import nn\n'), ((13454, 13463), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (13461, 13463), False, 'from torch import nn\n'), ((13477, 13499), 'torch.nn.Linear', 'nn.Linear', (['h3', '(28 * 28)'], {}), '(h3, 28 * 28)\n', (13486, 13499), False, 'from torch import nn\n'), ((13502, 13514), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (13512, 13514), False, 'from torch import nn\n'), ((13528, 13553), 'util.Reshape', 'util.Reshape', (['(1, 28, 28)'], {}), '((1, 28, 28))\n', (13540, 13553), False, 'import util\n'), ((15086, 15110), 'torch.randn', 'torch.randn', (['n', 'emb_size'], {}), '(n, emb_size)\n', (15097, 15110), False, 'import torch\n'), ((16217, 16245), 'util.vae_sample', 'util.vae_sample', (['xmean', 'xsig'], {}), '(xmean, xsig)\n', (16232, 16245), False, 'import util\n'), ((19051, 19072), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (19070, 19072), True, 'import torchvision.transforms as transforms\n'), ((21109, 21135), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 2)'}), '(figsize=(8, 2))\n', (21119, 21135), True, 'import matplotlib.pyplot as plt\n'), ((21371, 21380), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (21378, 21380), True, 'import matplotlib.pyplot as plt\n'), ((10368, 10412), 'torch.FloatTensor', 'torch.FloatTensor', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (10385, 10412), False, 'import torch\n'), ((10489, 10520), 'torch.FloatTensor', 'torch.FloatTensor', (['out_features'], {}), '(out_features)\n', (10506, 10520), False, 'import torch\n'), ((11112, 
11140), 'torch.mm', 'torch.mm', (['input', 'self.weight'], {}), '(input, self.weight)\n', (11120, 11140), False, 'import torch\n'), ((17305, 17320), 'torch.autograd.Variable', 'Variable', (['batch'], {}), '(batch)\n', (17313, 17320), False, 'from torch.autograd import Variable\n'), ((17443, 17499), 'util.kl_loss', 'util.kl_loss', (['z[:, :self.emb_size]', 'z[:, self.emb_size:]'], {}), '(z[:, :self.emb_size], z[:, self.emb_size:])\n', (17455, 17499), False, 'import util\n'), ((17521, 17580), 'util.vae_sample', 'util.vae_sample', (['z[:, :self.emb_size]', 'z[:, self.emb_size:]'], {}), '(z[:, :self.emb_size], z[:, self.emb_size:])\n', (17536, 17580), False, 'import util\n'), ((20811, 20839), 'torch.cat', 'torch.cat', (['rec_losses'], {'dim': '(1)'}), '(rec_losses, dim=1)\n', (20820, 20839), False, 'import torch\n'), ((21651, 21666), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (21664, 21666), False, 'import torch\n'), ((22093, 22119), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (22103, 22119), True, 'import matplotlib.pyplot as plt\n'), ((23149, 23175), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(w, h)'}), '(figsize=(w, h))\n', (23159, 23175), True, 'import matplotlib.pyplot as plt\n'), ((23200, 23241), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(-1.0)', 'vmax': '(1.0)'}), '(vmin=-1.0, vmax=1.0)\n', (23220, 23241), True, 'import matplotlib as mpl\n'), ((23374, 23426), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': 'plt.cm.RdYlBu'}), '(norm=norm, cmap=plt.cm.RdYlBu)\n', (23395, 23426), True, 'import matplotlib as mpl\n'), ((25239, 25284), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': 'None', 'hspace': 'None'}), '(wspace=None, hspace=None)\n', (25258, 25284), True, 'import matplotlib.pyplot as plt\n'), ((26514, 26528), 'util.clean', 'util.clean', (['ax'], {}), '(ax)\n', (26524, 26528), False, 'import 
util\n'), ((16165, 16190), 'util.kl_loss', 'util.kl_loss', (['xmean', 'xsig'], {}), '(xmean, xsig)\n', (16177, 16190), False, 'import util\n'), ((21814, 21823), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (21821, 21823), True, 'import matplotlib.pyplot as plt\n'), ((22396, 22405), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (22403, 22405), True, 'import matplotlib.pyplot as plt\n'), ((22542, 22603), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-MARGIN * (s[0] - 1), (s[0] - 1) * (1.0 + MARGIN))'], {}), '((-MARGIN * (s[0] - 1), (s[0] - 1) * (1.0 + MARGIN)))\n', (22550, 22603), True, 'import matplotlib.pyplot as plt\n'), ((22624, 22685), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-MARGIN * (s[0] - 1), (s[0] - 1) * (1.0 + MARGIN))'], {}), '((-MARGIN * (s[0] - 1), (s[0] - 1) * (1.0 + MARGIN)))\n', (22632, 22685), True, 'import matplotlib.pyplot as plt\n'), ((23526, 23550), 'matplotlib.pyplot.subplot', 'plt.subplot', (['h', 'w', '(i + 1)'], {}), '(h, w, i + 1)\n', (23537, 23550), True, 'import matplotlib.pyplot as plt\n'), ((23657, 23671), 'numpy.squeeze', 'np.squeeze', (['im'], {}), '(im)\n', (23667, 23671), True, 'import numpy as np\n'), ((25592, 25638), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(1.0)', 'vmax': 'arg.depth'}), '(vmin=1.0, vmax=arg.depth)\n', (25612, 25638), True, 'import matplotlib as mpl\n'), ((25665, 25716), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': 'plt.cm.tab10'}), '(norm=norm, cmap=plt.cm.tab10)\n', (25686, 25716), True, 'import matplotlib as mpl\n'), ((26888, 26905), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {}), '()\n', (26903, 26905), True, 'import networkx as nx\n'), ((27342, 27368), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (27352, 27368), True, 'import matplotlib.pyplot as plt\n'), ((27393, 27409), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (27404, 27409), True, 'import 
matplotlib.pyplot as plt\n'), ((27573, 27662), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['g', 'pos'], {'node_size': '(30)', 'node_color': '"""w"""', 'node_shape': '"""s"""', 'axes': 'ax'}), "(g, pos, node_size=30, node_color='w', node_shape='s',\n axes=ax)\n", (27595, 27662), True, 'import networkx as nx\n'), ((27949, 28018), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['g', 'pos'], {'width': '(1.0)', 'edge_color': 'colors', 'axes': 'ax'}), '(g, pos, width=1.0, edge_color=colors, axes=ax)\n', (27971, 28018), True, 'import networkx as nx\n'), ((29305, 29320), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (29313, 29320), True, 'import matplotlib.pyplot as plt\n'), ((24007, 24047), 'matplotlib.pyplot.subplot', 'plt.subplot', (['h', 'w', '(w * (r + 1) + (i + 1))'], {}), '(h, w, w * (r + 1) + (i + 1))\n', (24018, 24047), True, 'import matplotlib.pyplot as plt\n'), ((24159, 24173), 'numpy.squeeze', 'np.squeeze', (['im'], {}), '(im)\n', (24169, 24173), True, 'import numpy as np\n'), ((26126, 26158), 'numpy.asarray', 'np.asarray', (['[0.0, 0.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 1.0, 1.0])\n', (26136, 26158), True, 'import numpy as np\n'), ((20519, 20566), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['o', 'target'], {'reduce': '(False)'}), '(o, target, reduce=False)\n', (20541, 20566), True, 'import torch.nn.functional as F\n'), ((17672, 17720), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['rec', 'batch'], {'reduce': '(False)'}), '(rec, batch, reduce=False)\n', (17694, 17720), True, 'import torch.nn.functional as F\n'), ((21417, 21463), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['data.data[:16, :]'], {}), '(data.data[:16, :])\n', (21444, 21463), False, 'import torchvision\n'), ((3837, 3858), 'torch.arange', 'torch.arange', (['out_num'], {}), '(out_num)\n', (3849, 3858), False, 'import torch\n'), ((4020, 4041), 'torch.arange', 'torch.arange', 
(['out_num'], {}), '(out_num)\n', (4032, 4041), False, 'import torch\n'), ((21868, 21911), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['o.data[:16, :]'], {}), '(o.data[:16, :])\n', (21895, 21911), False, 'import torchvision\n')] |
from pathlib import Path
import numpy as np
def load_binary(path, dtype=np.float32):
    """Read a raw binary file into a 1-D numpy array.

    Args:
        path: String or ``pathlib.Path`` object; ``~`` is expanded.
        dtype: numpy dtype that determines item size and byte order;
            the returned array has this dtype.

    Returns:
        numpy.ndarray holding the file's contents.
    """
    assert isinstance(path, (str, Path)), path
    resolved = Path(path).expanduser()
    with resolved.open('rb') as handle:
        return np.fromfile(handle, dtype)
def reverb_signal(signal, rirs):
    """Convolve *signal* with each room impulse response in *rirs* and
    stack the results into one array (one row per impulse response)."""
    convolved = [np.convolve(signal, impulse_response) for impulse_response in rirs]
    return np.asarray(convolved)
| [
"numpy.fromfile",
"numpy.convolve",
"pathlib.Path"
] | [((563, 584), 'numpy.fromfile', 'np.fromfile', (['f', 'dtype'], {}), '(f, dtype)\n', (574, 584), True, 'import numpy as np\n'), ((491, 501), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (495, 501), False, 'from pathlib import Path\n'), ((659, 683), 'numpy.convolve', 'np.convolve', (['signal', 'rir'], {}), '(signal, rir)\n', (670, 683), True, 'import numpy as np\n')] |
'''scripts/usefulFunctions.py'''
"""
To prevent duplicate code being pasted between methods, common functions should be
put here. No code here should make or delete files.
"""
# External libraries
import numpy as np
# Internal libraries
import subprocess
from shutil import get_terminal_size
from time import time, localtime
def getNewLength(chunks, speeds, fps):
    """Return the output duration (seconds) after applying speed changes.

    chunks: list of [startFrame, endFrame, speedIndex] entries.
    speeds: playback speed per speed index; values >= 99999 mean the
        chunk is cut entirely.
    fps: frames per second of the source video.
    """
    totalFrames = 0
    for start, end, speedIdx in chunks:
        if(speeds[speedIdx] < 99999):
            totalFrames += (end - start) * (1 / speeds[speedIdx])
    return totalFrames / fps
def getMaxVolume(s):
    """Return the peak absolute amplitude found in the sample array *s*."""
    highest = float(np.max(s))
    lowest = float(np.min(s))
    return max(highest, -lowest)
def getAudioChunks(audioData, sampleRate, fps, silentT, zoomT, frameMargin):
    """Split the audio into [startFrame, endFrame, flag] chunks.

    flag is 0 for silent runs and 1 for loud runs (per-frame loudness is
    classified against silentT/zoomT relative to the track's peak, then
    dilated by frameMargin frames on each side).
    """
    import math
    sampleCount = audioData.shape[0]
    # peak absolute amplitude of the whole track (inlined getMaxVolume)
    maxAudioVolume = max(float(np.max(audioData)), -float(np.min(audioData)))
    samplesPerFrame = sampleRate / fps
    audioFrameCount = int(math.ceil(sampleCount / samplesPerFrame))
    hasLoudAudio = np.zeros((audioFrameCount), dtype=np.uint8)
    if(maxAudioVolume == 0):
        print('Warning! The entire audio is silent')
        return [[0, audioFrameCount, 1]]
    for frame in range(audioFrameCount):
        begin = int(frame * samplesPerFrame)
        stop = min(int((frame + 1) * samplesPerFrame), sampleCount)
        window = audioData[begin:stop]
        loudness = max(float(np.max(window)), -float(np.min(window))) / maxAudioVolume
        if(loudness >= zoomT):
            hasLoudAudio[frame] = 2
        elif(loudness >= silentT):
            hasLoudAudio[frame] = 1
    chunks = [[0, 0, 0]]
    shouldIncludeFrame = np.zeros((audioFrameCount), dtype=np.uint8)
    for frame in range(audioFrameCount):
        begin = int(max(0, frame - frameMargin))
        stop = int(min(audioFrameCount, frame + 1 + frameMargin))
        shouldIncludeFrame[frame] = min(1, np.max(hasLoudAudio[begin:stop]))
        if(frame >= 1 and shouldIncludeFrame[frame] != shouldIncludeFrame[frame - 1]):
            chunks.append([chunks[-1][1], frame, shouldIncludeFrame[frame - 1]])
    # NOTE(review): mirrors the original, which closes the final chunk with the
    # flag of the second-to-last frame and relies on the loop variable leaking.
    chunks.append([chunks[-1][1], audioFrameCount, shouldIncludeFrame[frame - 1]])
    return chunks[1:]
def prettyTime(newTime):
    """Format an epoch timestamp as a 12-hour 'HH:MM AM/PM' local-time string."""
    parsed = localtime(newTime)
    hours = parsed.tm_hour
    if(hours == 0):
        hours = 12
    if(hours > 12):
        hours -= 12
    ampm = 'PM' if parsed.tm_hour >= 12 else 'AM'
    minutes = parsed.tm_min
    return f'{hours:02}:{minutes:02} {ampm}'
def vidTracks(videoFile, ffmpeg):
    """
    Return the number of audio tracks in a video file.

    videoFile: path of the media file to probe.
    ffmpeg: name/path of the ffmpeg binary; a non-default value means a
        bundled build is in use, so the matching bundled ffprobe is chosen.
    Falls back to 1 (with a warning) if ffprobe output cannot be parsed.
    """
    import os
    import platform
    dirPath = os.path.dirname(os.path.realpath(__file__))
    if(ffmpeg == 'ffmpeg'):
        ffprobe = 'ffprobe'
    else:
        # Custom ffmpeg path: use the ffprobe shipped next to this script.
        if(platform.system() == 'Windows'):
            ffprobe = os.path.join(dirPath, 'win-ffmpeg/bin/ffprobe.exe')
        elif(platform.system() == 'Darwin'):
            ffprobe = os.path.join(dirPath, 'mac-ffmpeg/unix-ffprobe')
        else:
            ffprobe = 'ffprobe'
    # Print one stream index per line for every audio stream.
    cmd = [ffprobe, videoFile, '-hide_banner', '-loglevel', 'panic',
        '-show_entries', 'stream=index', '-select_streams', 'a', '-of',
        'compact=p=0:nk=1']
    # Read what ffprobe piped in.
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    stdout, __ = process.communicate()
    output = stdout.decode()
    numbers = output.split('\n')
    try:
        # Fix: drop the unused 'test' local; int() alone validates the output.
        int(numbers[0])
        # len - 1 accounts for the empty string after the trailing newline.
        return len(numbers) - 1
    except ValueError:
        print('Warning: ffprobe had an invalid output.')
        return 1
def conwrite(message):
numSpaces = get_terminal_size().columns - len(message) - 3
print(' ' + message + ' ' * numSpaces, end='\r', flush=True)
def progressBar(index, total, beginTime, title='Please wait'):
    """Render an in-place console progress bar with percentage and ETA.

    index: zero-based index of the item just finished.
    total: total number of items.
    beginTime: epoch time when the work started (from time.time()).
    title: label shown in front of the bar.
    """
    termsize = get_terminal_size().columns
    # reserve roughly 50 columns for the title, percentage and ETA text
    barLen = max(1, termsize - (len(title) + 50))
    percentDone = round((index+1) / total * 100, 1)
    done = round(percentDone / (100 / barLen))
    doneStr = '█' * done
    togoStr = '░' * int(barLen - done)
    if(percentDone == 0):
        percentPerSec = 0
    else:
        # seconds elapsed per percent completed; x100 projects the end time
        percentPerSec = (time() - beginTime) / percentDone
    newTime = prettyTime(beginTime + (percentPerSec * 100))
    bar = f' ⏳{title}: [{doneStr}{togoStr}] {percentDone}% done ETA {newTime}'
    # clip or pad the bar so it exactly fills the terminal width
    if(len(bar) > termsize - 2):
        bar = bar[:termsize - 2]
    else:
        bar += ' ' * (termsize - len(bar) - 4)
    try:
        print(bar, end='\r', flush=True)
    except UnicodeEncodeError:
        # terminals without Unicode support: fall back to plain text
        print(f'   {percentDone}% done ETA {newTime}')
| [
"math.ceil",
"subprocess.Popen",
"os.path.join",
"numpy.max",
"os.path.realpath",
"shutil.get_terminal_size",
"numpy.zeros",
"platform.system",
"numpy.min",
"time.localtime",
"time.time"
] | [((997, 1038), 'numpy.zeros', 'np.zeros', (['audioFrameCount'], {'dtype': 'np.uint8'}), '(audioFrameCount, dtype=np.uint8)\n', (1005, 1038), True, 'import numpy as np\n'), ((1617, 1658), 'numpy.zeros', 'np.zeros', (['audioFrameCount'], {'dtype': 'np.uint8'}), '(audioFrameCount, dtype=np.uint8)\n', (1625, 1658), True, 'import numpy as np\n'), ((2178, 2196), 'time.localtime', 'localtime', (['newTime'], {}), '(newTime)\n', (2187, 2196), False, 'from time import time, localtime\n'), ((3228, 3299), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n', (3244, 3299), False, 'import subprocess\n'), ((615, 624), 'numpy.max', 'np.max', (['s'], {}), '(s)\n', (621, 624), True, 'import numpy as np\n'), ((643, 652), 'numpy.min', 'np.min', (['s'], {}), '(s)\n', (649, 652), True, 'import numpy as np\n'), ((931, 976), 'math.ceil', 'math.ceil', (['(audioSampleCount / samplesPerFrame)'], {}), '(audioSampleCount / samplesPerFrame)\n', (940, 976), False, 'import math\n'), ((2634, 2660), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2650, 2660), False, 'import os\n'), ((3804, 3823), 'shutil.get_terminal_size', 'get_terminal_size', ([], {}), '()\n', (3821, 3823), False, 'from shutil import get_terminal_size\n'), ((1839, 1870), 'numpy.max', 'np.max', (['hasLoudAudio[start:end]'], {}), '(hasLoudAudio[start:end])\n', (1845, 1870), True, 'import numpy as np\n'), ((2740, 2757), 'platform.system', 'platform.system', ([], {}), '()\n', (2755, 2757), False, 'import platform\n'), ((2795, 2846), 'os.path.join', 'os.path.join', (['dirPath', '"""win-ffmpeg/bin/ffprobe.exe"""'], {}), "(dirPath, 'win-ffmpeg/bin/ffprobe.exe')\n", (2807, 2846), False, 'import os\n'), ((2860, 2877), 'platform.system', 'platform.system', ([], {}), '()\n', (2875, 2877), False, 'import platform\n'), ((2914, 2962), 'os.path.join', 'os.path.join', (['dirPath', 
'"""mac-ffmpeg/unix-ffprobe"""'], {}), "(dirPath, 'mac-ffmpeg/unix-ffprobe')\n", (2926, 2962), False, 'import os\n'), ((3611, 3630), 'shutil.get_terminal_size', 'get_terminal_size', ([], {}), '()\n', (3628, 3630), False, 'from shutil import get_terminal_size\n'), ((4134, 4140), 'time.time', 'time', ([], {}), '()\n', (4138, 4140), False, 'from time import time, localtime\n')] |
import os
import sys
import time
import logging
import argparse
import matplotlib; matplotlib.use('Agg')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from itertools import filterfalse
from collections import Counter
from matplotlib import font_manager
from GetConfig import getConfig
try:
from adjustText import adjust_text
except ImportError:
print('Module [adjustText] is required, please install it by command line: pip install adjustText')
sys.exit()
'''
Description:
    This module is used for clustering analysis.
    Clustering method can be applied by --method option.
    The program will take haplogroup file and population file as input files.
    Then cluster populations by PCA or MDS method and visualize results by the plot.
'''

config = getConfig() # module-wide config; classify_hap_data reads its HaplogroupTree section
def cluster_parser():
    """Build and parse the command-line arguments of the cluster command.

    Returns:
        argparse.Namespace with fields: cluster, hg, population, method,
        level, freq, output.
    """
    parser = argparse.ArgumentParser('cluster', description='(c) Y-LineageTracker: NRY haplogroup clustering analysis')
    # function used for clustering analysis
    parser.add_argument('cluster',
                        # fix: 'Pefrorm' typo in user-facing help text
                        help='Perform clustering analysis for NRY haplogroups.')
    # required, haplogroup file
    parser.add_argument('--hg',
                        required=True,
                        type=str,
                        action='store',
                        help='hg: A file containing sample ID and haplogroup of each individual.')
    # required, population file
    parser.add_argument('-p', '--population',
                        required=True,
                        type=str,
                        action='store',
                        help='population: A file containing sample ID and population information of each individual.')
    # optional, clustering algorithm
    parser.add_argument('--method',
                        required=False,
                        type=str,
                        action='store',
                        default='pca',
                        choices=['pca', 'mds'],
                        help='method: The algorithm used for clustering analysis (PCA or MDS).')
    # optional, simplification mode for haplogroups in the same main trunk
    parser.add_argument('--level',
                        required=False,
                        type=str,
                        const='auto',
                        action='store',
                        nargs='?',
                        help='level: the haplogroup resolution level of NRY haplogroups for clustering, '
                             'which means the degree of similarity among haplogroups in the same main trunk. '
                             'The parameter [auto] will set an optimal level to simplify NRY haplogroups resolution.')
    # optional, output haplogroup frequency of populations
    parser.add_argument('--freq',
                        required=False,
                        action='store_true',
                        # fix: 'frequence' typo in user-facing help text
                        help='frequency: Output the haplogroup frequency of each population.')
    # optional, the prefix of output
    parser.add_argument('-o', '--output',
                        required=False,
                        type=str,
                        action='store',
                        help='output: The prefix of output files.')
    args = parser.parse_args()
    return args
# print program information and write to log file
def set_log(log_file, level, args_log):
    """Attach a file handler and a console handler to the root logger,
    then log the run parameters (input file, method, haplogroup level)."""
    logger = logging.getLogger()
    logger.setLevel(level=logging.INFO)
    file_handler = logging.FileHandler(log_file, mode='w')
    file_handler.setLevel(logging.INFO)
    # timestamped format for the log file only; console stays plain
    file_handler.setFormatter(logging.Formatter('[%(asctime)s] - [%(levelname)s]: %(message)s'))
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
    log_info = ['[Y-LineageTracker] [Cluster]',
                '[Y-LineageTracker] Run Date: ' + time.asctime(time.localtime(time.time())),
                '[Y-LineageTracker] Input File: %s' % args_log.hg,
                '[Y-LineageTracker] Clustering Method: %s' % args_log.method,
                '[Y-LineageTracker] Haplogroup Level: %s' % level]
    if args_log.freq:
        log_info.append('[Y-LineageTracker] Output haplogroup frequence')
    print('\n')
    for line in log_info:
        logger.info(line)
# names of output files
def check_cluster_output(output, method, freq):
    """Derive all output file paths for a run from the output prefix.

    Returns (output_set, log_file); output_set[3] is always the
    simplified-haplogroup table path."""
    from FilesIO import get_out_path
    prefix = get_out_path(output, 'Cluster')
    level_file = prefix + '.level.hg'
    if method == 'pca':
        output_set = [prefix + '.PCA.fig.pdf',
                      prefix + '.PCA.eigenvectors.txt',
                      prefix + '.PCA.eigenval.txt',
                      level_file]
        log_file = prefix + '.ClusterLog.PCA.log'
    elif method == 'mds':
        output_set = [prefix + '.MDS.fig.pdf',
                      prefix + '.MDS.fst.txt',
                      prefix + '.MDS.embedding.txt',
                      level_file]
        log_file = prefix + '.ClusterLog.MDS.log'
    if freq:
        output_set.append(prefix + '.freq.txt')
    return output_set, log_file
# function of haplogroup simplification
def hap_simplify(haplogroup, level):
    """Truncate a NRY haplogroup name to the requested resolution level.

    haplogroup: haplogroup string, e.g. 'O2a1b' or 'A0T1a'.
    level: positive integer; larger values keep more of the name.
    Main-trunk names made purely of uppercase letters are kept intact.
    """
    if haplogroup.startswith('A'):
        if haplogroup.startswith('A1'):
            return haplogroup[:level+1]
        elif haplogroup.startswith('A0'):
            # A0 lineages carry extra zeros (A00, A000, ...) that must be kept
            zero_num = haplogroup.count('0')
            if 'T' in haplogroup:
                return haplogroup[:level+zero_num+1]
            else:
                return haplogroup[:level+zero_num]
    else:
        if (haplogroup.isalpha() and haplogroup.isupper()):
            # pure uppercase trunk name (e.g. 'CT', 'IJK'): keep as is
            return haplogroup
        # fix: 'isupper' was referenced without calling it, so the method
        # object was always truthy and the uppercase check never ran
        elif haplogroup[:-1].isupper() and haplogroup[:-1].isalpha() and len(haplogroup) > 2:
            return haplogroup
        else:
            return haplogroup[:level]
# check level parameter
def check_level(level):
    """Validate the --level option value.

    Returns the keyword ('auto'/'original'/'min'), a positive integer,
    or 'original' when no level was given; raises ArgumentTypeError for
    anything else."""
    if not level:
        return 'original'
    if level in ('auto', 'original', 'min'):
        return level
    try:
        parsed = int(level)
    except ValueError:
        raise argparse.ArgumentTypeError('parameter [level] should be an integer or auto')
    if parsed >= 1:
        return parsed
    raise argparse.ArgumentTypeError('parameter [level] should be an integer or auto')
class ClusterAnalysis(object):
    '''
    This class is used to perform main clustering analysis
    Four main steps are included in this class:
    1. read haplogroup file and simplify NRY haplogroups
    2. read population file and merge to haplogroup data
    3. perform clustering analysis
    4. output clustering result with figure
    '''

    def __init__(self):
        # root logger; handlers are configured by set_log()
        self.logger = logging.getLogger()

    # calculate Fst
    # <NAME> Hill. 2002. Estimating F-statistics. Ann.Rev.Gen.
    def _calculate_pairwise_fst(self, pop1_count, pop2_count):
        """Estimate pairwise Fst (theta) between two populations.

        pop1_count/pop2_count: 2-row arrays where row 0 holds per-haplogroup
        counts and row 1 the complementary counts (total - count), as built
        by _calculate_MDS_fst. Returns 0 when the estimator denominator is 0.
        """
        # average sample size (counts sum / matrix size * 2)
        sample_size1 = (np.sum(pop1_count)/pop1_count.size)*2
        sample_size2 = (np.sum(pop2_count)/pop2_count.size)*2
        pop1_frequency = np.array(pop1_count) / sample_size1
        pop2_frequency = np.array(pop2_count) / sample_size2
        mean_freq = (np.array(pop1_count)+np.array(pop2_count)) / (sample_size1+sample_size2)
        # mean squares among populations and within groups
        MSP1 = sample_size1*((pop1_frequency-mean_freq)**2)
        MSP2 = sample_size2*((pop2_frequency-mean_freq)**2)
        MSP = MSP1 + MSP2
        MSG1 = float(1/((sample_size1-1)+(sample_size2-1)))
        MSG2 = (np.array(pop1_count) * (1-pop1_frequency)) + (np.array(pop2_count) * (1-pop2_frequency))
        MSG = MSG1 * MSG2
        nc = sample_size1+sample_size2 - (sample_size1**2+sample_size2**2)/(sample_size1+sample_size2)
        theta1 = np.sum(MSP-MSG)
        theta2 = np.sum(MSP+(nc-1)*MSG)
        if theta2 == 0:
            theta = 0
        else:
            theta = theta1 / theta2
        return theta

    # calculate pairwise Fst
    def _calculate_MDS_fst(self, population_count, fst_output):
        """Build the symmetric pairwise-Fst matrix used as the MDS
        dissimilarity input and write it to *fst_output* (TSV).

        population_count: DataFrame of haplogroup counts, one row per
        population. Returns the matrix as a numpy array.
        """
        pops = population_count.index.tolist()
        fst_df = pd.DataFrame(index=pops, columns=pops)
        pop_num = 0
        for pop1 in pops:
            # only the upper triangle is computed; the value is mirrored
            for pop2 in pops[pop_num:]:
                if pop1 == pop2:
                    fst_df.at[pop2, pop1] = 0
                else:
                    # rows: [haplogroup counts, (row total - counts)]
                    pop1_count = np.empty(shape=(2, population_count.columns.size))
                    pop1_count[0] = np.array(population_count.loc[pop1].tolist()).reshape(1, -1)
                    pop1_count[1] = (np.sum(pop1_count[0])-np.array(population_count.loc[pop1].tolist())).reshape(1, -1)
                    pop2_count = np.empty(shape=(2, population_count.columns.size))
                    pop2_count[0] = np.array(population_count.loc[pop2].tolist()).reshape(1, -1)
                    pop2_count[1] = (np.sum(pop2_count[0])-np.array(population_count.loc[pop2].tolist())).reshape(1, -1)
                    fst = self._calculate_pairwise_fst(pop1_count, pop2_count)
                    fst_df.at[pop2, pop1] = fst
                    fst_df.at[pop1, pop2] = fst
            pop_num += 1
        fst_df.to_csv(fst_output, sep='\t', index=True)
        fst_array = np.array(fst_df)
        return fst_array

    # simplify haplogroups according to level
    def classify_hap_data(self, level, hap_data, level_file):
        """Simplify the Haplogroup column to the requested resolution and
        write the simplified table to *level_file* (TSV).

        level: 'original' (no change), 'auto' (collapse each initial-letter
        group to its most common prefix), 'min' (shortest non-trunk name
        length), or a positive integer passed to hap_simplify().
        """
        if level != 'original':
            # simplify to most common haplogroup
            if level == 'auto':
                initial = set(hap_data['Haplogroup'].map(lambda x: x[0]))
                common_list = []
                for i in initial:
                    initial_group = sorted(list(filter(lambda x: x[0] == i, hap_data['Haplogroup'])), key=lambda x: len(x))
                    while True:
                        most_common = Counter(initial_group).most_common(1)[0][0]
                        initial_group = [most_common if x.startswith(most_common) and len(x) > len(most_common) else x for x in initial_group]
                        hap_num = len(most_common)-1
                        # shorten the representative while a shorter prefix also occurs
                        for j in range(len(most_common)-1):
                            hap = most_common[0:hap_num]
                            if hap in initial_group:
                                initial_group = [hap if x == most_common else x for x in initial_group]
                                most_common = hap
                            hap_num -= 1
                        common_list.append(most_common)
                        initial_group = list(filterfalse(lambda x: x.startswith(most_common), initial_group))
                        if len(initial_group) == 0:
                            break
                common_list = sorted(common_list, key=lambda x: len(x), reverse=True)
                for i in range(len(hap_data['Haplogroup'])):
                    for j in common_list:
                        if hap_data.at[i, 'Haplogroup'].startswith(j):
                            hap_data.at[i, 'Haplogroup'] = j
            # simplify to specific level
            else:
                if level == 'min':
                    main_trunk = config.get('HaplogroupTree', 'CommonTrunk').split(',') + config.get('HaplogroupTree', 'UpperTrunk').split(',')
                    # fix: 'fiter' was a NameError; the builtin is 'filter'
                    level = min(map(lambda x: len(x), filter(lambda x: x not in main_trunk, hap_data['Haplogroup'])))
                hap_data['Haplogroup'] = hap_data['Haplogroup'].apply(hap_simplify, level=level)
        hap_data.to_csv(level_file, sep='\t', index=False)
        return hap_data

    # read population data and plot clustering figure
    def output_cluster(self, method, freq, output_set, hap_data, population_data):
        """Compute per-population haplogroup frequencies, run PCA or MDS,
        write the result tables and save the scatter plot.

        method: 'pca' or 'mds'.
        freq: if True, additionally write the frequency table.
        output_set: file paths from check_cluster_output().
        hap_data: DataFrame with SampleID and Haplogroup columns.
        population_data: DataFrame with SampleID, Population and optional
            Group/Color/Shape columns controlling the plot.
        """
        #############################READ POPULATION DATA#############################
        # merge population data for frequence construction
        hap_data = pd.merge(hap_data, population_data, on='SampleID')
        # count number of populations and sort them in order
        num_list = []
        num = 0
        for i in sorted(list(set(population_data['Population'])), key=population_data.drop('SampleID', axis=1).drop_duplicates()['Population'].tolist().index):
            num_list.append((num, num+population_data['Population'].tolist().count(i)))
            num = num + population_data['Population'].tolist().count(i)
        population_data = population_data.drop('SampleID', axis=1).drop_duplicates()
        # sort according to Group for subsequent plot
        if 'Group' in population_data.columns:
            population_data = population_data.sort_values(by='Group')
        # haplogroup frequency of populations
        population_freq = pd.DataFrame(index=sorted(list(set(hap_data['Population'])),
                                                   key=population_data['Population'].tolist().index),
                                       columns=sorted(list(set(hap_data['Haplogroup']))),
                                       dtype=float)  # fix: np.float alias removed in NumPy 1.24
        hap_series = pd.Series(hap_data['Haplogroup'].tolist(), index=[hap_data['Population']])
        if method == 'mds':
            population_count = pd.DataFrame(index=sorted(list(set(hap_data['Population'])),
                                                        key=population_data['Population'].tolist().index),
                                            columns=sorted(list(set(hap_data['Haplogroup']))),
                                            dtype=int)  # fix: np.int alias removed in NumPy 1.24
        # count haplogroup frequence
        for i in population_freq.index:
            hap_list = hap_series[i].value_counts().index.tolist()
            freq_list = hap_series[i].value_counts().tolist()
            sum_num = sum(freq_list)
            for j in range(len(hap_list)):
                population_freq.at[i, hap_list[j]] = freq_list[j] / sum_num
                if method == 'mds':
                    population_count.at[i, hap_list[j]] = freq_list[j]
        population_freq = population_freq.fillna(0)
        if method == 'mds':
            population_count = population_count.fillna(0)
        #############################PERFORM CLUSTERING ALGORITHM#############################
        matrix = population_freq.to_numpy() # convert haplogroup frequency dataframe to matrix
        if method == 'pca': # PCA analysis
            from sklearn.decomposition import PCA
            pca = PCA(copy=True, whiten=False)
            coordinates = pca.fit_transform(matrix)
            # output PCA results
            output_vect_file = output_set[1]
            output_vect_data = pd.DataFrame(coordinates, index=sorted(list(set(hap_data['Population']))))
            output_vect_data.to_csv(output_vect_file , sep='\t', index=True)
            output_val_file = output_set[2]
            output_val_data = pd.DataFrame(pca.explained_variance_)
            output_val_data.to_csv(output_val_file, index=False, header=None)
        elif method == 'mds': # MDS analysis
            from sklearn.manifold import MDS
            mds_matrix = self._calculate_MDS_fst(population_count, output_set[1])
            mds = MDS(dissimilarity='precomputed')
            coordinates = mds.fit_transform(mds_matrix)
            # output MDS result
            output_embedding_file = output_set[2]
            output_embedding_data = pd.DataFrame(coordinates, index=sorted(list(set(hap_data['Population']))))
            output_embedding_data.to_csv(output_embedding_file , sep='\t', index=True)
        # output frequence of haplogroup in populations
        if freq:
            output_freq_file = output_set[-1]
            population_freq.sort_index().to_csv(output_freq_file, sep='\t', index=True)
        #############################START PLOT#############################
        # default color and shape in cluster if no Group columns in population file
        color_list = ['slategrey']*population_data.index.size
        shape_list = ['o']*population_data.index.size
        # set color according to Group labels
        if 'Group' in population_data.columns:
            label_list = population_data['Group'].tolist()
            # if with color columns
            if 'Color' in population_data.columns:
                color_list = population_data['Color'].tolist()
            # get cmap colors in matplotlib
            else:
                from FilesIO import set_color_num
                color_num = len(set(label_list))
                colors = set_color_num(color_num)
                label_count = 0
                color_list = []
                colored_label = []
                label_dict = {}
                for i in label_list:
                    if i not in colored_label:
                        label_dict[i] = colors[label_count]
                        colored_label.append(i)
                        color_list.append(label_dict[i])
                        label_count += 1
                    else:
                        color_list.append(label_dict[i])
        # No Group columns
        elif 'Color' in population_data.columns and 'Group' not in population_data.columns:
            color_list = population_data['Color'].tolist()
        if 'Shape' in population_data.columns:
            shape_list = population_data['Shape'].tolist()
        self.logger.info('[Y-LineageTracker] Start clustering analysis')
        # parameters for plot
        import warnings
        warnings.filterwarnings('ignore')
        # NOTE(review): font_manager.createFontList is deprecated/removed in
        # newer matplotlib — confirm the supported matplotlib version.
        font_files = font_manager.findSystemFonts(fontpaths=[os.path.split(os.path.realpath(__file__))[0] + '/sans-serif'])
        font_list = font_manager.createFontList(font_files)
        font_manager.fontManager.ttflist.extend(font_list)
        plt.rcParams['font.family']= 'sans-serif'
        plt.rcParams['font.sans-serif'] = ['Arial'] # font
        plt.rcParams['axes.unicode_minus'] = False
        figure, ax = plt.subplots(figsize = (12, 12)) # figure size
        # x and y lines of zero
        ax.axvline(x=0, color='gray', linestyle='--', linewidth=1.5, alpha=0.7)
        ax.axhline(y=0, color='gray', linestyle='--', linewidth=1.5, alpha=0.7)
        # x and y labels
        if method == 'pca':
            plt.xlabel('PC1 %.2f%%'% (pca.explained_variance_ratio_[0]*100), fontsize=25)
            plt.ylabel('PC2 %.2f%%'% (pca.explained_variance_ratio_[1]*100), fontsize=25)
        elif method == 'mds':
            plt.xlabel('Dimension1', fontsize=25)
            plt.ylabel('Dimension2', fontsize=25)
        plt.xticks(size=20)
        plt.yticks(size=20)
        # traverse populations to plot scatterplot
        if 'label_list' in vars():
            plot_list = []
            for i in range(population_data.index.size):
                plot = list(zip(color_list, shape_list, label_list))[i]
                if plot in plot_list:
                    plt.plot(coordinates[i, 0], coordinates[i, 1], shape_list[i],
                            color=color_list[i],
                            mec='black',
                            markersize=15,
                            alpha=0.9)
                else:
                    # first marker of this (color, shape, label) combination
                    # carries the legend label
                    plt.plot(coordinates[i, 0], coordinates[i, 1], shape_list[i],
                            color=color_list[i],
                            mec='black',
                            label=label_list[i],
                            markersize=15,
                            alpha=0.9)
                    plot_list.append(plot)
            plt.legend(frameon=True, fontsize=15, markerscale=1, shadow=True, edgecolor='dimgrey', loc='best')
        else:
            for i in range(population_data.index.size):
                plt.plot(coordinates[num_list[i][0]:num_list[i][1], 0], coordinates[num_list[i][0]:num_list[i][1], 1], shape_list[i],
                        color=color_list[i],
                        mec='black',
                        markersize=15,
                        alpha=0.7)
        # plot population with labels
        if population_freq.index.size < 40:
            texts = [plt.text(coordinates[i, 0], coordinates[i, 1], s=population_freq.index.tolist()[i], size=15) for i in range(len(matrix))]
            import warnings
            warnings.filterwarnings('ignore')
            adjust_text(texts)
        else:
            self.logger.warning('[Y-LineageTracker] Too much population in the population file')
        # output figure
        output_fig_file = output_set[0]
        plt.savefig(output_fig_file, dpi=100, bbox_inches='tight')
        plt.close()
        self.logger.info('[Y-LineageTracker] Clustering analysis finished')
def main():
    """Entry point: parse arguments, prepare outputs, run clustering."""
    start = time.perf_counter()
    args = cluster_parser()
    # validate and normalize the requested haplogroup resolution level
    global level
    level = check_level(args.level)
    # derive all output file names from the prefix
    output_set, log_file = check_cluster_output(args.output, args.method, args.freq)
    # configure logging to file and console
    set_log(log_file, level, args)
    from FilesIO import check_hap_input, check_population, check_overlap, time_count
    # load and cross-check the haplogroup and population tables
    hap_data = check_hap_input(args.hg, 'haplogroup')
    population_data = check_population(args.population)
    hap_data, population_data = check_overlap(hap_data, population_data)
    # run the clustering analysis and write all outputs
    analysis = ClusterAnalysis()
    hap_data = analysis.classify_hap_data(level, hap_data, output_set[3])
    analysis.output_cluster(args.method, args.freq, output_set, hap_data, population_data)
    time_count(start)
# run the cluster sub-command when this module is executed as a script
if __name__ == '__main__':
    main()
| [
"logging.getLogger",
"logging.StreamHandler",
"matplotlib.pyplot.ylabel",
"FilesIO.check_population",
"numpy.array",
"FilesIO.check_hap_input",
"matplotlib.font_manager.fontManager.ttflist.extend",
"sys.exit",
"matplotlib.font_manager.createFontList",
"argparse.ArgumentParser",
"sklearn.decompos... | [((83, 104), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (97, 104), False, 'import matplotlib\n'), ((782, 793), 'GetConfig.getConfig', 'getConfig', ([], {}), '()\n', (791, 793), False, 'from GetConfig import getConfig\n'), ((845, 956), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""cluster"""'], {'description': '"""(c) Y-LineageTracker: NRY haplogroup clustering analysis"""'}), "('cluster', description=\n '(c) Y-LineageTracker: NRY haplogroup clustering analysis')\n", (868, 956), False, 'import argparse\n'), ((3385, 3404), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3402, 3404), False, 'import logging\n'), ((3460, 3499), 'logging.FileHandler', 'logging.FileHandler', (['log_file'], {'mode': '"""w"""'}), "(log_file, mode='w')\n", (3479, 3499), False, 'import logging\n'), ((3551, 3616), 'logging.Formatter', 'logging.Formatter', (['"""[%(asctime)s] - [%(levelname)s]: %(message)s"""'], {}), "('[%(asctime)s] - [%(levelname)s]: %(message)s')\n", (3568, 3616), False, 'import logging\n'), ((3668, 3691), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (3689, 3691), False, 'import logging\n'), ((4427, 4458), 'FilesIO.get_out_path', 'get_out_path', (['output', '"""Cluster"""'], {}), "(output, 'Cluster')\n", (4439, 4458), False, 'from FilesIO import get_out_path\n'), ((20587, 20606), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (20604, 20606), False, 'import time\n'), ((21044, 21087), 'FilesIO.check_hap_input', 'check_hap_input', (['arguments.hg', '"""haplogroup"""'], {}), "(arguments.hg, 'haplogroup')\n", (21059, 21087), False, 'from FilesIO import check_hap_input, check_population, check_overlap, time_count\n'), ((21139, 21177), 'FilesIO.check_population', 'check_population', (['arguments.population'], {}), '(arguments.population)\n', (21155, 21177), False, 'from FilesIO import check_hap_input, check_population, check_overlap, time_count\n'), ((21270, 21310), 
'FilesIO.check_overlap', 'check_overlap', (['hap_data', 'population_data'], {}), '(hap_data, population_data)\n', (21283, 21310), False, 'from FilesIO import check_hap_input, check_population, check_overlap, time_count\n'), ((21554, 21571), 'FilesIO.time_count', 'time_count', (['start'], {}), '(start)\n', (21564, 21571), False, 'from FilesIO import check_hap_input, check_population, check_overlap, time_count\n'), ((484, 494), 'sys.exit', 'sys.exit', ([], {}), '()\n', (492, 494), False, 'import sys\n'), ((6862, 6881), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (6879, 6881), False, 'import logging\n'), ((7833, 7850), 'numpy.sum', 'np.sum', (['(MSP - MSG)'], {}), '(MSP - MSG)\n', (7839, 7850), True, 'import numpy as np\n'), ((7866, 7894), 'numpy.sum', 'np.sum', (['(MSP + (nc - 1) * MSG)'], {}), '(MSP + (nc - 1) * MSG)\n', (7872, 7894), True, 'import numpy as np\n'), ((8166, 8204), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'pops', 'columns': 'pops'}), '(index=pops, columns=pops)\n', (8178, 8204), True, 'import pandas as pd\n'), ((9273, 9289), 'numpy.array', 'np.array', (['fst_df'], {}), '(fst_df)\n', (9281, 9289), True, 'import numpy as np\n'), ((11898, 11948), 'pandas.merge', 'pd.merge', (['hap_data', 'population_data'], {'on': '"""SampleID"""'}), "(hap_data, population_data, on='SampleID')\n", (11906, 11948), True, 'import pandas as pd\n'), ((17386, 17419), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (17409, 17419), False, 'import warnings\n'), ((17564, 17603), 'matplotlib.font_manager.createFontList', 'font_manager.createFontList', (['font_files'], {}), '(font_files)\n', (17591, 17603), False, 'from matplotlib import font_manager\n'), ((17612, 17662), 'matplotlib.font_manager.fontManager.ttflist.extend', 'font_manager.fontManager.ttflist.extend', (['font_list'], {}), '(font_list)\n', (17651, 17662), False, 'from matplotlib import font_manager\n'), ((17844, 17874), 
'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (17856, 17874), True, 'import matplotlib.pyplot as plt\n'), ((18459, 18478), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': '(20)'}), '(size=20)\n', (18469, 18478), True, 'import matplotlib.pyplot as plt\n'), ((18487, 18506), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'size': '(20)'}), '(size=20)\n', (18497, 18506), True, 'import matplotlib.pyplot as plt\n'), ((20404, 20462), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_fig_file'], {'dpi': '(100)', 'bbox_inches': '"""tight"""'}), "(output_fig_file, dpi=100, bbox_inches='tight')\n", (20415, 20462), True, 'import matplotlib.pyplot as plt\n'), ((20471, 20482), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (20480, 20482), True, 'import matplotlib.pyplot as plt\n'), ((7180, 7200), 'numpy.array', 'np.array', (['pop1_count'], {}), '(pop1_count)\n', (7188, 7200), True, 'import numpy as np\n'), ((7241, 7261), 'numpy.array', 'np.array', (['pop2_count'], {}), '(pop2_count)\n', (7249, 7261), True, 'import numpy as np\n'), ((14370, 14398), 'sklearn.decomposition.PCA', 'PCA', ([], {'copy': '(True)', 'whiten': '(False)'}), '(copy=True, whiten=False)\n', (14373, 14398), False, 'from sklearn.decomposition import PCA\n'), ((14786, 14823), 'pandas.DataFrame', 'pd.DataFrame', (['pca.explained_variance_'], {}), '(pca.explained_variance_)\n', (14798, 14823), True, 'import pandas as pd\n'), ((18150, 18235), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('PC1 %.2f%%' % (pca.explained_variance_ratio_[0] * 100))"], {'fontsize': '(25)'}), "('PC1 %.2f%%' % (pca.explained_variance_ratio_[0] * 100),\n fontsize=25)\n", (18160, 18235), True, 'import matplotlib.pyplot as plt\n'), ((18241, 18326), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('PC2 %.2f%%' % (pca.explained_variance_ratio_[1] * 100))"], {'fontsize': '(25)'}), "('PC2 %.2f%%' % (pca.explained_variance_ratio_[1] * 100),\n fontsize=25)\n", (18251, 18326), 
True, 'import matplotlib.pyplot as plt\n'), ((19426, 19529), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(True)', 'fontsize': '(15)', 'markerscale': '(1)', 'shadow': '(True)', 'edgecolor': '"""dimgrey"""', 'loc': '"""best"""'}), "(frameon=True, fontsize=15, markerscale=1, shadow=True, edgecolor\n ='dimgrey', loc='best')\n", (19436, 19529), True, 'import matplotlib.pyplot as plt\n'), ((20155, 20188), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (20178, 20188), False, 'import warnings\n'), ((20201, 20219), 'adjustText.adjust_text', 'adjust_text', (['texts'], {}), '(texts)\n', (20212, 20219), False, 'from adjustText import adjust_text\n'), ((7054, 7072), 'numpy.sum', 'np.sum', (['pop1_count'], {}), '(pop1_count)\n', (7060, 7072), True, 'import numpy as np\n'), ((7116, 7134), 'numpy.sum', 'np.sum', (['pop2_count'], {}), '(pop2_count)\n', (7122, 7134), True, 'import numpy as np\n'), ((7299, 7319), 'numpy.array', 'np.array', (['pop1_count'], {}), '(pop1_count)\n', (7307, 7319), True, 'import numpy as np\n'), ((7320, 7340), 'numpy.array', 'np.array', (['pop2_count'], {}), '(pop2_count)\n', (7328, 7340), True, 'import numpy as np\n'), ((7596, 7616), 'numpy.array', 'np.array', (['pop1_count'], {}), '(pop1_count)\n', (7604, 7616), True, 'import numpy as np\n'), ((7642, 7662), 'numpy.array', 'np.array', (['pop2_count'], {}), '(pop2_count)\n', (7650, 7662), True, 'import numpy as np\n'), ((15092, 15124), 'sklearn.manifold.MDS', 'MDS', ([], {'dissimilarity': '"""precomputed"""'}), "(dissimilarity='precomputed')\n", (15095, 15124), False, 'from sklearn.manifold import MDS\n'), ((16436, 16460), 'FilesIO.set_color_num', 'set_color_num', (['color_num'], {}), '(color_num)\n', (16449, 16460), False, 'from FilesIO import set_color_num\n'), ((18362, 18399), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dimension1"""'], {'fontsize': '(25)'}), "('Dimension1', fontsize=25)\n", (18372, 18399), True, 'import 
matplotlib.pyplot as plt\n'), ((18412, 18449), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Dimension2"""'], {'fontsize': '(25)'}), "('Dimension2', fontsize=25)\n", (18422, 18449), True, 'import matplotlib.pyplot as plt\n'), ((19611, 19797), 'matplotlib.pyplot.plot', 'plt.plot', (['coordinates[num_list[i][0]:num_list[i][1], 0]', 'coordinates[num_list[i][0]:num_list[i][1], 1]', 'shape_list[i]'], {'color': 'color_list[i]', 'mec': '"""black"""', 'markersize': '(15)', 'alpha': '(0.7)'}), "(coordinates[num_list[i][0]:num_list[i][1], 0], coordinates[\n num_list[i][0]:num_list[i][1], 1], shape_list[i], color=color_list[i],\n mec='black', markersize=15, alpha=0.7)\n", (19619, 19797), True, 'import matplotlib.pyplot as plt\n'), ((3917, 3928), 'time.time', 'time.time', ([], {}), '()\n', (3926, 3928), False, 'import time\n'), ((6200, 6276), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""parameter [level] should be an integer or auto"""'], {}), "('parameter [level] should be an integer or auto')\n", (6226, 6276), False, 'import argparse\n'), ((6330, 6406), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""parameter [level] should be an integer or auto"""'], {}), "('parameter [level] should be an integer or auto')\n", (6356, 6406), False, 'import argparse\n'), ((8425, 8475), 'numpy.empty', 'np.empty', ([], {'shape': '(2, population_count.columns.size)'}), '(shape=(2, population_count.columns.size))\n', (8433, 8475), True, 'import numpy as np\n'), ((8727, 8777), 'numpy.empty', 'np.empty', ([], {'shape': '(2, population_count.columns.size)'}), '(shape=(2, population_count.columns.size))\n', (8735, 8777), True, 'import numpy as np\n'), ((18807, 18933), 'matplotlib.pyplot.plot', 'plt.plot', (['coordinates[i, 0]', 'coordinates[i, 1]', 'shape_list[i]'], {'color': 'color_list[i]', 'mec': '"""black"""', 'markersize': '(15)', 'alpha': '(0.9)'}), "(coordinates[i, 0], coordinates[i, 1], shape_list[i], color=\n color_list[i], mec='black', 
markersize=15, alpha=0.9)\n", (18815, 18933), True, 'import matplotlib.pyplot as plt\n'), ((19087, 19234), 'matplotlib.pyplot.plot', 'plt.plot', (['coordinates[i, 0]', 'coordinates[i, 1]', 'shape_list[i]'], {'color': 'color_list[i]', 'mec': '"""black"""', 'label': 'label_list[i]', 'markersize': '(15)', 'alpha': '(0.9)'}), "(coordinates[i, 0], coordinates[i, 1], shape_list[i], color=\n color_list[i], mec='black', label=label_list[i], markersize=15, alpha=0.9)\n", (19095, 19234), True, 'import matplotlib.pyplot as plt\n'), ((8610, 8631), 'numpy.sum', 'np.sum', (['pop1_count[0]'], {}), '(pop1_count[0])\n', (8616, 8631), True, 'import numpy as np\n'), ((8912, 8933), 'numpy.sum', 'np.sum', (['pop2_count[0]'], {}), '(pop2_count[0])\n', (8918, 8933), True, 'import numpy as np\n'), ((17495, 17521), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (17511, 17521), False, 'import os\n'), ((9874, 9896), 'collections.Counter', 'Counter', (['initial_group'], {}), '(initial_group)\n', (9881, 9896), False, 'from collections import Counter\n')] |
"""A simple 1D Maxwell Solver
This one is based on 'maxvell1D.m' matlab routine by <NAME>.
(c) <NAME> 2012, mailto: <EMAIL>
"""
import numpy as np ## numeric routines; arrays
import pylab as plt ## plotting
# Set plotting parameters
params = {'axes.labelsize': 'large',
'xtick.labelsize': 'medium',
'ytick.labelsize': 'medium',
'font.size': 10,
'text.usetex': False}
plt.rcParams.update(params)
## Switch on interactive plotting mode
#plt.ion()
"""
dB/dt=-curl E
eps0 mu0 dE/dt= curl B - mu0 J
( curl A )_y = - dA_z/dx
( curl A )_z = dA_y/dx
"""
mu0 = 4. * np.pi * 1e-7
eps0 = 8.8542 * 1.e-12
c = 1. / (mu0 * eps0)**0.5
Lx = 1e6
Time = Lx / c / 4
nx = 1000
dx = Lx / nx
x = np.arange(nx) * dx #0:dx:Lx-dx
dt = dx / c / 2
Ncycles = int(Time / dt)
k = 2 * np.pi / Lx * 3
"""Choose initial condition: pure wave, wave packet, ot random noise"""
## Pure wave
"""
Ey = np.sin(k * x)
Bz = np.sin(k * (x + dx/2)) / c
"""
## Wave packet
sigma = Lx / 100
Ey = np.exp(-(x - Lx/2)**2 / sigma**2)
Bz = np.zeros(nx)
## Random noise
"""
Ey = np.random.randn(nx)
Bz = np.random.randn(nx)
"""
plt.figure('maxwell1D')
plt.subplot(2, 2, 1)
plt.plot(x, Ey, 'r', label='Ey')
plt.xlabel('x')
plt.legend(loc=1)
plt.subplot(2, 2, 2)
plt.plot(x+dx/2, Bz, 'b', label='Bz')
plt.xlabel('x')
plt.legend(loc=1)
plt.pause(0.0001)
plt.draw()
curlEz = np.zeros(nx)
curlBy = np.zeros(nx)
# Empty lists for storing the computation results
etm = []
btm = []
t = []
for it in range(1, Ncycles) :
## compute central difference, assuming periodicity
curlEz[0:-1] = (Ey[1:] - Ey[0:-1]) / dx
curlEz[-1] = (Ey[0] - Ey[len(Ey)-1]) / dx
Bz = Bz - dt*curlEz
curlBy[1:] = -(Bz[1:] - Bz[0:-1]) / dx
curlBy[0] = -(Bz[0] - Bz[-1]) / dx
Ey = Ey + dt*curlBy/mu0/eps0
if np.mod(it, Ncycles/10) == 0 :
plt.clf()
plt.subplot(2, 2, 1)
plt.plot(x, Ey, 'r', label='Ey')
plt.title('t=%f' % (it*dt))
plt.xlabel('x')
plt.legend(loc=1)
plt.subplot(2, 2, 2)
plt.plot(x+dx/2, Bz, 'b', label='Bz')
plt.xlabel('x')
plt.legend(loc=1)
plt.pause(0.0001)
plt.draw()
etm.append(Ey)
btm.append(Bz)
t.append(it*dt)
# convert the lists to np.ndarray
etm = np.array(etm)
btm = np.array(btm)
t = np.array(t)
## Plot the variations of electric field
plt.hold(True)
plt.subplot(2, 2, 3)
plt.title('Ey variations')
plt.pcolor(x, t, etm)
plt.xlabel('x')
plt.ylabel('t')
## Plot light cone
plt.plot([Lx/2, Lx/2-c*Time], [0, Time], color='w', linewidth=2)
plt.plot([Lx/2, Lx/2+c*Time], [0, Time], color='w', linewidth=2)
plt.pause(0.0001)
plt.draw()
#plt.figure('Spectra')
etmf2 = np.fft.fft2(etm) # should be fft2
# FFT variable in time
Fs = 1 / dt ## Sampling frequency
T = 1/Fs ## Sample time
Nt = len(t) ## Length of signal
w = Fs * np.pi * np.linspace(0, 1, Nt/2+1)
# FFT variable in space
Fs = 1 / dx ## Sampling frequency
T = 1 / Fs ## Sample length
Nx = len(x) ## Length of signal
k = Fs * np.pi * np.linspace(0, 1, Nx/2+1)
# Plot the spectrum
plt.subplot(2, 2, 4)
plt.title('Spectrum')
plt.pcolor(k, w, abs(etmf2[0:Nt/2+1, 1:Nx/2+1]))
plt.plot(k, k*c, 'w')
plt.xlabel('k')
plt.ylabel('omega')
plt.plot(k, 2*np.arcsin(c * dt/dx * np.sin(k*dx/2))/dt)
plt.pause(0.0001)
plt.draw()
plt.show()
| [
"pylab.title",
"pylab.xlabel",
"pylab.hold",
"numpy.array",
"numpy.sin",
"numpy.mod",
"numpy.arange",
"pylab.draw",
"pylab.plot",
"numpy.fft.fft2",
"numpy.exp",
"numpy.linspace",
"pylab.rcParams.update",
"pylab.clf",
"pylab.subplot",
"pylab.figure",
"pylab.pause",
"pylab.pcolor",
... | [((450, 477), 'pylab.rcParams.update', 'plt.rcParams.update', (['params'], {}), '(params)\n', (469, 477), True, 'import pylab as plt\n'), ((1097, 1136), 'numpy.exp', 'np.exp', (['(-(x - Lx / 2) ** 2 / sigma ** 2)'], {}), '(-(x - Lx / 2) ** 2 / sigma ** 2)\n', (1103, 1136), True, 'import numpy as np\n'), ((1137, 1149), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (1145, 1149), True, 'import numpy as np\n'), ((1236, 1259), 'pylab.figure', 'plt.figure', (['"""maxwell1D"""'], {}), "('maxwell1D')\n", (1246, 1259), True, 'import pylab as plt\n'), ((1262, 1282), 'pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (1273, 1282), True, 'import pylab as plt\n'), ((1284, 1316), 'pylab.plot', 'plt.plot', (['x', 'Ey', '"""r"""'], {'label': '"""Ey"""'}), "(x, Ey, 'r', label='Ey')\n", (1292, 1316), True, 'import pylab as plt\n'), ((1318, 1333), 'pylab.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1328, 1333), True, 'import pylab as plt\n'), ((1335, 1352), 'pylab.legend', 'plt.legend', ([], {'loc': '(1)'}), '(loc=1)\n', (1345, 1352), True, 'import pylab as plt\n'), ((1354, 1374), 'pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (1365, 1374), True, 'import pylab as plt\n'), ((1376, 1417), 'pylab.plot', 'plt.plot', (['(x + dx / 2)', 'Bz', '"""b"""'], {'label': '"""Bz"""'}), "(x + dx / 2, Bz, 'b', label='Bz')\n", (1384, 1417), True, 'import pylab as plt\n'), ((1415, 1430), 'pylab.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1425, 1430), True, 'import pylab as plt\n'), ((1432, 1449), 'pylab.legend', 'plt.legend', ([], {'loc': '(1)'}), '(loc=1)\n', (1442, 1449), True, 'import pylab as plt\n'), ((1451, 1468), 'pylab.pause', 'plt.pause', (['(0.0001)'], {}), '(0.0001)\n', (1460, 1468), True, 'import pylab as plt\n'), ((1470, 1480), 'pylab.draw', 'plt.draw', ([], {}), '()\n', (1478, 1480), True, 'import pylab as plt\n'), ((1493, 1505), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (1501, 1505), True, 
'import numpy as np\n'), ((1516, 1528), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (1524, 1528), True, 'import numpy as np\n'), ((2454, 2467), 'numpy.array', 'np.array', (['etm'], {}), '(etm)\n', (2462, 2467), True, 'import numpy as np\n'), ((2475, 2488), 'numpy.array', 'np.array', (['btm'], {}), '(btm)\n', (2483, 2488), True, 'import numpy as np\n'), ((2494, 2505), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (2502, 2505), True, 'import numpy as np\n'), ((2551, 2565), 'pylab.hold', 'plt.hold', (['(True)'], {}), '(True)\n', (2559, 2565), True, 'import pylab as plt\n'), ((2567, 2587), 'pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (2578, 2587), True, 'import pylab as plt\n'), ((2589, 2615), 'pylab.title', 'plt.title', (['"""Ey variations"""'], {}), "('Ey variations')\n", (2598, 2615), True, 'import pylab as plt\n'), ((2617, 2638), 'pylab.pcolor', 'plt.pcolor', (['x', 't', 'etm'], {}), '(x, t, etm)\n', (2627, 2638), True, 'import pylab as plt\n'), ((2640, 2655), 'pylab.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2650, 2655), True, 'import pylab as plt\n'), ((2657, 2672), 'pylab.ylabel', 'plt.ylabel', (['"""t"""'], {}), "('t')\n", (2667, 2672), True, 'import pylab as plt\n'), ((2694, 2766), 'pylab.plot', 'plt.plot', (['[Lx / 2, Lx / 2 - c * Time]', '[0, Time]'], {'color': '"""w"""', 'linewidth': '(2)'}), "([Lx / 2, Lx / 2 - c * Time], [0, Time], color='w', linewidth=2)\n", (2702, 2766), True, 'import pylab as plt\n'), ((2760, 2832), 'pylab.plot', 'plt.plot', (['[Lx / 2, Lx / 2 + c * Time]', '[0, Time]'], {'color': '"""w"""', 'linewidth': '(2)'}), "([Lx / 2, Lx / 2 + c * Time], [0, Time], color='w', linewidth=2)\n", (2768, 2832), True, 'import pylab as plt\n'), ((2826, 2843), 'pylab.pause', 'plt.pause', (['(0.0001)'], {}), '(0.0001)\n', (2835, 2843), True, 'import pylab as plt\n'), ((2845, 2855), 'pylab.draw', 'plt.draw', ([], {}), '()\n', (2853, 2855), True, 'import pylab as plt\n'), ((2891, 2907), 'numpy.fft.fft2', 
'np.fft.fft2', (['etm'], {}), '(etm)\n', (2902, 2907), True, 'import numpy as np\n'), ((3280, 3300), 'pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (3291, 3300), True, 'import pylab as plt\n'), ((3302, 3323), 'pylab.title', 'plt.title', (['"""Spectrum"""'], {}), "('Spectrum')\n", (3311, 3323), True, 'import pylab as plt\n'), ((3375, 3398), 'pylab.plot', 'plt.plot', (['k', '(k * c)', '"""w"""'], {}), "(k, k * c, 'w')\n", (3383, 3398), True, 'import pylab as plt\n'), ((3398, 3413), 'pylab.xlabel', 'plt.xlabel', (['"""k"""'], {}), "('k')\n", (3408, 3413), True, 'import pylab as plt\n'), ((3415, 3434), 'pylab.ylabel', 'plt.ylabel', (['"""omega"""'], {}), "('omega')\n", (3425, 3434), True, 'import pylab as plt\n'), ((3493, 3510), 'pylab.pause', 'plt.pause', (['(0.0001)'], {}), '(0.0001)\n', (3502, 3510), True, 'import pylab as plt\n'), ((3512, 3522), 'pylab.draw', 'plt.draw', ([], {}), '()\n', (3520, 3522), True, 'import pylab as plt\n'), ((3526, 3536), 'pylab.show', 'plt.show', ([], {}), '()\n', (3534, 3536), True, 'import pylab as plt\n'), ((802, 815), 'numpy.arange', 'np.arange', (['nx'], {}), '(nx)\n', (811, 815), True, 'import numpy as np\n'), ((3062, 3091), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(Nt / 2 + 1)'], {}), '(0, 1, Nt / 2 + 1)\n', (3073, 3091), True, 'import numpy as np\n'), ((3230, 3259), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(Nx / 2 + 1)'], {}), '(0, 1, Nx / 2 + 1)\n', (3241, 3259), True, 'import numpy as np\n'), ((1939, 1963), 'numpy.mod', 'np.mod', (['it', '(Ncycles / 10)'], {}), '(it, Ncycles / 10)\n', (1945, 1963), True, 'import numpy as np\n'), ((1978, 1987), 'pylab.clf', 'plt.clf', ([], {}), '()\n', (1985, 1987), True, 'import pylab as plt\n'), ((1997, 2017), 'pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (2008, 2017), True, 'import pylab as plt\n'), ((2035, 2067), 'pylab.plot', 'plt.plot', (['x', 'Ey', '"""r"""'], {'label': '"""Ey"""'}), "(x, Ey, 'r', label='Ey')\n", 
(2043, 2067), True, 'import pylab as plt\n'), ((2085, 2114), 'pylab.title', 'plt.title', (["('t=%f' % (it * dt))"], {}), "('t=%f' % (it * dt))\n", (2094, 2114), True, 'import pylab as plt\n'), ((2122, 2137), 'pylab.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2132, 2137), True, 'import pylab as plt\n'), ((2147, 2164), 'pylab.legend', 'plt.legend', ([], {'loc': '(1)'}), '(loc=1)\n', (2157, 2164), True, 'import pylab as plt\n'), ((2182, 2202), 'pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (2193, 2202), True, 'import pylab as plt\n'), ((2212, 2253), 'pylab.plot', 'plt.plot', (['(x + dx / 2)', 'Bz', '"""b"""'], {'label': '"""Bz"""'}), "(x + dx / 2, Bz, 'b', label='Bz')\n", (2220, 2253), True, 'import pylab as plt\n'), ((2259, 2274), 'pylab.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2269, 2274), True, 'import pylab as plt\n'), ((2284, 2301), 'pylab.legend', 'plt.legend', ([], {'loc': '(1)'}), '(loc=1)\n', (2294, 2301), True, 'import pylab as plt\n'), ((2311, 2328), 'pylab.pause', 'plt.pause', (['(0.0001)'], {}), '(0.0001)\n', (2320, 2328), True, 'import pylab as plt\n'), ((2338, 2348), 'pylab.draw', 'plt.draw', ([], {}), '()\n', (2346, 2348), True, 'import pylab as plt\n'), ((3472, 3490), 'numpy.sin', 'np.sin', (['(k * dx / 2)'], {}), '(k * dx / 2)\n', (3478, 3490), True, 'import numpy as np\n')] |
import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from SVC_Utils import *
from sklearn.metrics import roc_curve, auc
# determine device to run network on (runs on gpu if available)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def eval_target_net(net, testloader, classes=None, device=None):
    """Evaluate classification accuracy of ``net`` on ``testloader``.

    Args:
        net: model mapping an image batch to per-class scores.
        testloader: iterable of ``(images, labels)`` batches.
        classes: optional sequence of class names; when given, per-class
            accuracy is printed as well.
        device: torch device to run on.  Defaults to ``cuda:0`` when
            available, else CPU (backward compatible with the old
            module-level ``device`` global).

    Returns:
        Overall accuracy as a percentage (float).
    """
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if classes is not None:
        # Size the per-class tallies to the actual number of classes
        # (the original hard-coded 10).
        class_correct = np.zeros(len(classes))
        class_total = np.zeros(len(classes))
    total = 0
    correct = 0
    with torch.no_grad():
        net.eval()
        for imgs, lbls in testloader:
            imgs, lbls = imgs.to(device), lbls.to(device)
            predicted = net(imgs).argmax(dim=1)
            total += imgs.size(0)
            correct += predicted.eq(lbls).sum().item()
            if classes is not None:
                for prediction, lbl in zip(predicted, lbls):
                    class_correct[lbl] += (prediction == lbl).item()
                    class_total[lbl] += 1
    accuracy = 100 * (correct / total)
    if classes is not None:
        for i, name in enumerate(classes):
            print('Accuracy of %s : %.2f %%' %
                  (name, 100 * class_correct[i] / class_total[i]))
    print("\nAccuracy = %.2f %%\n\n" % (accuracy))
    return accuracy
def eval_attack_net(attack_net, target, target_train, target_out, k):
    """Assess accuracy, precision, and recall of attack model for in training set/out of training set classification.

    Edited for use with SVCs.

    Args:
        attack_net: binary membership classifier mapping top-k posterior
            probabilities to a logit (sigmoid is applied here).
        target: the attacked model -- either an sklearn ``Pipeline`` (SVC
            path) or a torch network.
        target_train: loader over samples that WERE in the target's training set.
        target_out: loader over samples that were NOT in the training set.
        k: number of top posterior probabilities fed to the attack model.

    Prints accuracy/precision/recall for every threshold in [0.5, 1) and
    shows a precision-recall plot; returns ``None``.

    NOTE(review): relies on the module-level ``device`` global; ``F.sigmoid``
    is deprecated in favour of ``torch.sigmoid`` (same behaviour).
    """
    in_predicts=[]
    out_predicts=[]
    losses = []
    # sklearn Pipelines have no eval(); only switch torch models to eval mode.
    if type(target) is not Pipeline:
        target_net=target
        target_net.eval()
    attack_net.eval()
    precisions = []
    recalls = []
    accuracies = []
    #for threshold in np.arange(0.5, 1, 0.005):
    thresholds = np.arange(0.5, 1, 0.005)
    # Per-threshold counters: one slot per candidate decision threshold.
    total = np.zeros(len(thresholds))
    correct = np.zeros(len(thresholds))
    true_positives = np.zeros(len(thresholds))
    false_positives = np.zeros(len(thresholds))
    false_negatives = np.zeros(len(thresholds))
    train_top = np.empty((0,2))
    out_top = np.empty((0,2))
    for i, ((train_imgs, _), (out_imgs, _)) in enumerate(zip(target_train, target_out)):
        mini_batch_size = train_imgs.shape[0]
        train_imgs, out_imgs = train_imgs.to(device), out_imgs.to(device)
        #[mini_batch_size x num_classes] tensors, (0,1) probabilities for each class for each sample)
        if type(target) is Pipeline:
            # SVC path: flatten the images and query predict_proba.
            traininputs=train_imgs.view(train_imgs.shape[0], -1)
            outinputs=out_imgs.view(out_imgs.shape[0], -1)
            train_posteriors=torch.from_numpy(target.predict_proba(traininputs)).float()
            out_posteriors=torch.from_numpy(target.predict_proba(outinputs)).float()
        else:
            train_posteriors = F.softmax(target_net(train_imgs.detach()), dim=1)
            out_posteriors = F.softmax(target_net(out_imgs.detach()), dim=1)
        #[k x mini_batch_size] tensors, (0,1) probabilities for top k probable classes
        train_sort, _ = torch.sort(train_posteriors, descending=True)
        train_top_k = train_sort[:,:k].clone().to(device)
        out_sort, _ = torch.sort(out_posteriors, descending=True)
        out_top_k = out_sort[:,:k].clone().to(device)
        #Collects probabilities for predicted class.
        for p in train_top_k:
            in_predicts.append((p.max()).item())
        for p in out_top_k:
            out_predicts.append((p.max()).item())
        if type(target) is not Pipeline:
            train_top = np.vstack((train_top,train_top_k[:,:2].cpu().detach().numpy()))
            out_top = np.vstack((out_top, out_top_k[:,:2].cpu().detach().numpy()))
        #print("train_top_k = ",train_top_k)
        #print("out_top_k = ",out_top_k)
        #print(train_top.shape)
        train_lbl = torch.ones(mini_batch_size).to(device)
        out_lbl = torch.zeros(mini_batch_size).to(device)
        #Takes in probabilities for top k most likely classes, outputs ~1 (in training set) or ~0 (out of training set)
        train_predictions = F.sigmoid(torch.squeeze(attack_net(train_top_k)))
        out_predictions = F.sigmoid(torch.squeeze(attack_net(out_top_k)))
        # Accumulate confusion counts for every threshold at once
        # ("member" is the positive class).
        for j, t in enumerate(thresholds):
            true_positives[j] += (train_predictions >= t).sum().item()
            false_positives[j] += (out_predictions >= t).sum().item()
            false_negatives[j] += (train_predictions < t).sum().item()
            #print(train_top >= threshold)
            #print((train_top >= threshold).sum().item(),',',(out_top >= threshold).sum().item())
            correct[j] += (train_predictions >= t).sum().item()
            correct[j] += (out_predictions < t).sum().item()
            total[j] += train_predictions.size(0) + out_predictions.size(0)
    #print(true_positives,',',false_positives,',',false_negatives)
    # Derive the metrics per threshold, guarding the zero-denominator cases.
    for j, t in enumerate(thresholds):
        accuracy = 100 * correct[j] / total[j]
        precision = true_positives[j] / (true_positives[j] + false_positives[j]) if true_positives[j] + false_positives[j] != 0 else 0
        recall = true_positives[j] / (true_positives[j] + false_negatives[j]) if true_positives[j] + false_negatives[j] !=0 else 0
        accuracies.append(accuracy)
        precisions.append(precision)
        recalls.append(recall)
        print("threshold = %.4f, accuracy = %.2f, precision = %.2f, recall = %.2f" % (t, accuracy, precision, recall))
    plt.plot(recalls, precisions)
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.show()
def eval_attack_roc(attack_net, target_net, target_train, target_out, k):
    """Evaluate the membership-inference attack at threshold 0.5 and compute its ROC curve.

    Args:
        attack_net: binary membership classifier over top-k posteriors.
        target_net: the attacked torch model.
        target_train: loader over members of the target's training set.
        target_out: loader over non-members.
        k: number of top posterior probabilities fed to the attack model.

    Returns:
        (fpr, tpr, roc_auc) as produced by sklearn's ``roc_curve``/``auc``.

    NOTE(review): relies on the module-level ``device`` global; ``F.sigmoid``
    is deprecated in favour of ``torch.sigmoid`` (same behaviour).
    """
    losses = []
    target_net.eval()
    attack_net.eval()
    total = 0
    correct = 0
    train_top = np.empty((0,2))
    out_top = np.empty((0,2))
    true_positives = 0
    false_positives = 0
    false_negatives = 0
    # Flat arrays of attack scores and 1/0 membership labels across batches.
    predictions = np.array([])
    labels = np.array([])
    for i, ((train_imgs, _), (out_imgs, _)) in enumerate(zip(target_train, target_out)):
        train_size = train_imgs.shape[0]
        out_size = out_imgs.shape[0]
        train_imgs, out_imgs = train_imgs.to(device), out_imgs.to(device)
        # Class-posterior probabilities from the target model.
        train_posteriors = F.softmax(target_net(train_imgs.detach()), dim=1)
        out_posteriors = F.softmax(target_net(out_imgs.detach()), dim=1)
        # Keep only the k largest posteriors per sample.
        train_sort, _ = torch.sort(train_posteriors, descending=True)
        train_top_k = train_sort[:,:k].clone().to(device)
        out_sort, _ = torch.sort(out_posteriors, descending=True)
        out_top_k = out_sort[:,:k].clone().to(device)
        train_top = np.vstack((train_top,train_top_k[:,:2].cpu().detach().numpy()))
        out_top = np.vstack((out_top, out_top_k[:,:2].cpu().detach().numpy()))
        #print("train_top_k = ",train_top_k)
        #print("out_top_k = ",out_top_k)
        train_lbl = torch.ones(train_size).to(device)
        out_lbl = torch.zeros(out_size).to(device)
        # Attack scores in (0, 1): ~1 means "member", ~0 means "non-member".
        train_predictions = F.sigmoid(torch.squeeze(attack_net(train_top_k)))
        out_predictions = F.sigmoid(torch.squeeze(attack_net(out_top_k)))
        predictions = np.concatenate((predictions, train_predictions.detach().cpu().numpy()), axis=0)
        labels = np.concatenate((labels, np.ones(train_size)), axis=0)
        predictions = np.concatenate((predictions, out_predictions.detach().cpu().numpy()), axis=0)
        labels = np.concatenate((labels, np.zeros(out_size)), axis=0)
        #print("train_predictions = ",train_predictions)
        #print("out_predictions = ",out_predictions)
        # Confusion counts at the fixed 0.5 decision threshold.
        true_positives += (train_predictions >= 0.5).sum().item()
        false_positives += (out_predictions >= 0.5).sum().item()
        false_negatives += (train_predictions < 0.5).sum().item()
        correct += (train_predictions>=0.5).sum().item()
        correct += (out_predictions<0.5).sum().item()
        total += train_predictions.size(0) + out_predictions.size(0)
    accuracy = 100 * correct / total
    precision = true_positives / (true_positives + false_positives) if true_positives + false_positives != 0 else 0
    recall = true_positives / (true_positives + false_negatives) if true_positives + false_negatives !=0 else 0
    print("Membership Inference Performance")
    print("Accuracy = %.2f%%, Precision = %.2f, Recall = %.2f" % (accuracy, precision, recall))
    fpr, tpr, thresholds = roc_curve(labels, predictions, pos_label=1)
    roc_auc = auc(fpr, tpr)
    return fpr, tpr, roc_auc
def eval_membership_inference(target_net, target_train, target_out):
    """Baseline membership inference: threshold the target's top posterior.

    A sample is declared a training-set member when the maximum softmax
    probability exceeds a threshold; accuracy/precision/recall are reported
    for every threshold in [0.5, 1) and a precision-recall curve is shown.

    Args:
        target_net: the attacked torch model.
        target_train: loader over members of the target's training set.
        target_out: loader over non-members.

    NOTE(review): relies on the module-level ``device`` global.
    """
    target_net.eval()
    precisions = []
    recalls = []
    accuracies = []
    #for threshold in np.arange(0.5, 1, 0.005):
    thresholds = np.arange(0.5, 1, 0.005)
    # Per-threshold confusion counters ("member" is the positive class).
    total = np.zeros(len(thresholds))
    correct = np.zeros(len(thresholds))
    true_positives = np.zeros(len(thresholds))
    false_positives = np.zeros(len(thresholds))
    false_negatives = np.zeros(len(thresholds))
    for i, ((train_imgs, _), (out_imgs, _)) in enumerate(zip(target_train, target_out)):
        mini_batch_size = train_imgs.shape[0]
        train_imgs, out_imgs = train_imgs.to(device), out_imgs.to(device)
        train_posteriors = F.softmax(target_net(train_imgs.detach()), dim=1)
        out_posteriors = F.softmax(target_net(out_imgs.detach()), dim=1)
        # Keep only the single largest posterior per sample.
        train_sort, _ = torch.sort(train_posteriors, descending=True)
        train_top = train_sort[:,0].clone().to(device)
        out_sort, _ = torch.sort(out_posteriors, descending=True)
        out_top = out_sort[:,0].clone().to(device)
        #print(train_top.shape)
        for j, t in enumerate(thresholds):
            true_positives[j] += (train_top >= t).sum().item()
            false_positives[j] += (out_top >= t).sum().item()
            false_negatives[j] += (train_top < t).sum().item()
            #print(train_top >= threshold)
            #print((train_top >= threshold).sum().item(),',',(out_top >= threshold).sum().item())
            correct[j] += (train_top >= t).sum().item()
            correct[j] += (out_top < t).sum().item()
            total[j] += train_top.size(0) + out_top.size(0)
    #print(true_positives,',',false_positives,',',false_negatives)
    # Derive the metrics per threshold, guarding the zero-denominator cases.
    for j, t in enumerate(thresholds):
        accuracy = 100 * correct[j] / total[j]
        precision = true_positives[j] / (true_positives[j] + false_positives[j]) if true_positives[j] + false_positives[j] != 0 else 0
        recall = true_positives[j] / (true_positives[j] + false_negatives[j]) if true_positives[j] + false_negatives[j] !=0 else 0
        accuracies.append(accuracy)
        precisions.append(precision)
        recalls.append(recall)
        print("threshold = %.4f, accuracy = %.2f, precision = %.2f, recall = %.2f" % (t, accuracy, precision, recall))
    plt.plot(recalls, precisions)
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.show()
| [
"torch.sort",
"torch.ones",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.zeros",
"sklearn.metrics.roc_curve",
"numpy.empty",
"torch.cuda.is_available",
"torch.no_grad",
"torch.zeros",
"numpy.a... | [((1888, 1912), 'numpy.arange', 'np.arange', (['(0.5)', '(1)', '(0.005)'], {}), '(0.5, 1, 0.005)\n', (1897, 1912), True, 'import numpy as np\n'), ((2157, 2173), 'numpy.empty', 'np.empty', (['(0, 2)'], {}), '((0, 2))\n', (2165, 2173), True, 'import numpy as np\n'), ((2187, 2203), 'numpy.empty', 'np.empty', (['(0, 2)'], {}), '((0, 2))\n', (2195, 2203), True, 'import numpy as np\n'), ((5654, 5683), 'matplotlib.pyplot.plot', 'plt.plot', (['recalls', 'precisions'], {}), '(recalls, precisions)\n', (5662, 5683), True, 'import matplotlib.pyplot as plt\n'), ((5688, 5708), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (5698, 5708), True, 'import matplotlib.pyplot as plt\n'), ((5713, 5736), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (5723, 5736), True, 'import matplotlib.pyplot as plt\n'), ((5741, 5751), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5749, 5751), True, 'import matplotlib.pyplot as plt\n'), ((5945, 5961), 'numpy.empty', 'np.empty', (['(0, 2)'], {}), '((0, 2))\n', (5953, 5961), True, 'import numpy as np\n'), ((5975, 5991), 'numpy.empty', 'np.empty', (['(0, 2)'], {}), '((0, 2))\n', (5983, 5991), True, 'import numpy as np\n'), ((6086, 6098), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6094, 6098), True, 'import numpy as np\n'), ((6112, 6124), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6120, 6124), True, 'import numpy as np\n'), ((8579, 8622), 'sklearn.metrics.roc_curve', 'roc_curve', (['labels', 'predictions'], {'pos_label': '(1)'}), '(labels, predictions, pos_label=1)\n', (8588, 8622), False, 'from sklearn.metrics import roc_curve, auc\n'), ((8637, 8650), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (8640, 8650), False, 'from sklearn.metrics import roc_curve, auc\n'), ((8902, 8926), 'numpy.arange', 'np.arange', (['(0.5)', '(1)', '(0.005)'], {}), '(0.5, 1, 0.005)\n', (8911, 8926), True, 'import numpy as np\n'), ((10986, 
11015), 'matplotlib.pyplot.plot', 'plt.plot', (['recalls', 'precisions'], {}), '(recalls, precisions)\n', (10994, 11015), True, 'import matplotlib.pyplot as plt\n'), ((11020, 11040), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (11030, 11040), True, 'import matplotlib.pyplot as plt\n'), ((11045, 11068), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (11055, 11068), True, 'import matplotlib.pyplot as plt\n'), ((11073, 11083), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11081, 11083), True, 'import matplotlib.pyplot as plt\n'), ((301, 326), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (324, 326), False, 'import torch\n'), ((446, 458), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (454, 458), True, 'import numpy as np\n'), ((481, 493), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (489, 493), True, 'import numpy as np\n'), ((533, 548), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (546, 548), False, 'import torch\n'), ((3184, 3229), 'torch.sort', 'torch.sort', (['train_posteriors'], {'descending': '(True)'}), '(train_posteriors, descending=True)\n', (3194, 3229), False, 'import torch\n'), ((3311, 3354), 'torch.sort', 'torch.sort', (['out_posteriors'], {'descending': '(True)'}), '(out_posteriors, descending=True)\n', (3321, 3354), False, 'import torch\n'), ((6553, 6598), 'torch.sort', 'torch.sort', (['train_posteriors'], {'descending': '(True)'}), '(train_posteriors, descending=True)\n', (6563, 6598), False, 'import torch\n'), ((6680, 6723), 'torch.sort', 'torch.sort', (['out_posteriors'], {'descending': '(True)'}), '(out_posteriors, descending=True)\n', (6690, 6723), False, 'import torch\n'), ((9538, 9583), 'torch.sort', 'torch.sort', (['train_posteriors'], {'descending': '(True)'}), '(train_posteriors, descending=True)\n', (9548, 9583), False, 'import torch\n'), ((9662, 9705), 'torch.sort', 'torch.sort', (['out_posteriors'], 
{'descending': '(True)'}), '(out_posteriors, descending=True)\n', (9672, 9705), False, 'import torch\n'), ((4006, 4033), 'torch.ones', 'torch.ones', (['mini_batch_size'], {}), '(mini_batch_size)\n', (4016, 4033), False, 'import torch\n'), ((4063, 4091), 'torch.zeros', 'torch.zeros', (['mini_batch_size'], {}), '(mini_batch_size)\n', (4074, 4091), False, 'import torch\n'), ((7051, 7073), 'torch.ones', 'torch.ones', (['train_size'], {}), '(train_size)\n', (7061, 7073), False, 'import torch\n'), ((7103, 7124), 'torch.zeros', 'torch.zeros', (['out_size'], {}), '(out_size)\n', (7114, 7124), False, 'import torch\n'), ((7442, 7461), 'numpy.ones', 'np.ones', (['train_size'], {}), '(train_size)\n', (7449, 7461), True, 'import numpy as np\n'), ((7613, 7631), 'numpy.zeros', 'np.zeros', (['out_size'], {}), '(out_size)\n', (7621, 7631), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Utility functions."""
import os
import random
import tempfile
import matplotlib.pyplot as plt
import numpy as np
from moviepy import editor as mpy
import tensorflow as tf
def display_data(data, num_img):
    """Plot `num_img` dataset examples: ten input frames per example,
    followed by the ten corresponding target frames."""
    # pylint: disable=invalid-name
    batch = next(iter(data.unbatch().take(num_img).batch(num_img)))
    fig = plt.figure(figsize=(20, 4 * num_img))
    for row in range(num_img):
        # Each example occupies two subplot rows of ten frames each:
        # inputs at offset 1, targets at offset 11.
        for key, offset in (("input", 1), ("target", 11)):
            for col in range(10):
                ax = fig.add_subplot(2 * num_img, 10, row * 20 + offset + col)
                ax.axis("off")
                plt.imshow(batch[key][row, col])
    plt.show()
def set_random_seed(seed):
    """Seed Python, NumPy and TensorFlow RNGs and force deterministic cuDNN."""
    os.environ["TF_CUDNN_DETERMINISTIC"] = "1"
    for seeder in (random.seed, np.random.seed, tf.random.set_seed):
        seeder(seed)
def create_gif(images, fps=3):
    """Encode a sequence of float images (clipped to [0, 1]) as GIF bytes."""
    # pylint: disable=invalid-name
    frames = (np.clip(images, 0, 1) * 255).astype(np.uint8)
    clip = mpy.ImageSequenceClip(list(frames), fps=fps)
    with tempfile.NamedTemporaryFile() as tmp:
        gif_path = tmp.name + ".gif"
        clip.write_gif(gif_path, logger=None)
        with open(gif_path, "rb") as fh:
            payload = fh.read()
    os.remove(gif_path)
    return payload
def encode_gif_summary(images, name, fps=3):
    """Encode a batch of GIF animations as a serialized TF Summary proto.

    Args:
        images: array of shape (batch, time, height, width, channels) or a
            single clip of shape (time, height, width, channels).
        name: summary tag prefix.
        fps: frames per second of the encoded GIFs.

    Returns:
        Serialized ``tf.compat.v1.Summary`` bytes.
    """
    # pylint: disable=invalid-name, no-member
    shape = images.shape
    if len(shape) == 4:
        # Bug fix: the original only batched `shape`, so images[i] below
        # indexed individual frames instead of whole clips. Batch the data too.
        images = images[None]
        shape = images.shape
    summary = tf.compat.v1.Summary()
    for i in range(shape[0]):
        img_summary = tf.compat.v1.Summary.Image()
        img_summary.height = shape[2]
        img_summary.width = shape[3]
        img_summary.colorspace = 3  # RGB
        img_summary.encoded_image_string = create_gif(images[i], fps)
        if shape[0] == 1:
            tag = "{}/gif".format(name)
        else:
            tag = "{}/gif/{}".format(name, i)
        summary.value.add(tag=tag, image=img_summary)
    return summary.SerializeToString()
def gif_summary(name, tensor, fps=3, step=None):
    """Write a GIF animation summary for TensorBoard at the given step."""
    raw_proto = encode_gif_summary(images=tensor.numpy(), name=name, fps=fps)
    tf.summary.experimental.write_raw_pb(raw_proto, step)
| [
"matplotlib.pyplot.imshow",
"tensorflow.random.set_seed",
"tensorflow.compat.v1.Summary.Image",
"random.seed",
"os.remove",
"matplotlib.pyplot.figure",
"tensorflow.compat.v1.Summary",
"numpy.random.seed",
"tempfile.NamedTemporaryFile",
"numpy.maximum",
"matplotlib.pyplot.show"
] | [((394, 431), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 4 * num_img)'}), '(figsize=(20, 4 * num_img))\n', (404, 431), True, 'import matplotlib.pyplot as plt\n'), ((801, 811), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (809, 811), True, 'import matplotlib.pyplot as plt\n'), ((872, 889), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (883, 889), False, 'import random\n'), ((894, 914), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (908, 914), True, 'import numpy as np\n'), ((919, 943), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (937, 943), True, 'import tensorflow as tf\n'), ((1412, 1431), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (1421, 1431), False, 'import os\n'), ((1685, 1707), 'tensorflow.compat.v1.Summary', 'tf.compat.v1.Summary', ([], {}), '()\n', (1705, 1707), True, 'import tensorflow as tf\n'), ((1236, 1265), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (1263, 1265), False, 'import tempfile\n'), ((1760, 1788), 'tensorflow.compat.v1.Summary.Image', 'tf.compat.v1.Summary.Image', ([], {}), '()\n', (1786, 1788), True, 'import tensorflow as tf\n'), ((598, 628), 'matplotlib.pyplot.imshow', 'plt.imshow', (["d['input'][ind, i]"], {}), "(d['input'][ind, i])\n", (608, 628), True, 'import matplotlib.pyplot as plt\n'), ((765, 796), 'matplotlib.pyplot.imshow', 'plt.imshow', (["d['target'][ind, i]"], {}), "(d['target'][ind, i])\n", (775, 796), True, 'import matplotlib.pyplot as plt\n'), ((1106, 1127), 'numpy.maximum', 'np.maximum', (['images', '(0)'], {}), '(images, 0)\n', (1116, 1127), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# This file is covered by the LICENSE file in the root of this project.
import argparse
import os
import yaml
import numpy as np
import pcl
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
from auxiliary.laserscan import SemLaserScan
import random
import open3d
# from open3d import *
import json
import collections
from tqdm import tqdm
import time
import rospy
from std_msgs.msg import Header
from sensor_msgs.msg import PointCloud2, PointField
import sensor_msgs.point_cloud2 as pc2
# SemanticKITTI raw label id -> training label id (0..19) remapping table.
learning_map={0 : 0, # "unlabeled"
             1 : 0, # "outlier" mapped to "unlabeled" --------------------------mapped x
             10: 1, # "car"
             11: 2, # "bicycle" x
             13: 5, # "bus" mapped to "other-vehicle" --------------------------mapped
             15: 3, # "motorcycle" x
             16: 5, # "on-rails" mapped to "other-vehicle" ---------------------mapped
             18: 4, # "truck"
             20: 5, # "other-vehicle"
             30: 6, # "person" x
             31: 7, # "bicyclist" x
             32: 8, # "motorcyclist" x
             40: 9, # "road"
             44: 10, # "parking"
             48: 11, # "sidewalk"
             49: 12, # "other-ground"
             50: 13, # "building"
             51: 14, # "fence"
             52: 0, # "other-structure" mapped to "unlabeled" ------------------mapped
             60: 9, # "lane-marking" to "road" ---------------------------------mapped
             70: 15, # "vegetation"
             71: 16, # "trunk"
             72: 17, # "terrain"
             80: 18, # "pole"
             81: 19, # "traffic-sign"
             99: 0, # "other-object" to "unlabeled" ----------------------------mapped
             252: 1, # "moving-car" to "car" ------------------------------------mapped
             253: 7, # "moving-bicyclist" to "bicyclist" ------------------------mapped
             254: 6, # "moving-person" to "person" ------------------------------mapped
             255: 8, # "moving-motorcyclist" to "motorcyclist" ------------------mapped
             256: 5, # "moving-on-rails" mapped to "other-vehicle" --------------mapped
             257: 5, # "moving-bus" mapped to "other-vehicle" -------------------mapped
             258: 4, # "moving-truck" to "truck" --------------------------------mapped
             259: 5}
max_key = max(learning_map.keys())
# Vectorized lookup table: remap_lut[raw_label] -> train label.  Padded with
# 100 extra zero entries so slightly out-of-range raw ids map to "unlabeled"
# instead of indexing out of bounds.
remap_lut = np.zeros((max_key + 100), dtype=np.int32)
remap_lut[list(learning_map.keys())] = list(learning_map.values())
# Training label -> graph-node index for the subset of classes used as nodes.
node_map={
    1: 0,   # "car"
    4: 1,   # "truck"
    5: 2,   # "other-vehicle"
    11: 3,  # "sidewalk"
    12: 4,  # "other-ground"
    13: 5,  # "building"
    14: 6,  # "fence"
    15: 7,  # "vegetation"
    16: 8,  # "trunk"
    17: 9,  # "terrain"
    18: 10, # "pole"
    19: 11  # "traffic-sign"
}
def open3d_color():
    """Return a random RGB color as a 3-tuple of floats in [0, 1)."""
    return tuple(random.random() for _ in range(3))
def Visualize():
    """Render each semantic class of the current scan as a colored point cloud.

    NOTE(review): depends on module-level globals (``sem_label_set``,
    ``sem_label``, ``points``, ``color_map``, ``learning_map_inv``) that are
    not defined in this file chunk -- presumably set up by the calling
    script; confirm before use.  Uses the pre-0.8 open3d API
    (``open3d.PointCloud`` / ``open3d.draw_geometries``).
    """
    viz_point = open3d.PointCloud()
    point_cloud = open3d.PointCloud()
    for id_i, label_i in enumerate(sem_label_set):
        print('sem_label:', label_i)
        # Indices of all points carrying this semantic label.
        index = np.argwhere(sem_label == label_i)
        index = index.reshape(index.shape[0])
        sem_cluster = points[index, :]
        point_cloud.points = open3d.Vector3dVector(sem_cluster[:, 0:3])
        # Look up the class color and rescale from 0-255 ints to 0-1 floats.
        color = color_map[learning_map_inv[label_i]]
        color = (color[0] / 255, color[1] / 255, color[2] / 255)
        # print(color)
        point_cloud.paint_uniform_color(color)
        viz_point += point_cloud
        # One blocking viewer window per class (closes before the next opens).
        open3d.draw_geometries([point_cloud], window_name='semantic label:' + str(111),
                               width=1920, height=1080, left=50, top=50)
def _make_point_field(num_field):
    """Assemble the PointField descriptors for one PointCloud2 record.

    Parameters
    ----------
    num_field : int
        4 -> fields (x, y, z, node); 6 -> (x, y, z, intensity, sem_label,
        inst_label).

    Returns
    -------
    list
        List of ``pc2.PointField`` messages describing the point layout.

    Raises
    ------
    ValueError
        If ``num_field`` is neither 4 nor 6.

    Notes
    -----
    Fix: the original assigned names via ``np.str(...)``; that alias was
    deprecated in NumPy 1.20 and removed in 1.24, so plain ``str`` literals
    are used instead (behavior is identical).
    Datatype 7 corresponds to PointField.FLOAT32 (see sensor_msgs/PointField).
    The offset jumps from 8 (z) to 16, skipping bytes 12-15 — presumably
    deliberate padding, but verify against the producer of these clouds.
    """
    msg_pf1 = pc2.PointField()
    msg_pf1.name = 'x'
    msg_pf1.offset = np.uint32(0)
    msg_pf1.datatype = np.uint8(7)
    msg_pf1.count = np.uint32(1)
    msg_pf2 = pc2.PointField()
    msg_pf2.name = 'y'
    msg_pf2.offset = np.uint32(4)
    msg_pf2.datatype = np.uint8(7)
    msg_pf2.count = np.uint32(1)
    msg_pf3 = pc2.PointField()
    msg_pf3.name = 'z'
    msg_pf3.offset = np.uint32(8)
    msg_pf3.datatype = np.uint8(7)
    msg_pf3.count = np.uint32(1)
    if num_field == 4:
        msg_pf4 = pc2.PointField()
        msg_pf4.name = 'node'
        msg_pf4.offset = np.uint32(16)
        msg_pf4.datatype = np.uint8(7)
        msg_pf4.count = np.uint32(1)
        return [msg_pf1, msg_pf2, msg_pf3, msg_pf4]
    elif num_field == 6:
        msg_pf4 = pc2.PointField()
        msg_pf4.name = 'intensity'
        msg_pf4.offset = np.uint32(16)
        msg_pf4.datatype = np.uint8(7)
        msg_pf4.count = np.uint32(1)
        msg_pf5 = pc2.PointField()
        msg_pf5.name = 'sem_label'
        msg_pf5.offset = np.uint32(20)
        msg_pf5.datatype = np.uint8(7)
        msg_pf5.count = np.uint32(1)
        msg_pf6 = pc2.PointField()
        msg_pf6.name = 'inst_label'
        msg_pf6.offset = np.uint32(24)
        msg_pf6.datatype = np.uint8(7)
        msg_pf6.count = np.uint32(1)
        return [msg_pf1, msg_pf2, msg_pf3, msg_pf4, msg_pf5, msg_pf6]
    else:
        raise ValueError("wrong num_field.")
class Semantic_kitti_node(object):
    """ROS node that turns SemanticKITTI scans into per-instance clusters and a
    semantic graph, optionally publishing both as PointCloud2 messages and/or
    saving them to disk (.npy for labels, .json for graphs).
    """
    def __init__(self, pub_rate=10, label_topic='', graph_topic=''):
        """Initialize the ROS node and its two publishers.

        :param pub_rate: publishing frequency in Hz (stored; the actual rate
            limiting is done by the caller via rospy.Rate)
        :param label_topic: topic for the labeled point cloud
        :param graph_topic: topic for the graph-node point cloud
        """
        self._pub_rate = pub_rate
        # publisher
        self._labels_pub = rospy.Publisher(label_topic, PointCloud2, queue_size=10)
        self._graph_pub = rospy.Publisher(graph_topic, PointCloud2, queue_size = 10)
        # ros node init
        rospy.init_node('node', anonymous=True)
        rospy.loginfo("node started.")
        # Separate headers for the two published clouds, both in the sensor frame.
        self.header1 = Header()
        self.header1.stamp = rospy.Time()
        self.header1.frame_id = "velodyne"
        self.header2 = Header()
        self.header2.stamp = rospy.Time()
        self.header2.frame_id = "velodyne"
    def gen_labels(self, FLAGS, scan_name, label_name, label_output_dir):
        """Cluster one scan into instances and return an Nx6 array
        [x, y, z, r, sem_label, inst_id]; optionally saves it as .npy and/or
        publishes it, depending on FLAGS.pub_or_path.
        """
        # start = time.time()
        # open scan
        # TODO(yxm): downsampling
        scan = np.fromfile(scan_name, dtype=np.float32)
        scan = scan.reshape((-1, 4))
        # put in attribute
        points = scan[:, 0:4]  # xyz + remission per point
        remissions = scan[:, 3]  # NOTE(review): unused below
        label = np.fromfile(label_name, dtype=np.uint32)
        label = label.reshape((-1))
        # Optionally drop a random angular sector of the scan to simulate
        # partial observation ("demolition").
        if FLAGS.demolition == True:
            start_angle = np.random.random()
            start_angle *= 360
            # NOTE(review): drop_angle is not defined in this chunk —
            # presumably a module-level constant; confirm it exists.
            end_angle = (start_angle + drop_angle)%360
            angle = np.arctan2(points[:, 1], points[:, 0])
            angle = angle*180/np.pi
            angle += 180
            # print("angle:", angle)
            if end_angle > start_angle:
                # Keep everything outside the [start_angle, end_angle] sector.
                remain_id = np.argwhere(angle < start_angle).reshape(-1)
                remain_id = np.append(remain_id, np.argwhere(angle > end_angle).reshape(-1))
            else:
                # Sector wraps past 360: keep the complement interval.
                remain_id = np.argwhere((angle > end_angle) & (angle < start_angle)).reshape(-1)
            points = points[remain_id, : ]
            label = label[remain_id]
        if label.shape[0] == points.shape[0]:
            # SemanticKITTI packs both labels into one uint32:
            sem_label = label & 0xFFFF  # semantic label in lower half
            inst_label = label >> 16    # instance id in upper half
            assert ((sem_label + (inst_label << 16) == label).all())
        else:
            print("Points shape: ", points.shape)
            print("Label shape: ", label.shape)
            raise ValueError("Scan and Label don't contain same number of points")
        # Map raw label ids to the reduced training ids.
        sem_label = remap_lut[sem_label]
        sem_label_set = list(set(sem_label))
        sem_label_set.sort()
        # Start clustering; `cluster` collects Nx6 rows, inst_id is a running
        # instance counter unique within this scan.
        cluster = []
        inst_id = 0
        for id_i, label_i in enumerate(sem_label_set):
            # print('sem_label:', label_i)
            index = np.argwhere(sem_label == label_i)
            index = index.reshape(index.shape[0])
            sem_cluster = points[index, :]
            # print("sem_cluster_shape:",sem_cluster.shape[0])
            tmp_inst_label = inst_label[index]
            tmp_inst_set = list(set(tmp_inst_label))
            tmp_inst_set.sort()
            # print(tmp_inst_set)
            if label_i in [9, 10]:  # road/parking: keep as one cluster, no clustering
                inst_cluster = sem_cluster
                # Append the semantic label column, then the instance-id column.
                inst_cluster = np.concatenate((inst_cluster, np.full((inst_cluster.shape[0],1), label_i, dtype=np.uint32)), axis=1)
                # inst_cluster = np.insert(inst_cluster, 4, label_i, axis=1)
                inst_cluster = np.concatenate((inst_cluster, np.full((inst_cluster.shape[0],1), inst_id, dtype=np.uint32)), axis=1)
                inst_id = inst_id + 1
                cluster.extend(inst_cluster)  # Nx6
                continue
            elif label_i in [0,2,3,6,7,8]:  # discard these classes entirely
                continue
            elif len(tmp_inst_set) > 1 or (len(tmp_inst_set) == 1 and tmp_inst_set[0] != 0):  # dataset provides instance ids
                for id_j, label_j in enumerate(tmp_inst_set):
                    points_index = np.argwhere(tmp_inst_label == label_j)
                    points_index = points_index.reshape(points_index.shape[0])
                    # print(id_j, 'inst_size:', len(points_index))
                    if len(points_index) <= 20:
                        # Skip tiny instances (noise).
                        continue
                    inst_cluster = sem_cluster[points_index, :]
                    inst_cluster = np.concatenate((inst_cluster, np.full((inst_cluster.shape[0],1), label_i, dtype=np.uint32)), axis=1)
                    # inst_cluster = np.insert(inst_cluster, 4, label_i, axis=1)
                    inst_cluster = np.concatenate((inst_cluster, np.full((inst_cluster.shape[0],1), inst_id, dtype=np.uint32)), axis=1)
                    inst_id = inst_id + 1
                    cluster.extend(inst_cluster)
            else:  # no instance ids: fall back to Euclidean clustering (PCL)
                # time_start = time.time()
                # Per-class clustering tolerance (meters).
                if label_i in [1, 4, 5, 14]:  # car truck other-vehicle fence
                    cluster_tolerance = 0.5
                elif label_i in [11, 12, 13, 15, 17]:  # sidewalk other-ground building vegetation terrain
                    cluster_tolerance = 2
                else:
                    cluster_tolerance = 0.2
                # Per-class minimum cluster size (points).
                if label_i in [16, 19]:  # trunk traffic-sign
                    min_size = 50
                elif label_i == 15:  # vegetation
                    min_size = 200
                elif label_i in [11, 12, 13, 17]:  # sidewalk other-ground building terrain
                    min_size = 300
                else:
                    min_size = 100
                # print(cluster_tolerance, min_size)
                cloud = pcl.PointCloud(sem_cluster[:, 0:3])
                tree = cloud.make_kdtree()
                ec = cloud.make_EuclideanClusterExtraction()
                ec.set_ClusterTolerance(cluster_tolerance)
                ec.set_MinClusterSize(min_size)
                ec.set_MaxClusterSize(50000)
                ec.set_SearchMethod(tree)
                cluster_indices = ec.Extract()
                # time_end = time.time()
                # print(time_end - time_start)
                for j, indices in enumerate(cluster_indices):
                    # print('j = ', j, ', indices = ' + str(len(indices)))
                    # NOTE(review): this np.zeros is immediately overwritten
                    # by the next assignment.
                    inst_cluster = np.zeros((len(indices), 4), dtype=np.float32)
                    inst_cluster = sem_cluster[np.array(indices), 0:4]
                    inst_cluster = np.concatenate((inst_cluster, np.full((inst_cluster.shape[0],1), label_i, dtype=np.uint32)), axis=1)
                    # inst_cluster = np.insert(inst_cluster, 4, label_i, axis=1)
                    inst_cluster = np.concatenate((inst_cluster, np.full((inst_cluster.shape[0],1), inst_id, dtype=np.uint32)), axis=1)
                    inst_id = inst_id + 1
                    cluster.extend(inst_cluster)  # Nx6
        # print(time.time()-start)
        # print('*'*80)
        cluster = np.array(cluster)
        if 'path' in FLAGS.pub_or_path:
            # Save next to the sequence using the scan's basename.
            np.save(label_output_dir+'/'+label_name.split('/')[-1].split('.')[0]+".npy", cluster)
        if 'pub' in FLAGS.pub_or_path:
            # print(cluster[11100:11110])
            msg_points = pc2.create_cloud(header=self.header1, fields=_make_point_field(cluster.shape[1]), points=cluster)
            self._labels_pub.publish(msg_points)
        return cluster
    def gen_graphs(self, FLAGS, scan_name, scan, graph_output_dir):
        """Build a graph over the instance clusters of one scan.

        :param scan: Nx6 array produced by gen_labels (last two columns are
            semantic label and instance id)
        Produces {"nodes", "edges", "weights", "centers"}; saved as JSON
        and/or published depending on FLAGS.pub_or_path.
        """
        inst = scan[:, -1]  # instance-id column
        inst_label_set = list(set(inst))  # unique instance ids
        inst_label_set.sort()
        # print("inst set: ", inst_label_set)
        nodes = []    # graph node class ids
        edges = []    # graph edges as [i, j] index pairs
        weights = []  # graph edge weights
        cluster = []  # per-node xyz point sets
        centers = []  # per-node centroids
        for id_i in range(len(inst_label_set)):
            index = np.argwhere(inst_label_set[id_i] == inst)  # query cluster by instance id
            index = index.reshape(index.shape[0])
            inst_cluster = scan[index, :]
            sem_label = list(set(inst_cluster[:, -2]))  # semantic labels in this cluster
            assert len(sem_label) == 1  # one instance cluster should have only one semantic label
            if int(sem_label[0]) in node_map.keys():
                cluster.append(inst_cluster[:, :3])
                node_label = node_map[int(sem_label[0])]  # add node
                nodes.append(int(node_label))
                cluster_center = np.mean(inst_cluster[:, :3], axis=0)
                centers.append((cluster_center.tolist()))
            elif int(sem_label[0]) == 9 or int(sem_label[0]) == 10:  # ignore "road" and "parking"
                continue
            else:
                # NOTE(review): exit(-1) kills the whole process on one bad
                # label; an exception would be easier to recover from.
                print("wrong semantic label: ", sem_label[0])
                exit(-1)
        dist_thresh = 5  # clusters closer than this (meters) get an edge
        for i in range(len(cluster)-1):
            for j in range(i+1, len(cluster)):
                pc_i = cluster[i]
                pc_j = cluster[j]
                center_i = np.mean(pc_i, axis=0)
                center_j = np.mean(pc_j, axis=0)
                center = np.mean([center_i, center_j], axis=0)  # midpoint of the two centroids
                # Nearest point of each cluster to that midpoint; their gap
                # approximates the minimum distance between the clusters.
                index1 = np.argmin(np.linalg.norm(center - pc_i[:,None], axis=-1), axis=0)
                index2 = np.argmin(np.linalg.norm(center - pc_j[:,None], axis=-1), axis=0)
                min_dis = np.linalg.norm(pc_i[index1] - pc_j[index2], axis=-1)
                if min_dis <= dist_thresh:
                    edges.append([i, j])  # add edge
                    weight = float(1-min_dis/dist_thresh)  # w = 1 - d/d_thresh: [0~5m] -> [1~0]
                    weights.append(weight)  # add edge weight
                else:
                    pass
        # generate graph
        graph = {"nodes": nodes,
                 "edges": edges,
                 "weights": weights,
                 "centers": centers
                 }
        # print(graph)
        if 'path' in FLAGS.pub_or_path:
            file_name = os.path.join(graph_output_dir, scan_name.split('/')[-1].split('.')[0]+".json")
            # print("output json: ", file_name)
            with open(file_name, "w", encoding="utf-8") as file: json.dump(graph, file)
        if 'pub' in FLAGS.pub_or_path:
            # Publish node centroids with their class id as a 4th field.
            centers = np.array(centers)
            nodes = np.array(nodes)
            pub_nodes = np.concatenate((centers, nodes.reshape(-1, 1).astype(np.uint32)), axis=1)
            msg_points = pc2.create_cloud(header=self.header2, fields=_make_point_field(pub_nodes.shape[1]), points=pub_nodes)
            self._graph_pub.publish(msg_points)
            # rospy.loginfo(scan_names[frame])
if __name__ == '__main__':
    # CLI: dataset location, config, output dirs, and publish-vs-save behavior.
    parser = argparse.ArgumentParser("./gen_graph.py")
    parser.add_argument('--dataset', '-d', type=str, required=False, default="/media/yxm/文档/data/kitti/dataset/", help='Dataset to calculate content. No Default')
    parser.add_argument('--config', '-c', type=str, required=False, default="config/semantic-kitti.yaml", help='Dataset config file. Defaults to %(default)s')
    parser.add_argument('--output_label', type=str, required=False, default="/media/kx/Semantic_KITTI/debug/labels", help='Output path for labels')
    parser.add_argument('--output_graph', type=str, required=False, default="/media/kx/Semantic_KITTI/debug/graphs", help='Output path for labels')
    parser.add_argument('--pub_or_path', type=str, required=False, default="path", help='pub_or_path')
    parser.add_argument('--pub_rate', type=int, default=10, help='the frequency(hz) of pc published, default `10`')
    parser.add_argument('--label_topic', type=str, default='/labeled_pc', help='the 3D point cloud message topic to be published, default `/labeled_pc`')
    parser.add_argument('--graph_topic', type=str, default='/graphs', help='the semantic graph message topic to be published, default `/graphs`')
    # NOTE(review): argparse `type=bool` is a known pitfall — any non-empty
    # string (including "False") parses as True; only omitting the flag
    # yields False.
    parser.add_argument('--demolition', type=bool, default=False, help='demolition or not')
    FLAGS, unparsed = parser.parse_known_args()
    # print summary of what we will do
    print("*" * 80)
    print("INTERFACE:")
    print("Dataset", FLAGS.dataset)
    print("Config", FLAGS.config)
    print("*" * 80)
    # open config file
    try:
        print("Opening config file %s" % FLAGS.config)
        CFG = yaml.safe_load(open(FLAGS.config, 'r'))
    except Exception as e:
        print(e)
        print("Error opening yaml file.")
        quit()
    # get training sequences to calculate statistics
    sequences = CFG["split"]["train"][:]
    color_map = CFG['color_map']
    learning_map_inv = CFG['learning_map_inv']
    print("Analizing sequences", sequences)
    # iterate over sequences
    for seq in sequences[:]:
        # make seq string, zero-padded to two digits (e.g. "00", "08")
        print("*" * 80)
        seqstr = '{0:02d}'.format(int(seq))
        print("parsing seq {}".format(seq))
        # prepare output dirs for this sequence
        label_output_dir = FLAGS.output_label + '/' + seqstr
        if not os.path.exists(label_output_dir):
            os.makedirs(label_output_dir)
        graph_output_dir = FLAGS.output_graph + '/' + seqstr
        if not os.path.exists(graph_output_dir):
            os.makedirs(graph_output_dir)
        # does sequence folder exist?
        scan_paths = os.path.join(FLAGS.dataset, "sequences", seqstr, "velodyne")
        if os.path.isdir(scan_paths):
            print("Sequence folder exists!")
        else:
            print("Sequence folder doesn't exist! Exiting...")
            quit()
        # collect all scan files (recursively), sorted by name
        scan_names = [os.path.join(dp, f) for dp, dn, fn in os.walk(os.path.expanduser(scan_paths)) for f in fn]
        scan_names.sort()
        # does labels folder exist?
        label_paths = os.path.join(FLAGS.dataset, "sequences", seqstr, "labels")
        if os.path.isdir(label_paths):
            print("Labels folder exists!")
        else:
            print("Labels folder doesn't exist! Exiting...")
            quit()
        # collect all label files (recursively), sorted by name
        label_names = [os.path.join(dp, f) for dp, dn, fn in os.walk(os.path.expanduser(label_paths)) for f in fn]
        label_names.sort()
        # check that there are same amount of labels and scans
        # print(len(label_names))
        # print(len(scan_names))
        assert(len(label_names) == len(scan_names))
        # create the ROS node and process the sequence frame by frame
        node = Semantic_kitti_node(FLAGS.pub_rate, FLAGS.label_topic, FLAGS.graph_topic)
        rate = rospy.Rate(FLAGS.pub_rate)
        for frame in tqdm(range(len(scan_names))):
            if rospy.is_shutdown():
                break
            cluster = node.gen_labels(FLAGS, scan_names[frame], label_names[frame], label_output_dir)
            node.gen_graphs(FLAGS, scan_names[frame], cluster, graph_output_dir)
            rate.sleep()
# rospy.logwarn("%d frames published.", frame) | [
"numpy.uint8",
"numpy.fromfile",
"open3d.PointCloud",
"rospy.init_node",
"numpy.array",
"rospy.Rate",
"numpy.arctan2",
"numpy.linalg.norm",
"os.path.exists",
"numpy.mean",
"argparse.ArgumentParser",
"numpy.random.random",
"numpy.uint32",
"os.path.isdir",
"sensor_msgs.point_cloud2.PointFi... | [((2547, 2586), 'numpy.zeros', 'np.zeros', (['(max_key + 100)'], {'dtype': 'np.int32'}), '(max_key + 100, dtype=np.int32)\n', (2555, 2586), True, 'import numpy as np\n'), ((3009, 3024), 'random.random', 'random.random', ([], {}), '()\n', (3022, 3024), False, 'import random\n'), ((3033, 3048), 'random.random', 'random.random', ([], {}), '()\n', (3046, 3048), False, 'import random\n'), ((3057, 3072), 'random.random', 'random.random', ([], {}), '()\n', (3070, 3072), False, 'import random\n'), ((3126, 3145), 'open3d.PointCloud', 'open3d.PointCloud', ([], {}), '()\n', (3143, 3145), False, 'import open3d\n'), ((3164, 3183), 'open3d.PointCloud', 'open3d.PointCloud', ([], {}), '()\n', (3181, 3183), False, 'import open3d\n'), ((3926, 3942), 'sensor_msgs.point_cloud2.PointField', 'pc2.PointField', ([], {}), '()\n', (3940, 3942), True, 'import sensor_msgs.point_cloud2 as pc2\n'), ((3962, 3973), 'numpy.str', 'np.str', (['"""x"""'], {}), "('x')\n", (3968, 3973), True, 'import numpy as np\n'), ((3995, 4007), 'numpy.uint32', 'np.uint32', (['(0)'], {}), '(0)\n', (4004, 4007), True, 'import numpy as np\n'), ((4031, 4042), 'numpy.uint8', 'np.uint8', (['(7)'], {}), '(7)\n', (4039, 4042), True, 'import numpy as np\n'), ((4063, 4075), 'numpy.uint32', 'np.uint32', (['(1)'], {}), '(1)\n', (4072, 4075), True, 'import numpy as np\n'), ((4091, 4107), 'sensor_msgs.point_cloud2.PointField', 'pc2.PointField', ([], {}), '()\n', (4105, 4107), True, 'import sensor_msgs.point_cloud2 as pc2\n'), ((4127, 4138), 'numpy.str', 'np.str', (['"""y"""'], {}), "('y')\n", (4133, 4138), True, 'import numpy as np\n'), ((4160, 4172), 'numpy.uint32', 'np.uint32', (['(4)'], {}), '(4)\n', (4169, 4172), True, 'import numpy as np\n'), ((4196, 4207), 'numpy.uint8', 'np.uint8', (['(7)'], {}), '(7)\n', (4204, 4207), True, 'import numpy as np\n'), ((4228, 4240), 'numpy.uint32', 'np.uint32', (['(1)'], {}), '(1)\n', (4237, 4240), True, 'import numpy as np\n'), ((4256, 4272), 
'sensor_msgs.point_cloud2.PointField', 'pc2.PointField', ([], {}), '()\n', (4270, 4272), True, 'import sensor_msgs.point_cloud2 as pc2\n'), ((4292, 4303), 'numpy.str', 'np.str', (['"""z"""'], {}), "('z')\n", (4298, 4303), True, 'import numpy as np\n'), ((4325, 4337), 'numpy.uint32', 'np.uint32', (['(8)'], {}), '(8)\n', (4334, 4337), True, 'import numpy as np\n'), ((4361, 4372), 'numpy.uint8', 'np.uint8', (['(7)'], {}), '(7)\n', (4369, 4372), True, 'import numpy as np\n'), ((4393, 4405), 'numpy.uint32', 'np.uint32', (['(1)'], {}), '(1)\n', (4402, 4405), True, 'import numpy as np\n'), ((16926, 16967), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""./gen_graph.py"""'], {}), "('./gen_graph.py')\n", (16949, 16967), False, 'import argparse\n'), ((3298, 3331), 'numpy.argwhere', 'np.argwhere', (['(sem_label == label_i)'], {}), '(sem_label == label_i)\n', (3309, 3331), True, 'import numpy as np\n'), ((3451, 3493), 'open3d.Vector3dVector', 'open3d.Vector3dVector', (['sem_cluster[:, 0:3]'], {}), '(sem_cluster[:, 0:3])\n', (3472, 3493), False, 'import open3d\n'), ((4448, 4464), 'sensor_msgs.point_cloud2.PointField', 'pc2.PointField', ([], {}), '()\n', (4462, 4464), True, 'import sensor_msgs.point_cloud2 as pc2\n'), ((4488, 4502), 'numpy.str', 'np.str', (['"""node"""'], {}), "('node')\n", (4494, 4502), True, 'import numpy as np\n'), ((4528, 4541), 'numpy.uint32', 'np.uint32', (['(16)'], {}), '(16)\n', (4537, 4541), True, 'import numpy as np\n'), ((4569, 4580), 'numpy.uint8', 'np.uint8', (['(7)'], {}), '(7)\n', (4577, 4580), True, 'import numpy as np\n'), ((4605, 4617), 'numpy.uint32', 'np.uint32', (['(1)'], {}), '(1)\n', (4614, 4617), True, 'import numpy as np\n'), ((6461, 6517), 'rospy.Publisher', 'rospy.Publisher', (['label_topic', 'PointCloud2'], {'queue_size': '(10)'}), '(label_topic, PointCloud2, queue_size=10)\n', (6476, 6517), False, 'import rospy\n'), ((6544, 6600), 'rospy.Publisher', 'rospy.Publisher', (['graph_topic', 'PointCloud2'], {'queue_size': 
'(10)'}), '(graph_topic, PointCloud2, queue_size=10)\n', (6559, 6600), False, 'import rospy\n'), ((6635, 6674), 'rospy.init_node', 'rospy.init_node', (['"""node"""'], {'anonymous': '(True)'}), "('node', anonymous=True)\n", (6650, 6674), False, 'import rospy\n'), ((6683, 6713), 'rospy.loginfo', 'rospy.loginfo', (['"""node started."""'], {}), "('node started.')\n", (6696, 6713), False, 'import rospy\n'), ((6738, 6746), 'std_msgs.msg.Header', 'Header', ([], {}), '()\n', (6744, 6746), False, 'from std_msgs.msg import Header\n'), ((6776, 6788), 'rospy.Time', 'rospy.Time', ([], {}), '()\n', (6786, 6788), False, 'import rospy\n'), ((6856, 6864), 'std_msgs.msg.Header', 'Header', ([], {}), '()\n', (6862, 6864), False, 'from std_msgs.msg import Header\n'), ((6894, 6906), 'rospy.Time', 'rospy.Time', ([], {}), '()\n', (6904, 6906), False, 'import rospy\n'), ((7128, 7168), 'numpy.fromfile', 'np.fromfile', (['scan_name'], {'dtype': 'np.float32'}), '(scan_name, dtype=np.float32)\n', (7139, 7168), True, 'import numpy as np\n'), ((7341, 7381), 'numpy.fromfile', 'np.fromfile', (['label_name'], {'dtype': 'np.uint32'}), '(label_name, dtype=np.uint32)\n', (7352, 7381), True, 'import numpy as np\n'), ((13139, 13156), 'numpy.array', 'np.array', (['cluster'], {}), '(cluster)\n', (13147, 13156), True, 'import numpy as np\n'), ((19483, 19543), 'os.path.join', 'os.path.join', (['FLAGS.dataset', '"""sequences"""', 'seqstr', '"""velodyne"""'], {}), "(FLAGS.dataset, 'sequences', seqstr, 'velodyne')\n", (19495, 19543), False, 'import os\n'), ((19555, 19580), 'os.path.isdir', 'os.path.isdir', (['scan_paths'], {}), '(scan_paths)\n', (19568, 19580), False, 'import os\n'), ((19958, 20016), 'os.path.join', 'os.path.join', (['FLAGS.dataset', '"""sequences"""', 'seqstr', '"""labels"""'], {}), "(FLAGS.dataset, 'sequences', seqstr, 'labels')\n", (19970, 20016), False, 'import os\n'), ((20028, 20054), 'os.path.isdir', 'os.path.isdir', (['label_paths'], {}), '(label_paths)\n', (20041, 20054), False, 
'import os\n'), ((20682, 20708), 'rospy.Rate', 'rospy.Rate', (['FLAGS.pub_rate'], {}), '(FLAGS.pub_rate)\n', (20692, 20708), False, 'import rospy\n'), ((4713, 4729), 'sensor_msgs.point_cloud2.PointField', 'pc2.PointField', ([], {}), '()\n', (4727, 4729), True, 'import sensor_msgs.point_cloud2 as pc2\n'), ((4753, 4772), 'numpy.str', 'np.str', (['"""intensity"""'], {}), "('intensity')\n", (4759, 4772), True, 'import numpy as np\n'), ((4798, 4811), 'numpy.uint32', 'np.uint32', (['(16)'], {}), '(16)\n', (4807, 4811), True, 'import numpy as np\n'), ((4839, 4850), 'numpy.uint8', 'np.uint8', (['(7)'], {}), '(7)\n', (4847, 4850), True, 'import numpy as np\n'), ((4885, 4897), 'numpy.uint32', 'np.uint32', (['(1)'], {}), '(1)\n', (4894, 4897), True, 'import numpy as np\n'), ((4917, 4933), 'sensor_msgs.point_cloud2.PointField', 'pc2.PointField', ([], {}), '()\n', (4931, 4933), True, 'import sensor_msgs.point_cloud2 as pc2\n'), ((4957, 4976), 'numpy.str', 'np.str', (['"""sem_label"""'], {}), "('sem_label')\n", (4963, 4976), True, 'import numpy as np\n'), ((5002, 5015), 'numpy.uint32', 'np.uint32', (['(20)'], {}), '(20)\n', (5011, 5015), True, 'import numpy as np\n'), ((5043, 5054), 'numpy.uint8', 'np.uint8', (['(7)'], {}), '(7)\n', (5051, 5054), True, 'import numpy as np\n'), ((5090, 5102), 'numpy.uint32', 'np.uint32', (['(1)'], {}), '(1)\n', (5099, 5102), True, 'import numpy as np\n'), ((5122, 5138), 'sensor_msgs.point_cloud2.PointField', 'pc2.PointField', ([], {}), '()\n', (5136, 5138), True, 'import sensor_msgs.point_cloud2 as pc2\n'), ((5162, 5182), 'numpy.str', 'np.str', (['"""inst_label"""'], {}), "('inst_label')\n", (5168, 5182), True, 'import numpy as np\n'), ((5208, 5221), 'numpy.uint32', 'np.uint32', (['(24)'], {}), '(24)\n', (5217, 5221), True, 'import numpy as np\n'), ((5249, 5260), 'numpy.uint8', 'np.uint8', (['(7)'], {}), '(7)\n', (5257, 5260), True, 'import numpy as np\n'), ((5296, 5308), 'numpy.uint32', 'np.uint32', (['(1)'], {}), '(1)\n', (5305, 5308), True, 
'import numpy as np\n'), ((7510, 7528), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (7526, 7528), True, 'import numpy as np\n'), ((7636, 7674), 'numpy.arctan2', 'np.arctan2', (['points[:, 1]', 'points[:, 0]'], {}), '(points[:, 1], points[:, 0])\n', (7646, 7674), True, 'import numpy as np\n'), ((8926, 8959), 'numpy.argwhere', 'np.argwhere', (['(sem_label == label_i)'], {}), '(sem_label == label_i)\n', (8937, 8959), True, 'import numpy as np\n'), ((14070, 14111), 'numpy.argwhere', 'np.argwhere', (['(inst_label_set[id_i] == inst)'], {}), '(inst_label_set[id_i] == inst)\n', (14081, 14111), True, 'import numpy as np\n'), ((16510, 16527), 'numpy.array', 'np.array', (['centers'], {}), '(centers)\n', (16518, 16527), True, 'import numpy as np\n'), ((16548, 16563), 'numpy.array', 'np.array', (['nodes'], {}), '(nodes)\n', (16556, 16563), True, 'import numpy as np\n'), ((19186, 19218), 'os.path.exists', 'os.path.exists', (['label_output_dir'], {}), '(label_output_dir)\n', (19200, 19218), False, 'import os\n'), ((19232, 19261), 'os.makedirs', 'os.makedirs', (['label_output_dir'], {}), '(label_output_dir)\n', (19243, 19261), False, 'import os\n'), ((19339, 19371), 'os.path.exists', 'os.path.exists', (['graph_output_dir'], {}), '(graph_output_dir)\n', (19353, 19371), False, 'import os\n'), ((19385, 19414), 'os.makedirs', 'os.makedirs', (['graph_output_dir'], {}), '(graph_output_dir)\n', (19396, 19414), False, 'import os\n'), ((19780, 19799), 'os.path.join', 'os.path.join', (['dp', 'f'], {}), '(dp, f)\n', (19792, 19799), False, 'import os\n'), ((20251, 20270), 'os.path.join', 'os.path.join', (['dp', 'f'], {}), '(dp, f)\n', (20263, 20270), False, 'import os\n'), ((20775, 20794), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (20792, 20794), False, 'import rospy\n'), ((14665, 14701), 'numpy.mean', 'np.mean', (['inst_cluster[:, :3]'], {'axis': '(0)'}), '(inst_cluster[:, :3], axis=0)\n', (14672, 14701), True, 'import numpy as np\n'), ((15242, 15263), 
'numpy.mean', 'np.mean', (['pc_i'], {'axis': '(0)'}), '(pc_i, axis=0)\n', (15249, 15263), True, 'import numpy as np\n'), ((15291, 15312), 'numpy.mean', 'np.mean', (['pc_j'], {'axis': '(0)'}), '(pc_j, axis=0)\n', (15298, 15312), True, 'import numpy as np\n'), ((15338, 15375), 'numpy.mean', 'np.mean', (['[center_i, center_j]'], {'axis': '(0)'}), '([center_i, center_j], axis=0)\n', (15345, 15375), True, 'import numpy as np\n'), ((15612, 15664), 'numpy.linalg.norm', 'np.linalg.norm', (['(pc_i[index1] - pc_j[index2])'], {'axis': '(-1)'}), '(pc_i[index1] - pc_j[index2], axis=-1)\n', (15626, 15664), True, 'import numpy as np\n'), ((16426, 16448), 'json.dump', 'json.dump', (['graph', 'file'], {}), '(graph, file)\n', (16435, 16448), False, 'import json\n'), ((15439, 15486), 'numpy.linalg.norm', 'np.linalg.norm', (['(center - pc_i[:, None])'], {'axis': '(-1)'}), '(center - pc_i[:, None], axis=-1)\n', (15453, 15486), True, 'import numpy as np\n'), ((15530, 15577), 'numpy.linalg.norm', 'np.linalg.norm', (['(center - pc_j[:, None])'], {'axis': '(-1)'}), '(center - pc_j[:, None], axis=-1)\n', (15544, 15577), True, 'import numpy as np\n'), ((19826, 19856), 'os.path.expanduser', 'os.path.expanduser', (['scan_paths'], {}), '(scan_paths)\n', (19844, 19856), False, 'import os\n'), ((20297, 20328), 'os.path.expanduser', 'os.path.expanduser', (['label_paths'], {}), '(label_paths)\n', (20315, 20328), False, 'import os\n'), ((7841, 7873), 'numpy.argwhere', 'np.argwhere', (['(angle < start_angle)'], {}), '(angle < start_angle)\n', (7852, 7873), True, 'import numpy as np\n'), ((8025, 8081), 'numpy.argwhere', 'np.argwhere', (['((angle > end_angle) & (angle < start_angle))'], {}), '((angle > end_angle) & (angle < start_angle))\n', (8036, 8081), True, 'import numpy as np\n'), ((9463, 9524), 'numpy.full', 'np.full', (['(inst_cluster.shape[0], 1)', 'label_i'], {'dtype': 'np.uint32'}), '((inst_cluster.shape[0], 1), label_i, dtype=np.uint32)\n', (9470, 9524), True, 'import numpy as np\n'), 
((9672, 9733), 'numpy.full', 'np.full', (['(inst_cluster.shape[0], 1)', 'inst_id'], {'dtype': 'np.uint32'}), '((inst_cluster.shape[0], 1), inst_id, dtype=np.uint32)\n', (9679, 9733), True, 'import numpy as np\n'), ((11853, 11888), 'pcl.PointCloud', 'pcl.PointCloud', (['sem_cluster[:, 0:3]'], {}), '(sem_cluster[:, 0:3])\n', (11867, 11888), False, 'import pcl\n'), ((7935, 7965), 'numpy.argwhere', 'np.argwhere', (['(angle > end_angle)'], {}), '(angle > end_angle)\n', (7946, 7965), True, 'import numpy as np\n'), ((10202, 10240), 'numpy.argwhere', 'np.argwhere', (['(tmp_inst_label == label_j)'], {}), '(tmp_inst_label == label_j)\n', (10213, 10240), True, 'import numpy as np\n'), ((10597, 10658), 'numpy.full', 'np.full', (['(inst_cluster.shape[0], 1)', 'label_i'], {'dtype': 'np.uint32'}), '((inst_cluster.shape[0], 1), label_i, dtype=np.uint32)\n', (10604, 10658), True, 'import numpy as np\n'), ((10814, 10875), 'numpy.full', 'np.full', (['(inst_cluster.shape[0], 1)', 'inst_id'], {'dtype': 'np.uint32'}), '((inst_cluster.shape[0], 1), inst_id, dtype=np.uint32)\n', (10821, 10875), True, 'import numpy as np\n'), ((12587, 12604), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (12595, 12604), True, 'import numpy as np\n'), ((12676, 12737), 'numpy.full', 'np.full', (['(inst_cluster.shape[0], 1)', 'label_i'], {'dtype': 'np.uint32'}), '((inst_cluster.shape[0], 1), label_i, dtype=np.uint32)\n', (12683, 12737), True, 'import numpy as np\n'), ((12893, 12954), 'numpy.full', 'np.full', (['(inst_cluster.shape[0], 1)', 'inst_id'], {'dtype': 'np.uint32'}), '((inst_cluster.shape[0], 1), inst_id, dtype=np.uint32)\n', (12900, 12954), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
from tensorflow.keras import layers
from tensorflow.keras import Model
import tensorflow.keras.backend as K
from tensorflow.keras.initializers import glorot_normal
class Identity(layers.Layer):
    '''
    Pass-through layer: the output is an identity copy of the input tensor.
    '''

    def __init__(self, **kwargs):
        super(Identity, self).__init__(**kwargs)

    def get_config(self):
        # Nothing beyond the base layer's configuration to serialize.
        return super(Identity, self).get_config()

    def call(self, inputs):
        return tf.identity(inputs)
class LinearRegLayer(layers.Layer):
    '''
    Linear (dense) layer that, on every forward pass, adds a regularization
    loss penalizing the squared L2 deviation of the current weights from a
    set of reference ("initial") weights.

    :param units: number of output units
    :param rate: scale of the weight-deviation penalty added via add_loss
    :param init_weights: reference weights [w, b] the penalty is measured against
    '''
    def __init__(self, units=10, rate=.01, init_weights=[], **kwargs):
        super(LinearRegLayer, self).__init__(**kwargs)
        self.units = units
        self.rate = rate
        self.init_weights = init_weights
    def get_config(self):
        config = super(LinearRegLayer, self).get_config()
        # Fix: serialize the constructor arguments so the layer survives a
        # Keras save/load round trip (the base implementation alone drops
        # them and from_config would rebuild the layer with defaults).
        # `init_weights` may hold arrays and is deliberately not serialized;
        # a reloaded layer must be given the reference weights again.
        config.update({'units': self.units, 'rate': self.rate})
        return config
    def build(self, input_shape):
        # Get the number of dimensions of the data
        num_dim = input_shape[-1]
        # Build the actual weights
        self.w = self.add_weight(shape=(num_dim, self.units),
                                 initializer='random_normal',
                                 trainable=True)
        self.b = self.add_weight(shape=(self.units,),
                                 initializer='random_normal',
                                 trainable=True)
    def call(self, inputs):
        # Penalize drift of the trainable weights away from the reference ones.
        current_weights = self.weights
        current_w = current_weights[0]
        current_b = current_weights[1]
        init_w = np.array(self.init_weights[0])
        init_b = np.array(self.init_weights[1])
        diff_w = K.sum(K.square(current_w - init_w))
        diff_b = K.sum(K.square(current_b - init_b))
        total_diff = diff_w + diff_b
        self.add_loss(self.rate * total_diff)
        # Affine transform: inputs @ w + b
        return tf.matmul(inputs, self.w) + self.b
class LinearLayer(layers.Layer):
    '''
    A plain fully-connected (dense) layer: output = inputs @ w + b.
    (Fixed docstring: unlike LinearRegLayer, this layer applies no
    weight-deviation penalty.)

    :param units: number of output units
    '''
    def __init__(self, units=10, **kwargs):
        super(LinearLayer, self).__init__(**kwargs)
        self.units = units
    def get_config(self):
        config = super(LinearLayer, self).get_config()
        # Fix: include `units` so Keras can re-create the layer from its
        # config (save/load round trip); the base implementation drops it.
        config.update({'units': self.units})
        return config
    def build(self, input_shape):
        # Get the number of dimensions of the data
        num_dim = input_shape[-1]
        # Build the actual weights
        self.w = self.add_weight(shape=(num_dim, self.units),
                                 initializer='random_normal',
                                 trainable=True)
        self.b = self.add_weight(shape=(self.units,),
                                 initializer='random_normal',
                                 trainable=True)
    def call(self, inputs):
        # Affine transform: inputs @ w + b
        return tf.matmul(inputs, self.w) + self.b
class ConvLayer(layers.Layer):
    '''
    2D convolution layer with bias and a configurable activation.
    (Fixed docstring: unlike RegConvLayer, this layer applies no
    weight-deviation penalty.)

    :param size: [filter_height, filter_width]
    :param num_filters: number of output channels
    :param gate: activation applied to conv(inputs) + b
    :param stride: strides for tf.nn.conv2d (NHWC)
    :param padding: 'SAME' or 'VALID'
    '''
    def __init__(self, size=[3,3], num_filters=32, gate=tf.nn.relu,
                 stride=[1,1,1,1], padding='SAME', **kwargs):
        super(ConvLayer, self).__init__(**kwargs)
        self.size = size
        self.num_filters = num_filters
        self.gate = gate
        self.stride = stride
        self.padding = padding
    def get_config(self):
        config = super(ConvLayer, self).get_config()
        # Fix: serialize the constructor arguments for save/load round trips.
        # `gate` is a callable and is intentionally not serialized; a
        # reloaded layer falls back to the default tf.nn.relu.
        config.update({'size': self.size, 'num_filters': self.num_filters,
                       'stride': self.stride, 'padding': self.padding})
        return config
    def build(self, input_shape):
        # Get the number of dimensions of the data
        dim_in = input_shape[-1]
        filter_height = self.size[0]
        filter_width = self.size[1]
        # Build the actual weights
        self.w = self.add_weight(shape=(filter_height, filter_width, dim_in, self.num_filters),
                                 initializer=glorot_normal(),
                                 trainable=True)
        self.b = self.add_weight(shape=(self.num_filters,),
                                 initializer=glorot_normal(),
                                 trainable=True)
    def call(self, inputs):
        # conv -> bias -> activation
        x = tf.nn.conv2d(inputs, filters=self.w, strides=self.stride, padding=self.padding)
        x = tf.add(x, self.b)
        return self.gate(x)
class RegConvLayer(layers.Layer):
    '''
    2D convolution layer that, on every forward pass, adds a regularization
    loss penalizing the squared L2 deviation of the current weights from a
    set of reference ("initial") weights.

    :param size: [filter_height, filter_width]
    :param num_filters: number of output channels
    :param gate: activation applied to conv(inputs) + b
    :param rate: scale of the weight-deviation penalty added via add_loss
    :param init_weights: reference weights [w, b] the penalty is measured against
    :param stride: strides for tf.nn.conv2d (NHWC)
    :param padding: 'SAME' or 'VALID'
    '''
    def __init__(self, size=[3,3], num_filters=32, gate=tf.nn.relu, rate=.01,
                 init_weights=[], stride=[1,1,1,1], padding='SAME', **kwargs):
        super(RegConvLayer, self).__init__(**kwargs)
        self.rate = rate
        self.init_weights = init_weights
        self.size = size
        self.num_filters = num_filters
        self.gate = gate
        self.stride = stride
        self.padding = padding
    def get_config(self):
        config = super(RegConvLayer, self).get_config()
        # Fix: serialize the constructor arguments for save/load round trips.
        # `gate` (callable) and `init_weights` (may hold arrays) are
        # intentionally not serialized; a reloaded layer gets the defaults.
        config.update({'size': self.size, 'num_filters': self.num_filters,
                       'rate': self.rate, 'stride': self.stride,
                       'padding': self.padding})
        return config
    def build(self, input_shape):
        # Get the number of dimensions of the data
        dim_in = input_shape[-1]
        filter_height = self.size[0]
        filter_width = self.size[1]
        # Build the actual weights
        self.w = self.add_weight(shape=(filter_height, filter_width, dim_in, self.num_filters),
                                 initializer=glorot_normal(),
                                 trainable=True)
        self.b = self.add_weight(shape=(self.num_filters,),
                                 initializer=glorot_normal(),
                                 trainable=True)
    def call(self, inputs):
        # Penalize drift of the trainable weights away from the reference ones.
        current_weights = self.weights
        current_w = current_weights[0]
        current_b = current_weights[1]
        init_w = np.array(self.init_weights[0])
        init_b = np.array(self.init_weights[1])
        diff_w = K.sum(K.square(current_w - init_w))
        diff_b = K.sum(K.square(current_b - init_b))
        total_diff = diff_w + diff_b
        self.add_loss(self.rate * total_diff)
        # conv -> bias -> activation
        x = tf.nn.conv2d(inputs, self.w, strides=self.stride, padding=self.padding)
        x = tf.add(x, self.b)
        return self.gate(x)
class RegTransposeConvLayer(layers.Layer):
    '''
    Transposed 2D convolution (deconvolution) layer whose loss penalizes
    deviation of the kernel and bias from fixed reference weights.

    Arguments:
        size: [filter_height, filter_width] of the kernel.
        num_filters: number of output channels.
        gate: activation applied after the bias add.
        rate: scale of the squared-L2 deviation penalty.
        init_weights: [reference_kernel, reference_bias] arrays.
        stride: 4-element stride vector for tf.nn.conv2d_transpose.
        padding: 'SAME' or 'VALID'.
    '''
    def __init__(self, size=[3,3], num_filters=32, gate=tf.nn.relu, rate=.01,
                 init_weights=[], stride=[1,1,1,1], padding='SAME', **kwargs):
        super(RegTransposeConvLayer, self).__init__(**kwargs)
        self.rate = rate
        self.init_weights = init_weights
        self.size = size
        self.num_filters = num_filters
        self.gate = gate
        self.stride = stride
        self.padding = padding
        # Fix: stored under a private name -- `output_shape` is a read-only
        # property on keras.layers.Layer, so assigning to it raises.
        self._deconv_shape = None
    # According to the TensorFlow documentation, it's a good practice to add this function
    def get_config(self):
        config = super(RegTransposeConvLayer, self).get_config()
        return config
    def build(self, input_shape):
        # input_shape is (batch, height, width, channels) for NHWC data.
        batch = input_shape[0]
        height = input_shape[1]
        width = input_shape[2]
        dim_in = input_shape[-1]
        filter_height = self.size[0]
        filter_width = self.size[1]
        # Fix: the original referenced undefined names `height`, `width`,
        # `padding` and `strides` (NameError at build time); derive them from
        # the input shape and the layer's own configuration instead.
        new_height = deconv_output_length(height, filter_height, self.padding, self.stride[1])
        new_width = deconv_output_length(width, filter_width, self.padding, self.stride[2])
        self._deconv_shape = tf.convert_to_tensor([batch, new_height, new_width, self.num_filters])
        # conv2d_transpose kernels are (h, w, channels_out, channels_in) --
        # note the swapped channel axes relative to a forward convolution.
        self.w = self.add_weight(shape=(filter_height, filter_width, self.num_filters, dim_in),
                                 initializer=glorot_normal(),
                                 trainable=True)
        self.b = self.add_weight(shape=(self.num_filters,),
                                 initializer=glorot_normal(),
                                 trainable=True)
    def call(self, inputs):
        # Squared-L2 penalty pulling the weights toward the reference weights.
        current_weights = self.weights
        current_w = current_weights[0]
        current_b = current_weights[1]
        init_w = np.array(self.init_weights[0])
        init_b = np.array(self.init_weights[1])
        diff_w = K.sum(K.square(current_w - init_w))
        diff_b = K.sum(K.square(current_b - init_b))
        total_diff = diff_w + diff_b
        self.add_loss(self.rate * total_diff)
        x = tf.nn.conv2d_transpose(inputs, self.w, self._deconv_shape,
                                   strides=self.stride, padding=self.padding)
        x = tf.add(x, self.b)
        return self.gate(x)
class MyReshape(layers.Layer):
    """Layer that reshapes its input tensor to a fixed target shape."""
    def __init__(self, target_shape, **kwargs):
        super(MyReshape, self).__init__(**kwargs)
        self.target_shape = target_shape
    def get_config(self):
        # Recommended by the TensorFlow documentation for custom layers.
        return super(MyReshape, self).get_config()
    def call(self, inputs):
        return tf.reshape(inputs, self.target_shape)
# # -----------------------------------------------------------------------------------------
# # The following functions used to work on TensorFlow 1.XX
# # Create the custom 3D-Layer
# def Convolution_3D(name, label, inputs, kernel_size, channels_in, channels_out, transfer,
# strides=[1,1,1], padding='SAME', initializer_W=None, initializer_b=None, reuse=False):
# with tf.variable_scope(name, reuse=reuse):
# with tf.variable_scope(label, reuse=reuse):
# W = tf.get_variable('W', [kernel_size, kernel_size, kernel_size, channels_in, channels_out],
# initializer=initializer_W)
# b = tf.get_variable('bias', [channels_out], initializer=initializer_b)
# # The first and last elemnts of strides should alwasys be 1
# c_strides = [1] + list(strides) + [1]
# # Perform the 3D convolution
# z_hat = tf.nn.conv3d(inputs, W, strides=c_strides, padding=padding)
# # Add the bias
# z_hat = tf.nn.bias_add(z_hat, b)
# # Apply the transfer function
# y_hat = transfer(z_hat)
# return W, b, z_hat, y_hat
# def Up_Convolution_3D(name, label, inputs, kernel_size, channels_in, channels_out,
# strides=[2,2,2], padding='SAME', initializer=None, reuse=False):
# with tf.variable_scope(name, reuse=reuse):
# with tf.variable_scope(label, reuse=reuse):
# W = tf.get_variable('W', [kernel_size, kernel_size, kernel_size, channels_out, channels_in],
# initializer=initializer)
# # The first and last elemnts of strides should alwasys be 1
# c_strides = [1] + list(strides) + [1]
# # Extract the shape of the inputs
# inputs_size = tf.shape(inputs)
# batch = inputs_size[0]
# depth = inputs_size[1]
# height = inputs_size[2]
# width = inputs_size[3]
# in_channels = inputs_size[4]
# # Compute the shape after the de-convolution
# new_depth = deconv_output_length(depth, kernel_size, padding, strides[0])
# new_height = deconv_output_length(height, kernel_size, padding, strides[1])
# new_width = deconv_output_length(width, kernel_size, padding, strides[2])
# output_shape = tf.convert_to_tensor([batch, new_depth, new_height, new_width, channels_out])
# # Apply the deconvolution
# z_hat = tf.nn.conv3d_transpose(inputs, W, output_shape, strides=c_strides, padding=padding)
# return W, z_hat
# def Fully_Connected(name, label, inputs, dim_in, dim_out, transfer, reuse=False):
# with tf.variable_scope(name, reuse=reuse):
# with tf.variable_scope(label, reuse=reuse):
# W = tf.get_variable('W', [dim_in, dim_out])
# b = tf.get_variable('b', [dim_out])
# z_hat = tf.matmul(inputs, W) + b
# y_hat = transfer(z_hat)
# return W, b, z_hat, y_hat
# def Convolution_2D(name, label, inputs, kernel_size, channels_in, channels_out, transfer,
# strides=[1,1], padding='SAME', initializer_W=None, initializer_b=None, reuse=False):
# '''
# This layer computes the 2D standard convolution.
# Arguments:
# name: Name of the network.
# label: Name of this particular layer
# inputs: A tf.placeholder containing the inputs: [num_images, height, width, channels]
# kernel_size: An int specifing the size of the kxk kernel
# channels_in: Number of channels of the input
# channels_out: Number of filters to create
# transfer: Transfer function to use.
# strides: The first and the last elements are always 1. The elements in the middle are the
# y and x steps.
# '''
# with tf.variable_scope(name, reuse=reuse):
# with tf.variable_scope(label, reuse=reuse):
# W = tf.get_variable('W', [kernel_size, kernel_size, channels_in, channels_out],
# initializer=initializer_W)
# b = tf.get_variable('bias', [channels_out], initializer=initializer_b)
# # The first and last elemnts of strides should alwasys be 1
# c_strides = [1] + list(strides) + [1]
# # Compute the convolution
# z_hat = tf.nn.conv2d(inputs, W, c_strides, padding)
# # Add the bias
# z_hat = tf.nn.bias_add(z_hat, b)
# # Apply the transfer function
# y_hat = transfer(z_hat)
# return W, b, z_hat, y_hat
def deconv_output_length(input_length, filter_size, padding, stride):
    """Adapted from Keras: output length of a transposed convolution
    along one spatial dimension.

    Arguments:
        input_length: integer, or None when the dimension is unknown.
        filter_size: integer kernel extent along this dimension.
        padding: 'SAME' or 'VALID' (only 'VALID' adds the filter overhang).
        stride: integer stride along this dimension.

    Returns:
        The output length as an integer, or None if input_length is None.
    """
    if input_length is None:
        return None
    length = input_length * stride
    if padding == 'VALID':
        # VALID padding keeps the part of the kernel that hangs past the
        # last upsampled position.
        length += max(filter_size - stride, 0)
    return length
def main():
    # Placeholder entry point; this module is meant to be imported, not run.
    return -1
if __name__ == '__main__':
# Do nothing
main() | [
"tensorflow.nn.conv2d",
"tensorflow.add",
"numpy.array",
"tensorflow.keras.initializers.glorot_normal",
"tensorflow.matmul",
"tensorflow.nn.conv2d_transpose",
"tensorflow.reshape",
"tensorflow.convert_to_tensor",
"tensorflow.identity",
"tensorflow.keras.backend.square"
] | [((669, 688), 'tensorflow.identity', 'tf.identity', (['inputs'], {}), '(inputs)\n', (680, 688), True, 'import tensorflow as tf\n'), ((1751, 1781), 'numpy.array', 'np.array', (['self.init_weights[0]'], {}), '(self.init_weights[0])\n', (1759, 1781), True, 'import numpy as np\n'), ((1793, 1823), 'numpy.array', 'np.array', (['self.init_weights[1]'], {}), '(self.init_weights[1])\n', (1801, 1823), True, 'import numpy as np\n'), ((3994, 4073), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['inputs'], {'filters': 'self.w', 'strides': 'self.stride', 'padding': 'self.padding'}), '(inputs, filters=self.w, strides=self.stride, padding=self.padding)\n', (4006, 4073), True, 'import tensorflow as tf\n'), ((4080, 4097), 'tensorflow.add', 'tf.add', (['x', 'self.b'], {}), '(x, self.b)\n', (4086, 4097), True, 'import tensorflow as tf\n'), ((5364, 5394), 'numpy.array', 'np.array', (['self.init_weights[0]'], {}), '(self.init_weights[0])\n', (5372, 5394), True, 'import numpy as np\n'), ((5406, 5436), 'numpy.array', 'np.array', (['self.init_weights[1]'], {}), '(self.init_weights[1])\n', (5414, 5436), True, 'import numpy as np\n'), ((5612, 5683), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['inputs', 'self.w'], {'strides': 'self.stride', 'padding': 'self.padding'}), '(inputs, self.w, strides=self.stride, padding=self.padding)\n', (5624, 5683), True, 'import tensorflow as tf\n'), ((5690, 5707), 'tensorflow.add', 'tf.add', (['x', 'self.b'], {}), '(x, self.b)\n', (5696, 5707), True, 'import tensorflow as tf\n'), ((6820, 6890), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[batch, new_height, new_width, self.num_filters]'], {}), '([batch, new_height, new_width, self.num_filters])\n', (6840, 6890), True, 'import tensorflow as tf\n'), ((7305, 7335), 'numpy.array', 'np.array', (['self.init_weights[0]'], {}), '(self.init_weights[0])\n', (7313, 7335), True, 'import numpy as np\n'), ((7347, 7377), 'numpy.array', 'np.array', (['self.init_weights[1]'], {}), '(self.init_weights[1])\n', (7355, 
7377), True, 'import numpy as np\n'), ((7553, 7658), 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['inputs', 'self.w', 'self.output_shape'], {'strides': 'self.stride', 'padding': 'self.padding'}), '(inputs, self.w, self.output_shape, strides=self.\n stride, padding=self.padding)\n', (7575, 7658), True, 'import tensorflow as tf\n'), ((7662, 7679), 'tensorflow.add', 'tf.add', (['x', 'self.b'], {}), '(x, self.b)\n', (7668, 7679), True, 'import tensorflow as tf\n'), ((8074, 8111), 'tensorflow.reshape', 'tf.reshape', (['inputs', 'self.target_shape'], {}), '(inputs, self.target_shape)\n', (8084, 8111), True, 'import tensorflow as tf\n'), ((1842, 1870), 'tensorflow.keras.backend.square', 'K.square', (['(current_w - init_w)'], {}), '(current_w - init_w)\n', (1850, 1870), True, 'import tensorflow.keras.backend as K\n'), ((1889, 1917), 'tensorflow.keras.backend.square', 'K.square', (['(current_b - init_b)'], {}), '(current_b - init_b)\n', (1897, 1917), True, 'import tensorflow.keras.backend as K\n'), ((2002, 2027), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'self.w'], {}), '(inputs, self.w)\n', (2011, 2027), True, 'import tensorflow as tf\n'), ((2907, 2932), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'self.w'], {}), '(inputs, self.w)\n', (2916, 2932), True, 'import tensorflow as tf\n'), ((5455, 5483), 'tensorflow.keras.backend.square', 'K.square', (['(current_w - init_w)'], {}), '(current_w - init_w)\n', (5463, 5483), True, 'import tensorflow.keras.backend as K\n'), ((5502, 5530), 'tensorflow.keras.backend.square', 'K.square', (['(current_b - init_b)'], {}), '(current_b - init_b)\n', (5510, 5530), True, 'import tensorflow.keras.backend as K\n'), ((7396, 7424), 'tensorflow.keras.backend.square', 'K.square', (['(current_w - init_w)'], {}), '(current_w - init_w)\n', (7404, 7424), True, 'import tensorflow.keras.backend as K\n'), ((7443, 7471), 'tensorflow.keras.backend.square', 'K.square', (['(current_b - init_b)'], {}), '(current_b - init_b)\n', (7451, 
7471), True, 'import tensorflow.keras.backend as K\n'), ((3818, 3833), 'tensorflow.keras.initializers.glorot_normal', 'glorot_normal', ([], {}), '()\n', (3831, 3833), False, 'from tensorflow.keras.initializers import glorot_normal\n'), ((3923, 3938), 'tensorflow.keras.initializers.glorot_normal', 'glorot_normal', ([], {}), '()\n', (3936, 3938), False, 'from tensorflow.keras.initializers import glorot_normal\n'), ((5085, 5100), 'tensorflow.keras.initializers.glorot_normal', 'glorot_normal', ([], {}), '()\n', (5098, 5100), False, 'from tensorflow.keras.initializers import glorot_normal\n'), ((5190, 5205), 'tensorflow.keras.initializers.glorot_normal', 'glorot_normal', ([], {}), '()\n', (5203, 5205), False, 'from tensorflow.keras.initializers import glorot_normal\n'), ((7026, 7041), 'tensorflow.keras.initializers.glorot_normal', 'glorot_normal', ([], {}), '()\n', (7039, 7041), False, 'from tensorflow.keras.initializers import glorot_normal\n'), ((7131, 7146), 'tensorflow.keras.initializers.glorot_normal', 'glorot_normal', ([], {}), '()\n', (7144, 7146), False, 'from tensorflow.keras.initializers import glorot_normal\n')] |
import pandas as pd
from torch import nn, manual_seed
import torch
import time
import os
import random
import numpy as np
from ast import literal_eval
from torch.nn import L1Loss
from torch.optim import Adam
def read_feature_table(path, target, variables):
    """Load a feature table from CSV, parsing the target and variable
    columns (stored as string-encoded Python literals) back into objects.

    path: path to the CSV file.
    target: name of the target column.
    variables: list of feature column names.
    """
    table = pd.read_csv(path)
    for column in [target, *variables]:
        table[column] = table[column].apply(literal_eval)
    return table
def train_model(feature_table, variables, window, hyperparameters):
    """Build train/valid/test loaders from `feature_table`, fit a Model with
    the given hyperparameters, and return (model, tensors_dict).

    hyperparameters must provide: "hidden_layer", "dropout", "lr", "epochs".
    The returned dict maps "<split>_x_tensor"/"<split>_y_tensor" to tensors.
    """
    loaders = {}
    tensores = {}
    # Same loader construction for every split; batch size fixed at 50.
    for split in ("train", "valid", "test"):
        loader, x_tensor, y_tensor = torch_data(
            feature_table, target="target", variables=variables,
            group_var="group", batch=50, group=split)
        loaders[split] = loader
        tensores[split + "_x_tensor"] = x_tensor
        tensores[split + "_y_tensor"] = y_tensor
    model = Model(input_layer=window,
                  hidden_layer=hyperparameters["hidden_layer"],
                  dropout=hyperparameters["dropout"])
    criterion = L1Loss()
    optimizer = Adam(model.parameters(), lr=hyperparameters["lr"])
    train(model,
          loaders["train"], loaders["valid"],
          criterion, optimizer,
          epochs=hyperparameters["epochs"],
          seed=42)
    return model, tensores
def torch_data(data, target, variables, group_var, batch, group):
    """Convert a DataFrame (optionally filtered to one group) into a
    DataLoader plus the underlying feature and target tensors.

    Returns (loader, x_tensor, y_tensor).
    """
    if group is not None:
        # Keep only the requested split and renumber rows.
        data = data[data[group_var] == group].reset_index()
    features = torch.Tensor(data[variables].values.tolist())
    labels = torch.Tensor(data[target])
    loader = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(features, labels),
        batch_size=batch)
    return loader, features, labels
# class model_fc2h(nn.Module):
# def __init__(self, input_layer, hidden_layer=50, dropout=0.25):
# super(model_fc2h, self).__init__()
# self.dropout = nn.Dropout(dropout)
# self.fc1 = nn.Linear(input_layer, hidden_layer)
# self.fc2 = nn.Linear(input_layer, hidden_layer)
# self.fc3 = nn.Linear(hidden_layer*2, 1)
# self.tanh = nn.Tanh()
# def forward(self, input):
# x_lags = input[:,0]
# x_sign = input[:,1]
# out_lags = self.fc1(x_lags)
# out_lags = self.dropout(out_lags)
# out_sign = self.fc2(x_sign)
# out_sign = self.tanh(out_lags)
# output = self.fc3(torch.cat((out_lags, out_sign), 1))
# return output
# class model_lstm(nn.Module):
# def __init__(self, input_layer, hidden_layer, dropout):
# super(model_lstm, self).__init__()
# self.hidden_layer = hidden_layer
# self.hidden_cell = (torch.zeros(1,1,self.hidden_layer),
# torch.zeros(1,1,self.hidden_layer))
# self.lstm = nn.LSTM(input_layer, hidden_layer)
# self.linear = nn.Linear(hidden_layer, 1)
# def forward(self, input):
# x_lags = input
# lstm_out, self.hidden_cell = self.lstm(x_lags.view(len(x_lags),1 , -1), self.hidden_cell)
# output = self.linear(lstm_out)
# return lstm_out[:,:,0]
# class model_fc1h(nn.Module):
# def __init__(self, input_layer, hidden_layer=50, dropout=0.25):
# super(model_fc1h, self).__init__()
# self.dropout = nn.Dropout(dropout)
# self.fc1 = nn.Linear(input_layer, hidden_layer)
# self.fc2 = nn.Linear(hidden_layer, 1)
# def forward(self, input):
# x = self.fc1(input)
# x = self.dropout(x)
# output = self.fc2(x)
# return output[:,:,0]
class Model(nn.Module):
    """Hybrid forecaster: a dropout-regularized dense branch over lags, an
    LSTM branch over delta signs, and a weekday vector, concatenated and
    mapped to a single output.

    NOTE(review): fc2 expects hidden_layer*2 + 7 inputs, so the weekday
    channel (input[:,2,:]) is assumed to be 7 wide -- i.e. input_layer must
    be 7 (a weekday one-hot?) for forward() to work. TODO confirm.
    """
    def __init__(self, input_layer, hidden_layer=50, dropout=0.25):
        super(Model, self).__init__()
        self.hidden_layer = hidden_layer
        self.dropout = nn.Dropout(dropout)
        self.fc1 = nn.Linear(input_layer, hidden_layer)
        self.fc2 = nn.Linear(hidden_layer*2+7, 1)
        # Persistent LSTM state, carried across forward() calls (batch dim 1).
        self.hidden_cell = (torch.zeros(1,1,self.hidden_layer),
                            torch.zeros(1,1,self.hidden_layer))
        self.lstm = nn.LSTM(input_layer, hidden_layer)
    def forward(self, input):
        # input is (batch, channel, window): channel 0 = lags,
        # 1 = delta signs, 2 = weekday vector.
        lags = input[:,0,:]
        delta_sign = input[:,1,:]
        weekday_vector = input[:,2,:]
        #lag_pct_IBOV = input[:,3,:]
        #lag_pct_IBOV = lag_pct_IBOV.view(len(input[:,3,:]),1,-1)[:,:,0]
        fc1_out = self.dropout(self.fc1(lags))
        # Batch is fed to the LSTM as the sequence dimension (seq=batch,
        # batch=1) to match the persistent (1,1,hidden) state above.
        lstm_out, self.hidden_cell = self.lstm(delta_sign.view(len(delta_sign),1 , -1), self.hidden_cell)
        ds = torch.cat((fc1_out,lstm_out[:,0,:], weekday_vector),1)
        output = self.fc2(ds)
        return output
def train(model, trainData, validData, criterion, optimizer, epochs, seed):
    """Run a standard epoch loop: optimize on trainData, score on validData,
    and print the summed losses once per epoch. Leaves the model in eval mode.
    """
    # Seed every RNG source so runs are reproducible.
    manual_seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    random.seed(seed)
    for epoch in range(epochs):
        model.train()
        epoch_train_loss = 0.0
        epoch_valid_loss = 0.0
        # Optimization pass over the training loader.
        for features, labels in trainData:
            optimizer.zero_grad()
            loss = criterion(model(features), labels)
            # retain_graph is needed because the model may keep state across
            # batches (e.g. a persistent LSTM hidden cell).
            loss.backward(retain_graph=True)
            optimizer.step()
            epoch_train_loss += loss.item()
        # Scoring pass over the validation loader.
        model.eval()
        for features, labels in validData:
            loss = criterion(model(features), labels)
            epoch_valid_loss += loss.item()
        # Log epoch results.
        now = time.strftime("%H:%M:%S")
        print("{}, epoch: {}, train: {}, valid: {}".format(
            now, epoch, round(epoch_train_loss, 3), round(epoch_valid_loss, 3)))
    # Leave the model ready for inference.
    model.eval()
"torch.manual_seed",
"torch.nn.Dropout",
"pandas.read_csv",
"torch.nn.LSTM",
"torch.nn.L1Loss",
"time.strftime",
"torch.Tensor",
"torch.utils.data.TensorDataset",
"random.seed",
"numpy.random.seed",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"torch.zeros",
"torch.cat"
] | [((283, 300), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (294, 300), True, 'import pandas as pd\n'), ((1190, 1198), 'torch.nn.L1Loss', 'L1Loss', ([], {}), '()\n', (1196, 1198), False, 'from torch.nn import L1Loss\n'), ((2007, 2033), 'torch.Tensor', 'torch.Tensor', (['data[target]'], {}), '(data[target])\n', (2019, 2033), False, 'import torch\n'), ((2053, 2103), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['x_tensor', 'y_tensor'], {}), '(x_tensor, y_tensor)\n', (2083, 2103), False, 'import torch\n'), ((2116, 2170), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch'}), '(dataset, batch_size=batch)\n', (2143, 2170), False, 'import torch\n'), ((5324, 5341), 'torch.manual_seed', 'manual_seed', (['seed'], {}), '(seed)\n', (5335, 5341), False, 'from torch import nn, manual_seed\n'), ((5391, 5411), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (5405, 5411), True, 'import numpy as np\n'), ((5416, 5433), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (5427, 5433), False, 'import random\n'), ((4386, 4405), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (4396, 4405), False, 'from torch import nn, manual_seed\n'), ((4425, 4461), 'torch.nn.Linear', 'nn.Linear', (['input_layer', 'hidden_layer'], {}), '(input_layer, hidden_layer)\n', (4434, 4461), False, 'from torch import nn, manual_seed\n'), ((4481, 4515), 'torch.nn.Linear', 'nn.Linear', (['(hidden_layer * 2 + 7)', '(1)'], {}), '(hidden_layer * 2 + 7, 1)\n', (4490, 4515), False, 'from torch import nn, manual_seed\n'), ((4670, 4704), 'torch.nn.LSTM', 'nn.LSTM', (['input_layer', 'hidden_layer'], {}), '(input_layer, hidden_layer)\n', (4677, 4704), False, 'from torch import nn, manual_seed\n'), ((5133, 5191), 'torch.cat', 'torch.cat', (['(fc1_out, lstm_out[:, 0, :], weekday_vector)', '(1)'], {}), '((fc1_out, lstm_out[:, 0, :], weekday_vector), 1)\n', (5142, 5191), False, 'import 
torch\n'), ((6297, 6322), 'time.strftime', 'time.strftime', (['"""%H:%M:%S"""'], {}), "('%H:%M:%S')\n", (6310, 6322), False, 'import time\n'), ((4541, 4577), 'torch.zeros', 'torch.zeros', (['(1)', '(1)', 'self.hidden_layer'], {}), '(1, 1, self.hidden_layer)\n', (4552, 4577), False, 'import torch\n'), ((4605, 4641), 'torch.zeros', 'torch.zeros', (['(1)', '(1)', 'self.hidden_layer'], {}), '(1, 1, self.hidden_layer)\n', (4616, 4641), False, 'import torch\n')] |
"""
Detection Testing Script.
This scripts reads a given config file and runs the evaluation.
It is an entry point that is made to evaluate standard models in FsDet.
In order to let one script support evaluation of many models,
this script contains logic that are specific to these built-in models and
therefore may not be suitable for your own project.
For example, your research project perhaps only needs a single "evaluator".
Therefore, we recommend you to use FsDet as an library and take
this file as an example of how to use the library.
You may want to write your own script with your datasets and other customizations.
"""
import random
import numpy as np
import torch
from laplacianshot.trainer import LaplacianTrainer
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
from fsdet.config import get_cfg, set_global_cfg
from fsdet.engine import default_argument_parser, default_setup
from detectron2.config import global_cfg
import detectron2.utils.comm as comm
import os
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.engine import launch
from fsdet.evaluation import (verify_results)
def setup(args):
    """
    Build a frozen config from the command-line arguments and run the
    default FsDet setup (logging, output directory, environment dump).
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    # Command-line overrides take precedence over the config file.
    overrides = args.opts
    if overrides:
        cfg.merge_from_list(overrides)
    cfg.freeze()
    set_global_cfg(cfg)
    default_setup(cfg, args)
    return cfg
def main(args):
    """Evaluate a trained model with LaplacianShot on the configured dataset.

    Loads either the checkpoint at args.eval_iter (when != -1) or the last
    checkpoint from the config, runs LaplacianTrainer.test, verifies results
    on the main process, and returns the evaluation dict.
    """
    cfg = setup(args)
    model = LaplacianTrainer.build_model(cfg)
    if args.eval_iter != -1:
        # load checkpoint at specified iteration
        # (checkpoints are named by the 0-based iteration, hence the -1)
        ckpt_file = os.path.join(
            cfg.OUTPUT_DIR, "model_{:07d}.pth".format(args.eval_iter - 1)
        )
        resume = False
    else:
        # load checkpoint at last iteration
        ckpt_file = cfg.MODEL.WEIGHTS
        resume = True
    DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
        ckpt_file, resume=resume
    )
    # knn/lambda_factor are the LaplacianShot hyperparameters; the many
    # None arguments fall through to the trainer's defaults.
    res = LaplacianTrainer.test(cfg, model,
                                support_augmentation=None,
                                use_classification_layer=True,
                                use_laplacianshot=True,
                                rectify_prototypes=None,
                                leverage_classification=None,
                                embeddings_type=None,
                                do_pca=None,
                                remove_possibly_duplicates=None,
                                knn=5,
                                lambda_factor=0.5,
                                max_iters=None,
                                laplacianshot_logs=False,
                                save_checkpoints=True,
                                plots=False)
    # Only the main process validates/reports results in distributed runs.
    if comm.is_main_process():
        verify_results(cfg, res)
        # save evaluation results in json
        # os.makedirs(
        #     os.path.join(cfg.OUTPUT_DIR, "inference"), exist_ok=True
        # )
        # with open(
        #         os.path.join(cfg.OUTPUT_DIR, "inference", "res_final.json"),
        #         "w",
        # ) as fp:
        #     json.dump(res, fp)
    return res
if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    if args.eval_during_train or args.eval_all:
        # Pick a random TCP port for the distributed init URL so concurrent
        # evaluation runs on the same host do not collide.
        args.dist_url = "tcp://127.0.0.1:{:05d}".format(
            np.random.choice(np.arange(0, 65534))
        )
    print("Command Line Args:", args)
    # launch() handles (multi-)GPU / multi-machine process spawning.
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
| [
"fsdet.config.set_global_cfg",
"fsdet.config.get_cfg",
"torch.manual_seed",
"fsdet.engine.default_argument_parser",
"detectron2.utils.comm.is_main_process",
"fsdet.engine.default_setup",
"fsdet.evaluation.verify_results",
"random.seed",
"detectron2.engine.launch",
"laplacianshot.trainer.LaplacianT... | [((735, 749), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (746, 749), False, 'import random\n'), ((750, 767), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (764, 767), True, 'import numpy as np\n'), ((768, 788), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (785, 788), False, 'import torch\n'), ((1222, 1231), 'fsdet.config.get_cfg', 'get_cfg', ([], {}), '()\n', (1229, 1231), False, 'from fsdet.config import get_cfg, set_global_cfg\n'), ((1352, 1371), 'fsdet.config.set_global_cfg', 'set_global_cfg', (['cfg'], {}), '(cfg)\n', (1366, 1371), False, 'from fsdet.config import get_cfg, set_global_cfg\n'), ((1376, 1400), 'fsdet.engine.default_setup', 'default_setup', (['cfg', 'args'], {}), '(cfg, args)\n', (1389, 1400), False, 'from fsdet.engine import default_argument_parser, default_setup\n'), ((1469, 1502), 'laplacianshot.trainer.LaplacianTrainer.build_model', 'LaplacianTrainer.build_model', (['cfg'], {}), '(cfg)\n', (1497, 1502), False, 'from laplacianshot.trainer import LaplacianTrainer\n'), ((1961, 2324), 'laplacianshot.trainer.LaplacianTrainer.test', 'LaplacianTrainer.test', (['cfg', 'model'], {'support_augmentation': 'None', 'use_classification_layer': '(True)', 'use_laplacianshot': '(True)', 'rectify_prototypes': 'None', 'leverage_classification': 'None', 'embeddings_type': 'None', 'do_pca': 'None', 'remove_possibly_duplicates': 'None', 'knn': '(5)', 'lambda_factor': '(0.5)', 'max_iters': 'None', 'laplacianshot_logs': '(False)', 'save_checkpoints': '(True)', 'plots': '(False)'}), '(cfg, model, support_augmentation=None,\n use_classification_layer=True, use_laplacianshot=True,\n rectify_prototypes=None, leverage_classification=None, embeddings_type=\n None, do_pca=None, remove_possibly_duplicates=None, knn=5,\n lambda_factor=0.5, max_iters=None, laplacianshot_logs=False,\n save_checkpoints=True, plots=False)\n', (1982, 2324), False, 'from laplacianshot.trainer import 
LaplacianTrainer\n'), ((2760, 2782), 'detectron2.utils.comm.is_main_process', 'comm.is_main_process', ([], {}), '()\n', (2780, 2782), True, 'import detectron2.utils.comm as comm\n'), ((3442, 3576), 'detectron2.engine.launch', 'launch', (['main', 'args.num_gpus'], {'num_machines': 'args.num_machines', 'machine_rank': 'args.machine_rank', 'dist_url': 'args.dist_url', 'args': '(args,)'}), '(main, args.num_gpus, num_machines=args.num_machines, machine_rank=\n args.machine_rank, dist_url=args.dist_url, args=(args,))\n', (3448, 3576), False, 'from detectron2.engine import launch\n'), ((2792, 2816), 'fsdet.evaluation.verify_results', 'verify_results', (['cfg', 'res'], {}), '(cfg, res)\n', (2806, 2816), False, 'from fsdet.evaluation import verify_results\n'), ((1841, 1894), 'detectron2.checkpoint.DetectionCheckpointer', 'DetectionCheckpointer', (['model'], {'save_dir': 'cfg.OUTPUT_DIR'}), '(model, save_dir=cfg.OUTPUT_DIR)\n', (1862, 1894), False, 'from detectron2.checkpoint import DetectionCheckpointer\n'), ((3195, 3220), 'fsdet.engine.default_argument_parser', 'default_argument_parser', ([], {}), '()\n', (3218, 3220), False, 'from fsdet.engine import default_argument_parser, default_setup\n'), ((3368, 3387), 'numpy.arange', 'np.arange', (['(0)', '(65534)'], {}), '(0, 65534)\n', (3377, 3387), True, 'import numpy as np\n')] |
import numpy as np
WALL = '%'
START = 'P'
DOT = '.'
SPACE = ' '
PATHCHAR = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
class MazeSearchNode:
    """A node in the maze search tree.

    Attributes:
        state: hashable (row, col, frozenset_of_remaining_dots) tuple.
        parent: the node this one was expanded from (None for the root).
        cost: path cost from the start node.
        ate: coordinates of the dot consumed on entering this state, if any.
    """
    def __init__(self, state, parent=None, cost=0):
        self.state = state
        self.parent = parent
        self.cost = cost
        self.ate = None
    def __eq__(self, other):
        # Fix: guard against comparison with non-node objects, which
        # previously raised AttributeError (e.g. `node == None`).
        if not isinstance(other, MazeSearchNode):
            return NotImplemented
        return self.state == other.state
    def __repr__(self):
        return 'MazeSearchNode(state=%r, cost=%r)' % (self.state, self.cost)
class MazeSearch:
    """A generalized maze search problem.

    Subclasses implement frontier_select() to choose the expansion order
    (BFS, DFS, greedy, A*). States are (row, col, frozenset_of_dots).
    """
    def __init__(self, input_file):
        """input_file: path to input txt"""
        self.init_st = None
        self.maze = None
        with open(input_file, 'r') as f:
            self.maze, self.init_st = MazeSearch.text_to_maze(f.read())
        self.nrows, self.ncols = self.maze.shape
        self.curr_node = None
        self.visited = set() # a set of states
        self.frontier = list() # a list of nodes
    @staticmethod
    def text_to_maze(txt):
        """returns (maze, initial_state)
        State is a tuple:
        (initial_row, initial_col, frozenset([(bit_row,bit_col)]))
        Use frozenset to ensure hashable.
        Maze is a bool np.ndarray that denotes wall as False"""
        lines = txt.splitlines()
        nrow = len(lines)
        # NOTE(review): assumes every line has the same length as the first;
        # ragged input would raise IndexError below.
        ncol = len(lines[0])
        maze = np.zeros((nrow, ncol), dtype=bool)
        init_row = None
        init_col = None
        bits = []
        for r in range(nrow):
            for c in range(ncol):
                car = lines[r][c]
                maze[r, c] = not car == WALL
                if car == 'P':
                    init_row, init_col = r, c
                elif car == '.':
                    bits.append((r, c))
        st = (init_row, init_col, frozenset(bits))
        return (maze, st)
    @staticmethod
    def distance(x1, y1, x2, y2):
        """Manhattan distance"""
        return abs(x1 - x2) + abs(y1 - y2)
    def reached_goal(self):
        """Return True if current state is goal state (no dots remain)"""
        return len(self.curr_node.state[2]) == 0
    def frontier_select(self):
        """select an appropriate node in self.frontier (subclass hook)"""
        pass
    def expand(self):
        """
        First, adds the current state to visited set.
        Adds neighbors of current node to the frontier.
        Won't add if action is invalid (hit wall).
        Won't add if state has been visited.
        If has appeared in frontier, replace the one in frontier if
        this one's cost is lower."""
        def try_add_node(row, col, old_bits):
            # Eating a dot removes it from the remaining-dots set and
            # records it, so the path can later be annotated.
            ate = None
            if (row,col) in old_bits:
                new_state = (row, col, old_bits - {(row, col)})
                ate = (row,col)
            else:
                new_state = (row, col, old_bits)
            if not new_state in self.visited:
                new_node = MazeSearchNode(new_state,
                                          parent=self.curr_node,
                                          cost=self.curr_node.cost + 1)
                new_node.ate = ate
                try:
                    # try replace a node in frontier that contains
                    # the same state but higher cost
                    ind = self.frontier.index(new_node)
                    if self.frontier[ind].cost > new_node.cost:
                        self.frontier[ind] = new_node
                except ValueError:
                    self.frontier.append(new_node)
        self.visited.add(self.curr_node.state)
        curr_r, curr_c, curr_bits = self.curr_node.state
        # NOTE(review): neighbor indexing assumes the maze is bordered by
        # walls, so row/col never step outside the array -- TODO confirm.
        # UP
        if self.maze[curr_r - 1, curr_c]:
            try_add_node(curr_r - 1, curr_c, curr_bits)
        # DOWN
        if self.maze[curr_r + 1, curr_c]:
            try_add_node(curr_r + 1, curr_c, curr_bits)
        # LEFT
        if self.maze[curr_r, curr_c - 1]:
            try_add_node(curr_r, curr_c - 1, curr_bits)
        # RIGHT
        if self.maze[curr_r, curr_c + 1]:
            try_add_node(curr_r, curr_c + 1, curr_bits)
    def run_from_start(self):
        """reset states and run from initial state"""
        self.visited.clear()
        self.frontier.clear()
        self.frontier.append(MazeSearchNode(self.init_st))
        # Generic best-first loop; the subclass's frontier_select decides
        # the strategy. Stops at goal or when the frontier is exhausted.
        while not len(self.frontier) == 0:
            self.curr_node = self.frontier_select()
            if self.reached_goal():
                break
            else:
                self.expand()
            # self.print_curr_status() # @debug
        self.report()
    def print_curr_status(self):
        # print('(%d, %d) expanded: %d cost: %d' %
        #       (self.curr_node.state[0],
        #        self.curr_node.state[1],
        #        len(self.visited),
        #        self.curr_node.cost), end='\r')
        print(len(self.visited), end='\r')
    def report(self):
        """prints string representation of the path for current node,
        also reports stats for the last run"""
        print('\nExpanded %d nodes' %
              (len(self.visited),))
        print('found path cost: %d' % (self.curr_node.cost,))
        self.draw_path()
        print('\n\n')
    @staticmethod
    def trace_path(node):
        # Walk parent links back to the root, building the start-to-goal
        # list of (row, col) positions.
        path = []
        while True:
            path.insert(0, (node.state[0], node.state[1]))
            node = node.parent
            if not node:
                break
        return path
    def draw_path(self):
        # Render the maze with walls as '%', open cells as spaces, and the
        # found path overlaid as dots.
        path = MazeSearch.trace_path(self.curr_node)
        nrow, ncol = self.maze.shape
        lines = [['%' for _ in range(ncol)] for _ in range(nrow)]
        for row in range(nrow):
            for col in range(ncol):
                if self.maze[row, col]:
                    lines[row][col] = SPACE
                if (row, col) in path:
                    lines[row][col] = DOT
        lines = [''.join(r) for r in lines]
        print('\n'.join(lines))
class BFS(MazeSearch):
    """Breadth-first maze search: always expands the oldest frontier node."""
    def frontier_select(self):
        # FIFO: take the node that has been waiting longest.
        node = self.frontier[0]
        del self.frontier[0]
        return node
class DFS(MazeSearch):
    """Depth-first maze search: always expands the newest frontier node."""
    def frontier_select(self):
        # LIFO: take the most recently added node.
        node = self.frontier[-1]
        del self.frontier[-1]
        return node
class GreedySingle(MazeSearch):
    """Greedy best-first maze search: expands the frontier node with the
    smallest heuristic value (path cost is ignored)."""
    @staticmethod
    def heuristic(node):
        """Sum of Manhattan distances from the node to every remaining dot."""
        row, col = node.state[0], node.state[1]
        return sum(GreedySingle.distance(row, col, bit_r, bit_c)
                   for bit_r, bit_c in node.state[2])
    def frontier_select(self):
        # Pop the frontier node with the lowest heuristic score.
        scores = [GreedySingle.heuristic(nd) for nd in self.frontier]
        return self.frontier.pop(np.argmin(scores))
class AstarSingle(GreedySingle):
    """A* search: f(n) = g(n) + h(n), with h inherited from GreedySingle."""

    @staticmethod
    def evaluation(node):
        # f = h + g
        return AstarSingle.heuristic(node) + node.cost

    def frontier_select(self):
        # Pop the first frontier node minimizing f (earliest index on ties).
        scores = [AstarSingle.evaluation(n) for n in self.frontier]
        return self.frontier.pop(scores.index(min(scores)))
class AstarMulti(MazeSearch):
    """A* search over multiple goal bits.

    Orders the frontier by path cost plus the number of bits left to eat.
    """

    @staticmethod
    def trace_path(node):
        """Walk parent links back to the root.

        Returns (path, eat_order): the (row, col) states walked and the bits
        eaten along the way, both in chronological order.
        """
        eaten = []
        cells = []
        while True:
            cells.append((node.state[0], node.state[1]))
            if node.ate:
                eaten.append(node.ate)
            node = node.parent
            if not node:
                break
        cells.reverse()
        eaten.reverse()
        return (cells, eaten)

    def draw_path(self):
        """Print the maze, marking each eaten bit with a PATHCHAR symbol
        cycled in eating order."""
        path, eat_order = AstarMulti.trace_path(self.curr_node)
        nrow, ncol = self.maze.shape
        grid = [['%'] * ncol for _ in range(nrow)]
        for row in range(nrow):
            for col in range(ncol):
                if self.maze[row, col]:
                    grid[row][col] = SPACE
        for ind, (row, col) in enumerate(eat_order):
            grid[row][col] = PATHCHAR[ind % len(PATHCHAR)]
        print('\n'.join(''.join(r) for r in grid))

    @staticmethod
    def heuristic(node):
        """Number of bits still uneaten."""
        return len(node.state[2])

    @staticmethod
    def evaluation(node):
        # f = h + g
        return AstarMulti.heuristic(node) + node.cost

    def frontier_select(self):
        # Pop the first frontier node minimizing f (earliest index on ties).
        scores = [AstarMulti.evaluation(n) for n in self.frontier]
        return self.frontier.pop(scores.index(min(scores)))
class AstarSubopt(AstarMulti):
    """Suboptimal (bounded-lookahead) variant of AstarMulti.

    Only scores the newest few frontier nodes, trading optimality for speed.
    """

    def heuristic(self, node):
        """Remaining-bit count times the distance to the nearest bit
        (0 when no bits remain)."""
        row, col = node.state[0], node.state[1]
        bits = list(node.state[2])
        if bits:
            nearest = min(self.distance(row, col, b[0], b[1]) for b in bits)
        else:
            nearest = 0
        return len(node.state[2]) * nearest

    def frontier_select(self):
        # Only look at the last `window` entries of the frontier; pop the
        # first one with the minimum heuristic (same tie-break as np.argmin).
        window = 4
        if len(self.frontier) > window:
            tail = self.frontier[-window:]
            scores = [self.heuristic(n) for n in tail]
            pop_ind = len(self.frontier) - window + scores.index(min(scores))
        else:
            scores = [self.heuristic(n) for n in self.frontier]
            pop_ind = scores.index(min(scores))
        return self.frontier.pop(pop_ind)

    def print_curr_status(self):
        # Expanded-node count and bits remaining, overwriting the same line.
        print('%d %d ' % (len(self.visited), len(self.curr_node.state[2])), end='\r')
"numpy.zeros"
] | [((1269, 1303), 'numpy.zeros', 'np.zeros', (['(nrow, ncol)'], {'dtype': 'bool'}), '((nrow, ncol), dtype=bool)\n', (1277, 1303), True, 'import numpy as np\n')] |
import numpy as np
from scipy.interpolate import CubicSpline
def gen_c2_spline(x, y, init_heading, slen_start, slen_end):
    """Build a pair of C2-continuous cubic splines x(mu), y(mu).

    x, y: np.arrays of control coordinates (must have equal length).
    init_heading: heading angle at the first point (radians).
    slen_start: segment-length estimate scaling the clamped first
        derivative at the start.
    slen_end: accepted for interface compatibility but not used here --
        the end condition is a zero second derivative.
        NOTE(review): confirm whether the end tangent should use slen_end.
    Returns (cs_x, cs_y): scipy CubicSpline objects over mu = 0..len(x)-1,
    where mu is a virtual path variable advancing by 1 per segment.
    """
    assert len(x) == len(y)
    mu = np.arange(0, len(x), 1.0)
    start_dx = (1, slen_start * np.cos(init_heading))
    start_dy = (1, slen_start * np.sin(init_heading))
    free_end = (2, 0.0)
    cs_x = CubicSpline(mu, x, bc_type=(start_dx, free_end))
    cs_y = CubicSpline(mu, y, bc_type=(start_dy, free_end))
    return cs_x, cs_y
def calc_c2_traj(x, y, init_heading, eps=0.005):
    """Fixed-point iteration on the boundary-segment lengths.

    Recomputes the spline until the numerically measured lengths of the
    first and last segments agree with the estimates used to build it,
    within `eps`. x, y are 1D numpy arrays of control coordinates.
    Returns the converged (cs_x, cs_y) CubicSpline pair.
    """
    # Seed with the Euclidean chord lengths of the boundary segments.
    slen_start = np.sqrt((x[1] - x[0]) ** 2 + (y[1] - y[0]) ** 2)
    slen_end = np.sqrt((x[-1] - x[-2]) ** 2 + (y[-1] - y[-2]) ** 2)
    while True:
        cx, cy = gen_c2_spline(x, y, init_heading, slen_start, slen_end)
        # Coefficients come highest-order-first from scipy; flip to ascending.
        first_x = np.flip(cx.c[:, 0])
        first_y = np.flip(cy.c[:, 0])
        last_x = np.flip(cx.c[:, -1])
        last_y = np.flip(cy.c[:, -1])
        new_start = calc_spline_length(first_x, first_y)
        new_end = calc_spline_length(last_x, last_y)
        if abs(new_start - slen_start) < eps and abs(new_end - slen_end) < eps:
            return cx, cy
        slen_start, slen_end = new_start, new_end
def calc_spline_length(x_coeffs, y_coeffs, n_ips=100):
    """Numerically approximate the arc length of one cubic segment.

    x_coeffs, y_coeffs: arrays of 4 polynomial coefficients in ascending
        order [c0, c1, c2, c3], evaluated over t in [0, 1].
    n_ips: number of sample points for the polyline approximation.
    Returns the summed chord length of the sampled polyline.
    """
    t = np.linspace(0.0, 1.0, n_ips)
    xs = x_coeffs[0] + x_coeffs[1] * t + x_coeffs[2] * t ** 2 + x_coeffs[3] * t ** 3
    ys = y_coeffs[0] + y_coeffs[1] * t + y_coeffs[2] * t ** 2 + y_coeffs[3] * t ** 3
    pts = np.stack((xs, ys), axis=1)
    seg = np.diff(pts, axis=0)
    return np.sum(np.sqrt(np.sum(seg ** 2, axis=1)))
def compute_spline_heading(x, y, t):
    """Heading psi = atan2(y'(t), x'(t)) along a parametric spline.

    x, y: callables supporting derivative evaluation as x(t, 1) (e.g.
        scipy CubicSpline objects).
    t: 1D numpy array of path-variable samples.
    Returns psi wrapped into [-pi, pi).
    """
    psi = np.arctan2(y(t, 1), x(t, 1))
    psi = np.where(psi >= np.pi, psi - 2 * np.pi, psi)
    psi = np.where(psi < -np.pi, psi + 2 * np.pi, psi)
    return psi
def compute_spline_curvature(x, y, t):
    """Curvature K = |x'y'' - y'x''| / (x'^2 + y'^2)^(3/2) along the curve.

    x, y: callables supporting derivative evaluation as x(t, nu)
        (e.g. scipy CubicSpline objects).
    t: 1D numpy array of path-variable samples.
    Returns an array of curvatures with the same shape as t.
    NOTE(review): divides by zero at points where x'(t) = y'(t) = 0.
    """
    dx, dy = x(t, 1), y(t, 1)
    ddx, ddy = x(t, 2), y(t, 2)
    numerator = np.abs(dx * ddy - dy * ddx)
    denominator = (dx ** 2 + dy ** 2) ** 1.5
    return numerator / denominator
"numpy.flip",
"numpy.sqrt",
"numpy.power",
"numpy.diff",
"numpy.linspace",
"numpy.zeros",
"numpy.cos",
"numpy.sin"
] | [((1099, 1147), 'numpy.sqrt', 'np.sqrt', (['((x[1] - x[0]) ** 2 + (y[1] - y[0]) ** 2)'], {}), '((x[1] - x[0]) ** 2 + (y[1] - y[0]) ** 2)\n', (1106, 1147), True, 'import numpy as np\n'), ((1159, 1211), 'numpy.sqrt', 'np.sqrt', (['((x[-1] - x[-2]) ** 2 + (y[-1] - y[-2]) ** 2)'], {}), '((x[-1] - x[-2]) ** 2 + (y[-1] - y[-2]) ** 2)\n', (1166, 1211), True, 'import numpy as np\n'), ((2067, 2095), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'n_ips'], {}), '(0.0, 1.0, n_ips)\n', (2078, 2095), True, 'import numpy as np\n'), ((2113, 2133), 'numpy.zeros', 'np.zeros', (['(n_ips, 2)'], {}), '((n_ips, 2))\n', (2121, 2133), True, 'import numpy as np\n'), ((1323, 1342), 'numpy.flip', 'np.flip', (['cx.c[:, 0]'], {}), '(cx.c[:, 0])\n', (1330, 1342), True, 'import numpy as np\n'), ((1367, 1386), 'numpy.flip', 'np.flip', (['cy.c[:, 0]'], {}), '(cy.c[:, 0])\n', (1374, 1386), True, 'import numpy as np\n'), ((1409, 1429), 'numpy.flip', 'np.flip', (['cx.c[:, -1]'], {}), '(cx.c[:, -1])\n', (1416, 1429), True, 'import numpy as np\n'), ((1452, 1472), 'numpy.flip', 'np.flip', (['cy.c[:, -1]'], {}), '(cy.c[:, -1])\n', (1459, 1472), True, 'import numpy as np\n'), ((2328, 2348), 'numpy.power', 'np.power', (['t_steps', '(3)'], {}), '(t_steps, 3)\n', (2336, 2348), True, 'import numpy as np\n'), ((2538, 2558), 'numpy.power', 'np.power', (['t_steps', '(3)'], {}), '(t_steps, 3)\n', (2546, 2558), True, 'import numpy as np\n'), ((2265, 2285), 'numpy.power', 'np.power', (['t_steps', '(2)'], {}), '(t_steps, 2)\n', (2273, 2285), True, 'import numpy as np\n'), ((2475, 2495), 'numpy.power', 'np.power', (['t_steps', '(2)'], {}), '(t_steps, 2)\n', (2483, 2495), True, 'import numpy as np\n'), ((2613, 2640), 'numpy.diff', 'np.diff', (['spl_coords'], {'axis': '(0)'}), '(spl_coords, axis=0)\n', (2620, 2640), True, 'import numpy as np\n'), ((527, 547), 'numpy.cos', 'np.cos', (['init_heading'], {}), '(init_heading)\n', (533, 547), True, 'import numpy as np\n'), ((666, 686), 'numpy.sin', 'np.sin', 
(['init_heading'], {}), '(init_heading)\n', (672, 686), True, 'import numpy as np\n')] |
import os
import numpy as np
import tensorflow as tf
import cv2
import time
import sys
import pickle
import ROLO_utils as util
class YOLO_TF:
    """YOLO-small network wrapper (TensorFlow): builds the detection graph,
    runs detection on images, and exports fc-layer features plus box
    locations for ROLO's LSTM training."""
    # I/O and display options; argv_parser() may override these per run.
    fromfile = None
    tofile_img = 'test/output.jpg'
    tofile_txt = 'test/output.txt'
    imshow = True
    filewrite_img = False
    filewrite_txt = False
    disp_console = True
    # NOTE(review): '\ ' here is a literal backslash+space in the path string.
    weights_file = '/home/marc/ROLO/3rd\ party_upgrade/weights/YOLO_small.ckpt'
    alpha = 0.1          # leaky-ReLU negative slope
    threshold = 0.08     # minimum class confidence to keep a detection
    iou_threshold = 0.5  # overlap threshold used by non-maximum suppression
    num_class = 20
    num_box = 2
    grid_size = 7
    # The 20 PASCAL VOC class names, indexed by network class id.
    classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
               "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
    # Default frame size; overwritten per image in the detect/prepare methods.
    w_img, h_img = [352, 240]
    num_feat = 4096
    num_predict = 6  # final output of LSTM 6 loc parameters
    num_heatmap = 1024
def __init__(self, argvs=[]):
self.argv_parser(argvs)
self.build_networks()
if self.fromfile is not None: self.detect_from_file(self.fromfile)
def argv_parser(self, argvs):
for i in range(1, len(argvs), 2):
if argvs[i] == '-fromfile': self.fromfile = argvs[i + 1]
if argvs[i] == '-tofile_img': self.tofile_img = argvs[i + 1]; self.filewrite_img = True
if argvs[i] == '-tofile_txt': self.tofile_txt = argvs[i + 1]; self.filewrite_txt = True
if argvs[i] == '-imshow':
if argvs[i + 1] == '1':
self.imshow = True
else:
self.imshow = False
if argvs[i] == '-disp_console':
if argvs[i + 1] == '1':
self.disp_console = True
else:
self.disp_console = False
    def build_networks(self):
        """Assemble the YOLO-small graph (24 conv + 4 pool + 3 fc layers),
        open a session, and restore weights from self.weights_file."""
        if self.disp_console: print("Building YOLO_small graph...")
        # Input: batch of 448x448 RGB images scaled to [-1, 1].
        self.x = tf.placeholder('float32', [None, 448, 448, 3])
        self.conv_1 = self.conv_layer(1, self.x, 64, 7, 2)
        self.pool_2 = self.pooling_layer(2, self.conv_1, 2, 2)
        self.conv_3 = self.conv_layer(3, self.pool_2, 192, 3, 1)
        self.pool_4 = self.pooling_layer(4, self.conv_3, 2, 2)
        self.conv_5 = self.conv_layer(5, self.pool_4, 128, 1, 1)
        self.conv_6 = self.conv_layer(6, self.conv_5, 256, 3, 1)
        self.conv_7 = self.conv_layer(7, self.conv_6, 256, 1, 1)
        self.conv_8 = self.conv_layer(8, self.conv_7, 512, 3, 1)
        self.pool_9 = self.pooling_layer(9, self.conv_8, 2, 2)
        self.conv_10 = self.conv_layer(10, self.pool_9, 256, 1, 1)
        self.conv_11 = self.conv_layer(11, self.conv_10, 512, 3, 1)
        self.conv_12 = self.conv_layer(12, self.conv_11, 256, 1, 1)
        self.conv_13 = self.conv_layer(13, self.conv_12, 512, 3, 1)
        self.conv_14 = self.conv_layer(14, self.conv_13, 256, 1, 1)
        self.conv_15 = self.conv_layer(15, self.conv_14, 512, 3, 1)
        self.conv_16 = self.conv_layer(16, self.conv_15, 256, 1, 1)
        self.conv_17 = self.conv_layer(17, self.conv_16, 512, 3, 1)
        self.conv_18 = self.conv_layer(18, self.conv_17, 512, 1, 1)
        self.conv_19 = self.conv_layer(19, self.conv_18, 1024, 3, 1)
        self.pool_20 = self.pooling_layer(20, self.conv_19, 2, 2)
        self.conv_21 = self.conv_layer(21, self.pool_20, 512, 1, 1)
        self.conv_22 = self.conv_layer(22, self.conv_21, 1024, 3, 1)
        self.conv_23 = self.conv_layer(23, self.conv_22, 512, 1, 1)
        self.conv_24 = self.conv_layer(24, self.conv_23, 1024, 3, 1)
        self.conv_25 = self.conv_layer(25, self.conv_24, 1024, 3, 1)
        self.conv_26 = self.conv_layer(26, self.conv_25, 1024, 3, 2)
        self.conv_27 = self.conv_layer(27, self.conv_26, 1024, 3, 1)
        self.conv_28 = self.conv_layer(28, self.conv_27, 1024, 3, 1)
        # fc_30 (4096-d) is exported as the ROLO feature vector; fc_32
        # (1470-d) is the raw YOLO detection output.
        self.fc_29 = self.fc_layer(29, self.conv_28, 512, flat=True, linear=False)
        self.fc_30 = self.fc_layer(30, self.fc_29, 4096, flat=False, linear=False)
        # skip dropout_31
        self.fc_32 = self.fc_layer(32, self.fc_30, 1470, flat=False, linear=True)
        self.sess = tf.Session()
        self.sess.run(tf.initialize_all_variables())
        self.saver = tf.train.Saver()
        self.saver.restore(self.sess, self.weights_file)
        if self.disp_console: print("Loading complete!" + '\n')
    def conv_layer(self, idx, inputs, filters, size, stride):
        """Conv2D (size x size kernel, given stride) + bias + leaky ReLU.

        Pads the input symmetrically by size//2 so the 'VALID' convolution
        behaves like 'SAME'. `idx` only names the ops for the graph.
        """
        channels = inputs.get_shape()[3]
        weight = tf.Variable(tf.truncated_normal([size, size, int(channels), filters], stddev=0.1))
        biases = tf.Variable(tf.constant(0.1, shape=[filters]))
        pad_size = size // 2
        pad_mat = np.array([[0, 0], [pad_size, pad_size], [pad_size, pad_size], [0, 0]])
        inputs_pad = tf.pad(inputs, pad_mat)
        conv = tf.nn.conv2d(inputs_pad, weight, strides=[1, stride, stride, 1], padding='VALID',
                            name=str(idx) + '_conv')
        conv_biased = tf.add(conv, biases, name=str(idx) + '_conv_biased')
        if self.disp_console: print(
            ' Layer %d : Type = Conv, Size = %d * %d, Stride = %d, Filters = %d, Input channels = %d' % (
                idx, size, size, stride, filters, int(channels)))
        # Leaky ReLU: max(alpha * x, x) with alpha = self.alpha.
        return tf.maximum(self.alpha * conv_biased, conv_biased, name=str(idx) + '_leaky_relu')
    def pooling_layer(self, idx, inputs, size, stride):
        """Max pooling with a size x size window, given stride, SAME padding."""
        if self.disp_console: print(
            ' Layer %d : Type = Pool, Size = %d * %d, Stride = %d' % (idx, size, size, stride))
        return tf.nn.max_pool(inputs, ksize=[1, size, size, 1], strides=[1, stride, stride, 1], padding='SAME',
                              name=str(idx) + '_pool')
    def fc_layer(self, idx, inputs, hiddens, flat=False, linear=False):
        """Fully-connected layer with `hiddens` outputs.

        flat: flatten a conv feature map first (transposed to channel-major
            to match the original Darknet weight layout).
        linear: skip the leaky-ReLU activation (identity output).
        """
        input_shape = inputs.get_shape().as_list()
        if flat:
            dim = input_shape[1] * input_shape[2] * input_shape[3]
            inputs_transposed = tf.transpose(inputs, (0, 3, 1, 2))
            inputs_processed = tf.reshape(inputs_transposed, [-1, dim])
        else:
            dim = input_shape[1]
            inputs_processed = inputs
        weight = tf.Variable(tf.truncated_normal([dim, hiddens], stddev=0.1))
        biases = tf.Variable(tf.constant(0.1, shape=[hiddens]))
        if self.disp_console: print(
            ' Layer %d : Type = Full, Hidden = %d, Input dimension = %d, Flat = %d, Activation = %d' % (
                idx, hiddens, int(dim), int(flat), 1 - int(linear)))
        if linear: return tf.add(tf.matmul(inputs_processed, weight), biases, name=str(idx) + '_fc')
        ip = tf.add(tf.matmul(inputs_processed, weight), biases)
        # Leaky ReLU activation.
        return tf.maximum(self.alpha * ip, ip, name=str(idx) + '_fc')
    def detect_from_cvmat(self, img):
        """Run the full YOLO pass on a BGR image array: resize/normalize,
        forward through the net, decode, and display/save the results."""
        s = time.time()
        self.h_img, self.w_img, _ = img.shape
        img_resized = cv2.resize(img, (448, 448))
        img_RGB = cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)
        img_resized_np = np.asarray(img_RGB)
        inputs = np.zeros((1, 448, 448, 3), dtype='float32')
        # Scale pixels from [0, 255] to [-1, 1].
        inputs[0] = (img_resized_np / 255.0) * 2.0 - 1.0
        in_dict = {self.x: inputs}
        net_output = self.sess.run(self.fc_32, feed_dict=in_dict)
        self.result = self.interpret_output(net_output[0])
        self.show_results(img, self.result)
        strtime = str(time.time() - s)
        if self.disp_console: print('Elapsed time : ' + strtime + ' secs' + '\n')
    def detect_from_file(self, filename):
        """Load an image from disk (BGR) and run detection on it.
        NOTE(review): cv2.imread returns None on failure; not checked here."""
        if self.disp_console: print('Detect from ' + filename)
        img = cv2.imread(filename)
        # img = misc.imread(filename)
        self.detect_from_cvmat(img)
def detect_from_crop_sample(self):
self.w_img = 640
self.h_img = 420
f = np.array(open('person_crop.txt', 'r').readlines(), dtype='float32')
inputs = np.zeros((1, 448, 448, 3), dtype='float32')
for c in range(3):
for y in range(448):
for x in range(448):
inputs[0, y, x, c] = f[c * 448 * 448 + y * 448 + x]
in_dict = {self.x: inputs}
net_output = self.sess.run(self.fc_32, feed_dict=in_dict)
self.boxes, self.probs = self.interpret_output(net_output[0])
img = cv2.imread('person.jpg')
self.show_results(self.boxes, img)
    def interpret_output(self, output):
        """Decode the 1470-dim YOLO output vector into detections.

        Layout: 980 class probs (7x7x20) + 98 box confidences (7x7x2) +
        392 box params (7x7x2x4). Returns a list of
        [class_name, x_center, y_center, w, h, confidence] in image pixels
        (scaled by self.w_img / self.h_img), after thresholding and NMS.
        """
        probs = np.zeros((7, 7, 2, 20))
        class_probs = np.reshape(output[0:980], (7, 7, 20))
        scales = np.reshape(output[980:1078], (7, 7, 2))
        boxes = np.reshape(output[1078:], (7, 7, 2, 4))
        # Box centers are predicted relative to their grid cell; add the
        # per-cell offsets, then normalize by the 7-cell grid.
        offset = np.transpose(np.reshape(np.array([np.arange(7)] * 14), (2, 7, 7)), (1, 2, 0))
        boxes[:, :, :, 0] += offset
        boxes[:, :, :, 1] += np.transpose(offset, (1, 0, 2))
        boxes[:, :, :, 0:2] = boxes[:, :, :, 0:2] / 7.0
        # Width/height are predicted as square roots; square them back.
        boxes[:, :, :, 2] = np.multiply(boxes[:, :, :, 2], boxes[:, :, :, 2])
        boxes[:, :, :, 3] = np.multiply(boxes[:, :, :, 3], boxes[:, :, :, 3])
        # Scale normalized coordinates to image pixels.
        boxes[:, :, :, 0] *= self.w_img
        boxes[:, :, :, 1] *= self.h_img
        boxes[:, :, :, 2] *= self.w_img
        boxes[:, :, :, 3] *= self.h_img
        # Per-box, per-class confidence = class prob * box confidence.
        for i in range(2):
            for j in range(20):
                probs[:, :, i, j] = np.multiply(class_probs[:, :, j], scales[:, :, i])
        filter_mat_probs = np.array(probs >= self.threshold, dtype='bool')
        filter_mat_boxes = np.nonzero(filter_mat_probs)
        boxes_filtered = boxes[filter_mat_boxes[0], filter_mat_boxes[1], filter_mat_boxes[2]]
        probs_filtered = probs[filter_mat_probs]
        classes_num_filtered = np.argmax(filter_mat_probs, axis=3)[
            filter_mat_boxes[0], filter_mat_boxes[1], filter_mat_boxes[2]]
        # Sort detections by confidence, descending.
        argsort = np.array(np.argsort(probs_filtered))[::-1]
        boxes_filtered = boxes_filtered[argsort]
        probs_filtered = probs_filtered[argsort]
        classes_num_filtered = classes_num_filtered[argsort]
        # Greedy non-maximum suppression: zero out lower-confidence boxes
        # overlapping a kept box by more than iou_threshold.
        for i in range(len(boxes_filtered)):
            if probs_filtered[i] == 0: continue
            for j in range(i + 1, len(boxes_filtered)):
                if self.iou(boxes_filtered[i], boxes_filtered[j]) > self.iou_threshold:
                    probs_filtered[j] = 0.0
        filter_iou = np.array(probs_filtered > 0.0, dtype='bool')
        boxes_filtered = boxes_filtered[filter_iou]
        probs_filtered = probs_filtered[filter_iou]
        classes_num_filtered = classes_num_filtered[filter_iou]
        result = []
        for i in range(len(boxes_filtered)):
            result.append([self.classes[classes_num_filtered[i]], boxes_filtered[i][0], boxes_filtered[i][1],
                           boxes_filtered[i][2], boxes_filtered[i][3], probs_filtered[i]])
        return result
    def show_results(self, img, results):
        """Draw, print, and/or save detections depending on the instance
        flags (imshow, filewrite_img, filewrite_txt, disp_console).

        results: list of [class_name, x_center, y_center, w, h, confidence].
        """
        img_cp = img.copy()
        if self.filewrite_txt:
            ftxt = open(self.tofile_txt, 'w')
        for i in range(len(results)):
            x = int(results[i][1])
            y = int(results[i][2])
            # Half-extents: boxes are drawn from the center point.
            w = int(results[i][3]) // 2
            h = int(results[i][4]) // 2
            if self.disp_console: print(
                ' class : ' + results[i][0] + ' , [x,y,w,h]=[' + str(x) + ',' + str(y) + ',' + str(
                    int(results[i][3])) + ',' + str(int(results[i][4])) + '], Confidence = ' + str(results[i][5]))
            if self.filewrite_img or self.imshow:
                cv2.rectangle(img_cp, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)
                cv2.rectangle(img_cp, (x - w, y - h - 20), (x + w, y - h), (125, 125, 125), -1)
                cv2.putText(img_cp, results[i][0] + ' : %.2f' % results[i][5], (x - w + 5, y - h - 7),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
            if self.filewrite_txt:
                ftxt.write(results[i][0] + ',' + str(x) + ',' + str(y) + ',' + str(w) + ',' + str(h) + ',' + str(
                    results[i][5]) + '\n')
        if self.filewrite_img:
            if self.disp_console: print(' image file writed : ' + self.tofile_img)
            cv2.imwrite(self.tofile_img, img_cp)
        if self.imshow:
            cv2.imshow('YOLO_small detection', img_cp)
            cv2.waitKey(0)
        if self.filewrite_txt:
            if self.disp_console: print(' txt file writed : ' + self.tofile_txt)
            ftxt.close()
def iou(self, box1, box2):
tb = min(box1[0] + 0.5 * box1[2], box2[0] + 0.5 * box2[2]) - max(box1[0] - 0.5 * box1[2],
box2[0] - 0.5 * box2[2])
lr = min(box1[1] + 0.5 * box1[3], box2[1] + 0.5 * box2[3]) - max(box1[1] - 0.5 * box1[3],
box2[1] - 0.5 * box2[3])
if tb < 0 or lr < 0:
intersection = 0
else:
intersection = tb * lr
return intersection / (box1[2] * box1[3] + box2[2] * box2[3] - intersection)
# my addition
def createFolder(self, path):
if not os.path.exists(path):
os.makedirs(path)
    def debug_location(self, img, location):
        """Draw one labeled detection on a copy of `img` and flash it.

        location: [class, x_center, y_center, w, h, confidence].
        """
        img_cp = img.copy()
        x = int(location[1])
        y = int(location[2])
        w = int(location[3]) // 2
        h = int(location[4]) // 2
        cv2.rectangle(img_cp, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)
        cv2.rectangle(img_cp, (x - w, y - h - 20), (x + w, y - h), (125, 125, 125), -1)
        cv2.putText(img_cp, str(location[0]) + ' : %.2f' % location[5], (x - w + 5, y - h - 7),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
        cv2.imshow('YOLO_small detection', img_cp)
        cv2.waitKey(1)
    def debug_locations(self, img, locations):
        """Draw every labeled detection on one copy of `img` and flash it.

        locations: list of [class, x_center, y_center, w, h, confidence].
        """
        img_cp = img.copy()
        for location in locations:
            x = int(location[1])
            y = int(location[2])
            w = int(location[3]) // 2
            h = int(location[4]) // 2
            cv2.rectangle(img_cp, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)
            cv2.rectangle(img_cp, (x - w, y - h - 20), (x + w, y - h), (125, 125, 125), -1)
            cv2.putText(img_cp, str(location[0]) + ' : %.2f' % location[5], (x - w + 5, y - h - 7),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
        cv2.imshow('YOLO_small detection', img_cp)
        cv2.waitKey(1)
    def debug_gt_location(self, img, location):
        """Draw a ground-truth box and flash it in its own window.

        location: [x1, y1, w, h] in top-left format (unlike the
        center-format detection boxes).
        """
        img_cp = img.copy()
        x = int(location[0])
        y = int(location[1])
        w = int(location[2])
        h = int(location[3])
        cv2.rectangle(img_cp, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imshow('gt', img_cp)
        cv2.waitKey(1)
    def file_to_img(self, filepath):
        """Load an image with OpenCV (BGR).
        NOTE(review): cv2.imread returns None on failure; callers do not
        check for it."""
        img = cv2.imread(filepath)
        return img
def file_to_video(self, filepath):
try:
video = cv2.VideoCapture(filepath)
except IOError:
print('cannot open video file: ' + filepath)
else:
print('unknown error reading video file')
return video
    def iou(self, box1, box2):
        """Intersection-over-union of two center-format boxes [x, y, w, h].
        NOTE(review): exact duplicate of the iou() defined earlier in this
        class; this later definition is the one that takes effect."""
        tb = min(box1[0] + 0.5 * box1[2], box2[0] + 0.5 * box2[2]) - max(box1[0] - 0.5 * box1[2],
                                                                        box2[0] - 0.5 * box2[2])
        lr = min(box1[1] + 0.5 * box1[3], box2[1] + 0.5 * box2[3]) - max(box1[1] - 0.5 * box1[3],
                                                                        box2[1] - 0.5 * box2[3])
        if tb < 0 or lr < 0:
            intersection = 0
        else:
            intersection = tb * lr
        return intersection / (box1[2] * box1[3] + box2[2] * box2[3] - intersection)
def find_iou_cost(self, pred_locs, gts):
# for each element in the batch, find its iou. output a list of ious.
cost = 0
batch_size = len(pred_locs)
assert (len(gts) == batch_size)
print("batch_size: ")
ious = []
for i in range(batch_size):
pred_loc = pred_locs[i]
gt = gts[i]
iou_ = self.iou(pred_loc, gt)
ious.append(self, iou_)
return ious
def load_folder(self, path):
paths = [os.path.join(path, fn) for fn in next(os.walk(path))[2]]
# return paths
return sorted(paths)
def load_dataset_gt(self, gt_file):
txtfile = open(gt_file, "r")
lines = txtfile.read().split('\n') # '\r\n'
return lines
def find_gt_location(self, lines, id):
line = lines[id]
elems = line.split('\t') # for gt type 2
if len(elems) < 4:
elems = line.split(',') # for gt type 1
x1 = elems[0]
y1 = elems[1]
w = elems[2]
h = elems[3]
gt_location = [int(x1), int(y1), int(w), int(h)]
return gt_location
def find_best_location(self, locations, gt_location):
# locations (class, x, y, w, h, prob); (x, y) is the middle pt of the rect
# gt_location (x1, y1, w, h)
x1 = gt_location[0]
y1 = gt_location[1]
w = gt_location[2]
h = gt_location[3]
gt_location_revised = [x1 + w / 2, y1 + h / 2, w, h]
max_ious = 0
for id, location in enumerate(locations):
location_revised = location[1:5]
print("location: ", location_revised)
print("gt_location: ", gt_location_revised)
ious = self.iou(location_revised, gt_location_revised)
if ious >= max_ious:
max_ious = ious
index = id
print("Max IOU: " + str(max_ious))
if max_ious != 0:
best_location = locations[index]
class_index = self.classes.index(best_location[0])
best_location[0] = class_index
return best_location
else: # it means the detection failed, no intersection with the ground truth
return [0, 0, 0, 0, 0, 0]
def save_yolo_output(self, out_fold, yolo_output, filename):
name_no_ext = os.path.splitext(filename)[0]
output_name = name_no_ext
path = os.path.join(out_fold, output_name)
np.save(path, yolo_output)
def location_from_0_to_1(self, wid, ht, location):
location[1] /= wid
location[2] /= ht
location[3] /= wid
location[4] /= ht
return location
def gt_location_from_0_to_1(self, wid, ht, location):
wid *= 1.0
ht *= 1.0
location[0] /= wid
location[1] /= ht
location[2] /= wid
location[3] /= ht
return location
def locations_normal(self, wid, ht, locations):
wid *= 1.0
ht *= 1.0
locations[1] *= wid
locations[2] *= ht
locations[3] *= wid
locations[4] *= ht
return locations
def cal_yolo_loss(self, location, gt_location):
# Translate yolo's box mid-point (x0, y0) to top-left point (x1, y1), in order to compare with gt
location[0] = location[0] - location[2] / 2
location[1] = location[1] - location[3] / 2
loss = sum([(location[i] - gt_location[i]) ** 2 for i in range(4)]) * 100 / 4
return loss
def cal_yolo_IOU(self, location, gt_location):
# Translate yolo's box mid-point (x0, y0) to top-left point (x1, y1), in order to compare with gt
location[0] = location[0] - location[2] / 2
location[1] = location[1] - location[3] / 2
loss = self.iou(location, gt_location)
return loss
    def prepare_training_data(self, img_fold, gt_file,
                              out_fold):  # [or]prepare_training_data(self, list_file, gt_file, out_fold):
        ''' Pass the data through YOLO, and get the fc_17 layer as features, and get the fc_19 layer as locations
        Save the features and locations into file for training LSTM'''
        # For every frame in img_fold: run YOLO, keep the fc_30 feature
        # vector and the best-IoU detection (normalized to [0, 1]),
        # concatenate them, and save as one .npy file per frame.
        paths = self.load_folder(img_fold)
        gt_locations = self.load_dataset_gt(gt_file)
        avg_loss = 0
        total = 0
        total_time = 0
        for id, path in enumerate(paths):
            filename = os.path.basename(path)
            print("processing: ", id, ": ", filename)
            img = self.file_to_img(path)
            # Pass through YOLO layers
            self.h_img, self.w_img, _ = img.shape
            img_resized = cv2.resize(img, (448, 448))
            img_RGB = cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)
            img_resized_np = np.asarray(img_RGB)
            inputs = np.zeros((1, 448, 448, 3), dtype='float32')
            inputs[0] = (img_resized_np / 255.0) * 2.0 - 1.0
            in_dict = {self.x: inputs}
            start_time = time.time()
            feature = self.sess.run(self.fc_30, feed_dict=in_dict)
            cycle_time = time.time() - start_time
            print('cycle time= ', cycle_time)
            total_time += cycle_time
            # NOTE(review): this second sess.run is a separate fetch from
            # the fc_30 run above -- verify it doesn't recompute the convs.
            output = self.sess.run(self.fc_32, feed_dict=in_dict)  # make sure it does not run conv layers twice
            locations = self.interpret_output(output[0])
            gt_location = self.find_gt_location(gt_locations, id)
            location = self.find_best_location(locations,
                                               gt_location)  # find the ROI that has the maximum IOU with the ground truth
            self.debug_location(img, location)
            self.debug_gt_location(img, gt_location)
            # change location into [0, 1]
            loss = self.cal_yolo_IOU(location[1:5], gt_location)
            location = self.location_from_0_to_1(self.w_img, self.h_img, location)
            avg_loss += loss
            total += 1
            print("loss: ", loss)
            # One training row: [4096 features | 6 location params].
            yolo_output = np.concatenate(
                (np.reshape(feature, [-1, self.num_feat]),
                 np.reshape(location, [-1, self.num_predict])),
                axis=1)
            self.save_yolo_output(out_fold, yolo_output, filename)
        avg_loss = avg_loss / total
        print("YOLO avg_loss: ", avg_loss)
        print("Time Spent on Tracking: " + str(total_time))
        print("fps: " + str(id / total_time))
        return
def loc_to_coordinates(self, loc):
loc = [i * 32 for i in loc]
x1 = int(loc[0] - loc[2] / 2)
y1 = int(loc[1] - loc[3] / 2)
x2 = int(loc[0] + loc[2] / 2)
y2 = int(loc[1] + loc[3] / 2)
return [x1, y1, x2, y2]
def coordinates_to_heatmap_vec(self, coord):
heatmap_vec = np.zeros(1024)
print(coord)
[classnum, x1, y1, x2, y2, prob] = coord
[x1, y1, x2, y2] = self.loc_to_coordinates([x1, y1, x2, y2])
for y in range(y1, y2):
for x in range(x1, x2):
index = y * 32 + x
heatmap_vec[index] = 1.0
return heatmap_vec
    def prepare_training_data_heatmap(self, img_fold, gt_file,
                                      out_fold):  # [or]prepare_training_data(self, list_file, gt_file, out_fold):
        ''' Pass the data through YOLO, and get the fc_17 layer as features, and get the fc_19 layer as locations
        Save the features and locations into file for training LSTM'''
        # Heatmap variant: instead of the 6 location params, each frame's
        # best detection is rasterized into a 1024-dim binary heatmap.
        paths = self.load_folder(img_fold)
        gt_locations = self.load_dataset_gt(gt_file)
        avg_loss = 0
        total = 0
        for id, path in enumerate(paths):
            filename = os.path.basename(path)
            print("processing: ", id, ": ", filename)
            img = self.file_to_img(path)
            # Pass through YOLO layers
            self.h_img, self.w_img, _ = img.shape
            img_resized = cv2.resize(img, (448, 448))
            img_RGB = cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)
            img_resized_np = np.asarray(img_RGB)
            inputs = np.zeros((1, 448, 448, 3), dtype='float32')
            inputs[0] = (img_resized_np / 255.0) * 2.0 - 1.0
            in_dict = {self.x: inputs}
            feature = self.sess.run(self.fc_30, feed_dict=in_dict)
            output = self.sess.run(self.fc_32, feed_dict=in_dict)  # make sure it does not run conv layers twice
            locations = self.interpret_output(output[0])
            gt_location = self.find_gt_location(gt_locations, id)
            location = self.find_best_location(locations,
                                               gt_location)  # find the ROI that has the maximum IOU with the ground truth
            self.debug_location(img, location)
            self.debug_gt_location(img, gt_location)
            # change location into [0, 1]
            loss = self.cal_yolo_IOU(location[1:5], gt_location)
            location = self.location_from_0_to_1(self.w_img, self.h_img, location)
            heatmap_vec = self.coordinates_to_heatmap_vec(location)
            avg_loss += loss
            total += 1
            print("loss: ", loss)
            # One training row: [4096 features | 1024 heatmap cells].
            yolo_output = np.concatenate(
                (np.reshape(feature, [-1, self.num_feat]),
                 np.reshape(heatmap_vec, [-1, self.num_heatmap])),
                axis=1)
            self.save_yolo_output(out_fold, yolo_output, filename)
        avg_loss = avg_loss / total
        print("YOLO avg_loss: ", avg_loss)
        return
    def prepare_training_data_multiTarget(self, img_fold, out_fold):
        ''' Pass the data through YOLO, and get the fc_17 layer as features, and get the fc_19 layer as locations
        Save the features and locations into file for training LSTM'''
        # Multi-target variant (no ground truth): keeps *all* detections
        # per frame, normalized to [0, 1], alongside the feature vector.
        print(img_fold)
        paths = self.load_folder(img_fold)
        avg_loss = 0
        total = 0
        for id, path in enumerate(paths):
            filename = os.path.basename(path)
            print("processing: ", id, ": ", filename)
            img = self.file_to_img(path)
            # Pass through YOLO layers
            self.h_img, self.w_img, _ = img.shape
            img_resized = cv2.resize(img, (448, 448))
            img_RGB = cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)
            img_resized_np = np.asarray(img_RGB)
            inputs = np.zeros((1, 448, 448, 3), dtype='float32')
            inputs[0] = (img_resized_np / 255.0) * 2.0 - 1.0
            in_dict = {self.x: inputs}
            feature = self.sess.run(self.fc_30, feed_dict=in_dict)
            output = self.sess.run(self.fc_32, feed_dict=in_dict)  # make sure it does not run conv layers twice
            locations = self.interpret_output(output[0])
            self.debug_locations(img, locations)
            # change location into [0, 1]
            for i in range(0, len(locations)):
                class_index = self.classes.index(locations[i][0])
                locations[i][0] = class_index
                locations[i] = self.location_from_0_to_1(self.w_img, self.h_img, locations[i])
            # Pad with a zero row when only one detection was found.
            if len(locations) == 1:
                print('len(locations)= 1\n')
                yolo_output = [[np.reshape(feature, [-1, self.num_feat])],
                               [np.reshape(locations, [-1, self.num_predict]), [0, 0, 0, 0, 0, 0]]]
            else:
                yolo_output = [[np.reshape(feature, [-1, self.num_feat])],
                               [np.reshape(locations, [-1, self.num_predict])]]
            self.save_yolo_output(out_fold, yolo_output, filename)
        return
'''----------------------------------------main-----------------------------------------------------'''
def main(argvs):
    """Entry point: build YOLO, pick a benchmark sequence by index, and
    export per-frame YOLO features in the heatmap or location format.

    argvs: raw argv list forwarded to YOLO_TF's option parser.
    Raises ValueError for an unsupported sequence index (previously this
    fell through with root_folder/img_fold unbound, crashing later with
    a NameError).
    """
    yolo = YOLO_TF(argvs)
    test = 18
    heatmap = True  # True
    '''
    VOT30
    0:'Human2'
    1:'Human9'
    2:'Gym'
    3:'Human8'
    4:'Skater'
    5:'Suv'
    6:'BlurBody'
    7:'CarScale'
    8:'Dancer2'
    9:'BlurCar1'
    10:'Dog'
    11:'Jump'
    12:'Singer2'
    13:'Woman'
    14:'David3'
    15:'Dancer'
    16:'Human7'
    17:'Bird1'
    18:'Car4'
    19:'CarDark'
    20:'Couple'
    21:'Diving'
    22:'Human3'
    23:'Skating1'
    24:'Human6'
    25:'Singer1'
    26:'Skater2'
    27:'Walking2'
    28:'BlurCar3'
    29:'Girl2'
    MOT2016
    30:'MOT16-02'
    31:'MOT16-04'
    32:'MOT16-05'
    33:'MOT16-09'
    34:'MOT16-10'
    35:'MOT16-11'
    36:'MOT16-13'
    37:'MOT16-01'
    38:'MOT16-03'
    39:'MOT16-06'
    40:'MOT16-07'
    41:'MOT16-08'
    42:'MOT16-12'
    43:'MOT16-14'
    '''
    [yolo.w_img, yolo.h_img, sequence_name, dummy_1, dummy_2] = util.choose_video_sequence(test)
    if (test >= 0 and test <= 29) or (test >= 90):
        root_folder = '/home/marc/Documents/benchmark/DATA'
        img_fold = os.path.join(root_folder, sequence_name, 'img/')
    elif test <= 36:
        root_folder = 'benchmark/MOT/MOT2016/train'
        img_fold = os.path.join(root_folder, sequence_name, 'img1/')
    elif test <= 43:
        root_folder = 'benchmark/MOT/MOT2016/test'
        img_fold = os.path.join(root_folder, sequence_name, 'img1/')
    else:
        # BUGFIX: fail fast instead of continuing with unbound paths.
        raise ValueError('unsupported test sequence index: %d' % test)
    gt_file = os.path.join(root_folder, sequence_name, 'groundtruth_rect.txt')
    out_fold = os.path.join(root_folder, sequence_name, 'yolo_out/')
    heat_fold = os.path.join(root_folder, sequence_name, 'yolo_heat/')
    yolo.createFolder(out_fold)
    yolo.createFolder(heat_fold)
    if heatmap is True:
        yolo.prepare_training_data_heatmap(img_fold, gt_file, heat_fold)
    else:
        if (test >= 0 and test <= 29) or (test >= 90):
            yolo.prepare_training_data(img_fold, gt_file, out_fold)
        else:
            yolo.prepare_training_data_multiTarget(img_fold, out_fold)
# Script entry point: forward the raw CLI arguments to main().
if __name__ == '__main__':
    main(sys.argv)
| [
"cv2.rectangle",
"tensorflow.pad",
"tensorflow.transpose",
"cv2.imshow",
"numpy.argsort",
"numpy.array",
"numpy.save",
"os.walk",
"numpy.arange",
"os.path.exists",
"numpy.multiply",
"numpy.reshape",
"ROLO_utils.choose_video_sequence",
"tensorflow.Session",
"tensorflow.placeholder",
"nu... | [((24313, 24345), 'ROLO_utils.choose_video_sequence', 'util.choose_video_sequence', (['test'], {}), '(test)\n', (24339, 24345), True, 'import ROLO_utils as util\n'), ((24776, 24840), 'os.path.join', 'os.path.join', (['root_folder', 'sequence_name', '"""groundtruth_rect.txt"""'], {}), "(root_folder, sequence_name, 'groundtruth_rect.txt')\n", (24788, 24840), False, 'import os\n'), ((24853, 24906), 'os.path.join', 'os.path.join', (['root_folder', 'sequence_name', '"""yolo_out/"""'], {}), "(root_folder, sequence_name, 'yolo_out/')\n", (24865, 24906), False, 'import os\n'), ((24920, 24974), 'os.path.join', 'os.path.join', (['root_folder', 'sequence_name', '"""yolo_heat/"""'], {}), "(root_folder, sequence_name, 'yolo_heat/')\n", (24932, 24974), False, 'import os\n'), ((1628, 1674), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""', '[None, 448, 448, 3]'], {}), "('float32', [None, 448, 448, 3])\n", (1642, 1674), True, 'import tensorflow as tf\n'), ((3640, 3652), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3650, 3652), True, 'import tensorflow as tf\n'), ((3715, 3731), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3729, 3731), True, 'import tensorflow as tf\n'), ((4124, 4194), 'numpy.array', 'np.array', (['[[0, 0], [pad_size, pad_size], [pad_size, pad_size], [0, 0]]'], {}), '([[0, 0], [pad_size, pad_size], [pad_size, pad_size], [0, 0]])\n', (4132, 4194), True, 'import numpy as np\n'), ((4210, 4233), 'tensorflow.pad', 'tf.pad', (['inputs', 'pad_mat'], {}), '(inputs, pad_mat)\n', (4216, 4233), True, 'import tensorflow as tf\n'), ((5961, 5972), 'time.time', 'time.time', ([], {}), '()\n', (5970, 5972), False, 'import time\n'), ((6029, 6056), 'cv2.resize', 'cv2.resize', (['img', '(448, 448)'], {}), '(img, (448, 448))\n', (6039, 6056), False, 'import cv2\n'), ((6069, 6113), 'cv2.cvtColor', 'cv2.cvtColor', (['img_resized', 'cv2.COLOR_BGR2RGB'], {}), '(img_resized, cv2.COLOR_BGR2RGB)\n', (6081, 6113), False, 'import cv2\n'), ((6133, 
6152), 'numpy.asarray', 'np.asarray', (['img_RGB'], {}), '(img_RGB)\n', (6143, 6152), True, 'import numpy as np\n'), ((6164, 6207), 'numpy.zeros', 'np.zeros', (['(1, 448, 448, 3)'], {'dtype': '"""float32"""'}), "((1, 448, 448, 3), dtype='float32')\n", (6172, 6207), True, 'import numpy as np\n'), ((6653, 6673), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (6663, 6673), False, 'import cv2\n'), ((6896, 6939), 'numpy.zeros', 'np.zeros', (['(1, 448, 448, 3)'], {'dtype': '"""float32"""'}), "((1, 448, 448, 3), dtype='float32')\n", (6904, 6939), True, 'import numpy as np\n'), ((7229, 7253), 'cv2.imread', 'cv2.imread', (['"""person.jpg"""'], {}), "('person.jpg')\n", (7239, 7253), False, 'import cv2\n'), ((7339, 7362), 'numpy.zeros', 'np.zeros', (['(7, 7, 2, 20)'], {}), '((7, 7, 2, 20))\n', (7347, 7362), True, 'import numpy as np\n'), ((7379, 7416), 'numpy.reshape', 'np.reshape', (['output[0:980]', '(7, 7, 20)'], {}), '(output[0:980], (7, 7, 20))\n', (7389, 7416), True, 'import numpy as np\n'), ((7428, 7467), 'numpy.reshape', 'np.reshape', (['output[980:1078]', '(7, 7, 2)'], {}), '(output[980:1078], (7, 7, 2))\n', (7438, 7467), True, 'import numpy as np\n'), ((7478, 7517), 'numpy.reshape', 'np.reshape', (['output[1078:]', '(7, 7, 2, 4)'], {}), '(output[1078:], (7, 7, 2, 4))\n', (7488, 7517), True, 'import numpy as np\n'), ((7661, 7692), 'numpy.transpose', 'np.transpose', (['offset', '(1, 0, 2)'], {}), '(offset, (1, 0, 2))\n', (7673, 7692), True, 'import numpy as np\n'), ((7765, 7814), 'numpy.multiply', 'np.multiply', (['boxes[:, :, :, 2]', 'boxes[:, :, :, 2]'], {}), '(boxes[:, :, :, 2], boxes[:, :, :, 2])\n', (7776, 7814), True, 'import numpy as np\n'), ((7837, 7886), 'numpy.multiply', 'np.multiply', (['boxes[:, :, :, 3]', 'boxes[:, :, :, 3]'], {}), '(boxes[:, :, :, 3], boxes[:, :, :, 3])\n', (7848, 7886), True, 'import numpy as np\n'), ((8166, 8213), 'numpy.array', 'np.array', (['(probs >= self.threshold)'], {'dtype': '"""bool"""'}), "(probs >= 
self.threshold, dtype='bool')\n", (8174, 8213), True, 'import numpy as np\n'), ((8235, 8263), 'numpy.nonzero', 'np.nonzero', (['filter_mat_probs'], {}), '(filter_mat_probs)\n', (8245, 8263), True, 'import numpy as np\n'), ((8967, 9011), 'numpy.array', 'np.array', (['(probs_filtered > 0.0)'], {'dtype': '"""bool"""'}), "(probs_filtered > 0.0, dtype='bool')\n", (8975, 9011), True, 'import numpy as np\n'), ((11463, 11532), 'cv2.rectangle', 'cv2.rectangle', (['img_cp', '(x - w, y - h)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(img_cp, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)\n', (11476, 11532), False, 'import cv2\n'), ((11535, 11614), 'cv2.rectangle', 'cv2.rectangle', (['img_cp', '(x - w, y - h - 20)', '(x + w, y - h)', '(125, 125, 125)', '(-1)'], {}), '(img_cp, (x - w, y - h - 20), (x + w, y - h), (125, 125, 125), -1)\n', (11548, 11614), False, 'import cv2\n'), ((11757, 11799), 'cv2.imshow', 'cv2.imshow', (['"""YOLO_small detection"""', 'img_cp'], {}), "('YOLO_small detection', img_cp)\n", (11767, 11799), False, 'import cv2\n'), ((11802, 11816), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (11813, 11816), False, 'import cv2\n'), ((12319, 12361), 'cv2.imshow', 'cv2.imshow', (['"""YOLO_small detection"""', 'img_cp'], {}), "('YOLO_small detection', img_cp)\n", (12329, 12361), False, 'import cv2\n'), ((12364, 12378), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (12375, 12378), False, 'import cv2\n'), ((12541, 12602), 'cv2.rectangle', 'cv2.rectangle', (['img_cp', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(img_cp, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (12554, 12602), False, 'import cv2\n'), ((12605, 12629), 'cv2.imshow', 'cv2.imshow', (['"""gt"""', 'img_cp'], {}), "('gt', img_cp)\n", (12615, 12629), False, 'import cv2\n'), ((12632, 12646), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (12643, 12646), False, 'import cv2\n'), ((12690, 12710), 'cv2.imread', 'cv2.imread', (['filepath'], {}), '(filepath)\n', (12700, 
12710), False, 'import cv2\n'), ((15408, 15443), 'os.path.join', 'os.path.join', (['out_fold', 'output_name'], {}), '(out_fold, output_name)\n', (15420, 15443), False, 'import os\n'), ((15446, 15472), 'numpy.save', 'np.save', (['path', 'yolo_output'], {}), '(path, yolo_output)\n', (15453, 15472), True, 'import numpy as np\n'), ((19100, 19114), 'numpy.zeros', 'np.zeros', (['(1024)'], {}), '(1024)\n', (19108, 19114), True, 'import numpy as np\n'), ((24462, 24510), 'os.path.join', 'os.path.join', (['root_folder', 'sequence_name', '"""img/"""'], {}), "(root_folder, sequence_name, 'img/')\n", (24474, 24510), False, 'import os\n'), ((3669, 3698), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (3696, 3698), True, 'import tensorflow as tf\n'), ((4053, 4086), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': '[filters]'}), '(0.1, shape=[filters])\n', (4064, 4086), True, 'import tensorflow as tf\n'), ((5225, 5259), 'tensorflow.transpose', 'tf.transpose', (['inputs', '(0, 3, 1, 2)'], {}), '(inputs, (0, 3, 1, 2))\n', (5237, 5259), True, 'import tensorflow as tf\n'), ((5282, 5322), 'tensorflow.reshape', 'tf.reshape', (['inputs_transposed', '[-1, dim]'], {}), '(inputs_transposed, [-1, dim])\n', (5292, 5322), True, 'import tensorflow as tf\n'), ((5407, 5454), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[dim, hiddens]'], {'stddev': '(0.1)'}), '([dim, hiddens], stddev=0.1)\n', (5426, 5454), True, 'import tensorflow as tf\n'), ((5479, 5512), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': '[hiddens]'}), '(0.1, shape=[hiddens])\n', (5490, 5512), True, 'import tensorflow as tf\n'), ((5810, 5845), 'tensorflow.matmul', 'tf.matmul', (['inputs_processed', 'weight'], {}), '(inputs_processed, weight)\n', (5819, 5845), True, 'import tensorflow as tf\n'), ((8420, 8455), 'numpy.argmax', 'np.argmax', (['filter_mat_probs'], {'axis': '(3)'}), '(filter_mat_probs, axis=3)\n', (8429, 8455), True, 'import numpy as np\n'), 
((10503, 10539), 'cv2.imwrite', 'cv2.imwrite', (['self.tofile_img', 'img_cp'], {}), '(self.tofile_img, img_cp)\n', (10514, 10539), False, 'import cv2\n'), ((10561, 10603), 'cv2.imshow', 'cv2.imshow', (['"""YOLO_small detection"""', 'img_cp'], {}), "('YOLO_small detection', img_cp)\n", (10571, 10603), False, 'import cv2\n'), ((10607, 10621), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (10618, 10621), False, 'import cv2\n'), ((11251, 11271), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (11265, 11271), False, 'import os\n'), ((11276, 11293), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (11287, 11293), False, 'import os\n'), ((12022, 12091), 'cv2.rectangle', 'cv2.rectangle', (['img_cp', '(x - w, y - h)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(img_cp, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)\n', (12035, 12091), False, 'import cv2\n'), ((12095, 12174), 'cv2.rectangle', 'cv2.rectangle', (['img_cp', '(x - w, y - h - 20)', '(x + w, y - h)', '(125, 125, 125)', '(-1)'], {}), '(img_cp, (x - w, y - h - 20), (x + w, y - h), (125, 125, 125), -1)\n', (12108, 12174), False, 'import cv2\n'), ((12779, 12805), 'cv2.VideoCapture', 'cv2.VideoCapture', (['filepath'], {}), '(filepath)\n', (12795, 12805), False, 'import cv2\n'), ((13811, 13833), 'os.path.join', 'os.path.join', (['path', 'fn'], {}), '(path, fn)\n', (13823, 13833), False, 'import os\n'), ((15341, 15367), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (15357, 15367), False, 'import os\n'), ((17140, 17162), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (17156, 17162), False, 'import os\n'), ((17329, 17356), 'cv2.resize', 'cv2.resize', (['img', '(448, 448)'], {}), '(img, (448, 448))\n', (17339, 17356), False, 'import cv2\n'), ((17370, 17414), 'cv2.cvtColor', 'cv2.cvtColor', (['img_resized', 'cv2.COLOR_BGR2RGB'], {}), '(img_resized, cv2.COLOR_BGR2RGB)\n', (17382, 17414), False, 'import cv2\n'), ((17435, 17454), 
'numpy.asarray', 'np.asarray', (['img_RGB'], {}), '(img_RGB)\n', (17445, 17454), True, 'import numpy as np\n'), ((17467, 17510), 'numpy.zeros', 'np.zeros', (['(1, 448, 448, 3)'], {'dtype': '"""float32"""'}), "((1, 448, 448, 3), dtype='float32')\n", (17475, 17510), True, 'import numpy as np\n'), ((17610, 17621), 'time.time', 'time.time', ([], {}), '()\n', (17619, 17621), False, 'import time\n'), ((19876, 19898), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (19892, 19898), False, 'import os\n'), ((20065, 20092), 'cv2.resize', 'cv2.resize', (['img', '(448, 448)'], {}), '(img, (448, 448))\n', (20075, 20092), False, 'import cv2\n'), ((20106, 20150), 'cv2.cvtColor', 'cv2.cvtColor', (['img_resized', 'cv2.COLOR_BGR2RGB'], {}), '(img_resized, cv2.COLOR_BGR2RGB)\n', (20118, 20150), False, 'import cv2\n'), ((20171, 20190), 'numpy.asarray', 'np.asarray', (['img_RGB'], {}), '(img_RGB)\n', (20181, 20190), True, 'import numpy as np\n'), ((20203, 20246), 'numpy.zeros', 'np.zeros', (['(1, 448, 448, 3)'], {'dtype': '"""float32"""'}), "((1, 448, 448, 3), dtype='float32')\n", (20211, 20246), True, 'import numpy as np\n'), ((21790, 21812), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (21806, 21812), False, 'import os\n'), ((21980, 22007), 'cv2.resize', 'cv2.resize', (['img', '(448, 448)'], {}), '(img, (448, 448))\n', (21990, 22007), False, 'import cv2\n'), ((22021, 22065), 'cv2.cvtColor', 'cv2.cvtColor', (['img_resized', 'cv2.COLOR_BGR2RGB'], {}), '(img_resized, cv2.COLOR_BGR2RGB)\n', (22033, 22065), False, 'import cv2\n'), ((22086, 22105), 'numpy.asarray', 'np.asarray', (['img_RGB'], {}), '(img_RGB)\n', (22096, 22105), True, 'import numpy as np\n'), ((22118, 22161), 'numpy.zeros', 'np.zeros', (['(1, 448, 448, 3)'], {'dtype': '"""float32"""'}), "((1, 448, 448, 3), dtype='float32')\n", (22126, 22161), True, 'import numpy as np\n'), ((24588, 24637), 'os.path.join', 'os.path.join', (['root_folder', 'sequence_name', '"""img1/"""'], {}), 
"(root_folder, sequence_name, 'img1/')\n", (24600, 24637), False, 'import os\n'), ((5728, 5763), 'tensorflow.matmul', 'tf.matmul', (['inputs_processed', 'weight'], {}), '(inputs_processed, weight)\n', (5737, 5763), True, 'import tensorflow as tf\n'), ((6455, 6466), 'time.time', 'time.time', ([], {}), '()\n', (6464, 6466), False, 'import time\n'), ((8093, 8143), 'numpy.multiply', 'np.multiply', (['class_probs[:, :, j]', 'scales[:, :, i]'], {}), '(class_probs[:, :, j], scales[:, :, i])\n', (8104, 8143), True, 'import numpy as np\n'), ((8545, 8571), 'numpy.argsort', 'np.argsort', (['probs_filtered'], {}), '(probs_filtered)\n', (8555, 8571), True, 'import numpy as np\n'), ((9945, 10014), 'cv2.rectangle', 'cv2.rectangle', (['img_cp', '(x - w, y - h)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(img_cp, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)\n', (9958, 10014), False, 'import cv2\n'), ((10019, 10098), 'cv2.rectangle', 'cv2.rectangle', (['img_cp', '(x - w, y - h - 20)', '(x + w, y - h)', '(125, 125, 125)', '(-1)'], {}), '(img_cp, (x - w, y - h - 20), (x + w, y - h), (125, 125, 125), -1)\n', (10032, 10098), False, 'import cv2\n'), ((10103, 10239), 'cv2.putText', 'cv2.putText', (['img_cp', "(results[i][0] + ' : %.2f' % results[i][5])", '(x - w + 5, y - h - 7)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 0)', '(1)'], {}), "(img_cp, results[i][0] + ' : %.2f' % results[i][5], (x - w + 5, \n y - h - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)\n", (10114, 10239), False, 'import cv2\n'), ((17696, 17707), 'time.time', 'time.time', ([], {}), '()\n', (17705, 17707), False, 'import time\n'), ((24714, 24763), 'os.path.join', 'os.path.join', (['root_folder', 'sequence_name', '"""img1/"""'], {}), "(root_folder, sequence_name, 'img1/')\n", (24726, 24763), False, 'import os\n'), ((18479, 18519), 'numpy.reshape', 'np.reshape', (['feature', '[-1, self.num_feat]'], {}), '(feature, [-1, self.num_feat])\n', (18489, 18519), True, 'import numpy as np\n'), ((18526, 18570), 
'numpy.reshape', 'np.reshape', (['location', '[-1, self.num_predict]'], {}), '(location, [-1, self.num_predict])\n', (18536, 18570), True, 'import numpy as np\n'), ((21142, 21182), 'numpy.reshape', 'np.reshape', (['feature', '[-1, self.num_feat]'], {}), '(feature, [-1, self.num_feat])\n', (21152, 21182), True, 'import numpy as np\n'), ((21189, 21236), 'numpy.reshape', 'np.reshape', (['heatmap_vec', '[-1, self.num_heatmap]'], {}), '(heatmap_vec, [-1, self.num_heatmap])\n', (21199, 21236), True, 'import numpy as np\n'), ((13849, 13862), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (13856, 13862), False, 'import os\n'), ((22818, 22858), 'numpy.reshape', 'np.reshape', (['feature', '[-1, self.num_feat]'], {}), '(feature, [-1, self.num_feat])\n', (22828, 22858), True, 'import numpy as np\n'), ((22872, 22917), 'numpy.reshape', 'np.reshape', (['locations', '[-1, self.num_predict]'], {}), '(locations, [-1, self.num_predict])\n', (22882, 22917), True, 'import numpy as np\n'), ((22969, 23009), 'numpy.reshape', 'np.reshape', (['feature', '[-1, self.num_feat]'], {}), '(feature, [-1, self.num_feat])\n', (22979, 23009), True, 'import numpy as np\n'), ((23023, 23068), 'numpy.reshape', 'np.reshape', (['locations', '[-1, self.num_predict]'], {}), '(locations, [-1, self.num_predict])\n', (23033, 23068), True, 'import numpy as np\n'), ((7563, 7575), 'numpy.arange', 'np.arange', (['(7)'], {}), '(7)\n', (7572, 7575), True, 'import numpy as np\n')] |
import time
from collections import OrderedDict
import numpy as np
import pandas as pd
import torch
from torch.utils.data import DataLoader
import logging
from tqdm import tqdm
from neuralprophet import configure
from neuralprophet import time_net
from neuralprophet import time_dataset
from neuralprophet import df_utils
from neuralprophet import utils
from neuralprophet.plot_forecast import plot, plot_components
from neuralprophet.plot_model_parameters import plot_parameters
from neuralprophet import metrics
# Module-level logger, namespaced under the NeuralProphet ("NP") logger hierarchy.
log = logging.getLogger("NP.forecaster")

# Registry mapping the metric names accepted by the `collect_metrics` argument
# of NeuralProphet.__init__ to their metric classes.
METRICS = {
    "mae": metrics.MAE,
    "mse": metrics.MSE,
    "rmse": metrics.RMSE,
}
class NeuralProphet:
    """NeuralProphet forecaster.

    A simple yet powerful forecaster that models:
    Trend, seasonality, events, holidays, auto-regression, lagged covariates, and future-known regressors.
    Can be regularized and configured to model nonlinear relationships.
    """
def __init__(
    self,
    growth="linear",
    changepoints=None,
    n_changepoints=10,
    changepoints_range=0.9,
    trend_reg=0,
    trend_reg_threshold=False,
    yearly_seasonality="auto",
    weekly_seasonality="auto",
    daily_seasonality="auto",
    seasonality_mode="additive",
    seasonality_reg=0,
    n_forecasts=1,
    n_lags=0,
    num_hidden_layers=0,
    d_hidden=None,
    ar_sparsity=None,
    learning_rate=None,
    epochs=None,
    batch_size=None,
    loss_func="Huber",
    optimizer="AdamW",
    newer_samples_weight=2,
    newer_samples_start=0.0,
    impute_missing=True,
    collect_metrics=True,
    normalize="auto",
    global_normalization=False,
    global_time_normalization=True,
    unknown_data_normalization=False,
):
    """
    Args:
        ## Trend Config
        growth (str): ['off', 'linear'] to specify
            no trend or a linear trend.
            Note: 'discontinuous' setting is actually not a trend per se. only use if you know what you do.
        changepoints list: Dates at which to include potential changepoints.
            If not specified, potential changepoints are selected automatically.
            data format: list of str, list of np.datetimes, np.array of np.datetimes (not np.array of np.str)
        n_changepoints (int): Number of potential changepoints to include.
            Changepoints are selected uniformly from the first `changepoint_range` proportion of the history.
            Not used if input `changepoints` is supplied. If `changepoints` is not supplied.
        changepoints_range (float): Proportion of history in which trend changepoints will
            be estimated. Defaults to 0.9 for the first 90%. Not used if `changepoints` is specified.
        trend_reg (float): Parameter modulating the flexibility of the automatic changepoint selection.
            Large values (~1-100) will limit the variability of changepoints.
            Small values (~0.001-1.0) will allow changepoints to change faster.
            default: 0 will fully fit a trend to each segment.
        trend_reg_threshold (bool, float): Allowance for trend to change without regularization.
            True: Automatically set to a value that leads to a smooth trend.
            False: All changes in changepoints are regularized

        ## Seasonality Config
        yearly_seasonality (bool, int): Fit yearly seasonality.
            Can be 'auto', True, False, or a number of Fourier/linear terms to generate.
        weekly_seasonality (bool, int): Fit weekly seasonality.
            Can be 'auto', True, False, or a number of Fourier/linear terms to generate.
        daily_seasonality (bool, int): Fit daily seasonality.
            Can be 'auto', True, False, or a number of Fourier/linear terms to generate.
        seasonality_mode (str): 'additive' (default) or 'multiplicative'.
        seasonality_reg (float): Parameter modulating the strength of the seasonality model.
            Smaller values (~0.1-1) allow the model to fit larger seasonal fluctuations,
            larger values (~1-100) dampen the seasonality.
            default: None, no regularization

        ## AR Config
        n_lags (int): Previous time series steps to include in auto-regression. Aka AR-order
        ar_sparsity (float): [0-1], how much sparsity to induce in the AR-coefficients.
            Should be around (# nonzero components) / (AR order), eg. 3/100 = 0.03

        ## Model Config
        n_forecasts (int): Number of steps ahead of prediction time step to forecast.
        num_hidden_layers (int): number of hidden layer to include in AR-Net. defaults to 0.
        d_hidden (int): dimension of hidden layers of the AR-Net. Ignored if num_hidden_layers == 0.

        ## Train Config
        learning_rate (float): Maximum learning rate setting for 1cycle policy scheduler.
            default: None: Automatically sets the learning_rate based on a learning rate range test.
            For manual values, try values ~0.001-10.
        epochs (int): Number of epochs (complete iterations over dataset) to train model.
            default: None: Automatically sets the number of epochs based on dataset size.
                For best results also leave batch_size to None.
            For manual values, try ~5-500.
        batch_size (int): Number of samples per mini-batch.
            default: None: Automatically sets the batch_size based on dataset size.
                For best results also leave epochs to None.
            For manual values, try ~1-512.
        loss_func (str, torch.nn.modules.loss._Loss, 'typing.Callable'):
            Type of loss to use: str ['Huber', 'MSE', 'MAE'],
            or torch loss or callable for custom loss, eg. asymmetric Huber loss
        collect_metrics (list, bool): the names of metrics to compute. Valid: ['mae', 'rmse', 'mse']
            True (default): ['mae', 'rmse']
            False: No metrics

        ## Missing Data
        impute_missing (bool): whether to automatically impute missing dates/values
            imputation follows a linear method up to 10 missing values, more are filled with trend.

        ## Data Normalization
        normalize (str): Type of normalization to apply to the time series.
            options: [ 'off', 'minmax, 'standardize', 'soft', 'soft1']
            default: 'soft', unless the time series is binary, in which case 'minmax' is applied.
            'off' bypasses data normalization
            'minmax' scales the minimum value to 0.0 and the maximum value to 1.0
            'standardize' zero-centers and divides by the standard deviation
            'soft' scales the minimum value to 0.0 and the 95th quantile to 1.0
            'soft1' scales the minimum value to 0.1 and the 90th quantile to 0.9
        global_normalization (bool): when set to true and dict of dataframes are used as global_time_normalization,
            input global data params are considered - default is local normalization.
        global_time_normalization (bool): set time data_params locally when set to false,
            only valid in case of global modeling local normalization (default)
        unknown_data_normalization (bool): when unknown_data_normalization is set to True, test data is normalized with global data params even if trained with local data params (global modeling with local normalization)
    """
    # Capture all constructor arguments before any other local is defined; the
    # configure.from_kwargs(...) calls below pull their fields out of this dict.
    kwargs = locals()

    # General
    self.name = "NeuralProphet"
    self.n_forecasts = n_forecasts

    # Data Normalization settings
    self.config_normalization = configure.Normalization(
        normalize=normalize,
        global_normalization=global_normalization,
        global_time_normalization=global_time_normalization,
        unknown_data_normalization=unknown_data_normalization,
    )

    # Missing Data Preprocessing
    self.impute_missing = impute_missing
    self.impute_limit_linear = 5
    self.impute_rolling = 20

    # Training
    self.config_train = configure.from_kwargs(configure.Train, kwargs)

    # Normalize `collect_metrics` into either a list of metric names or False.
    if collect_metrics is None:
        collect_metrics = []
    elif collect_metrics is True:
        collect_metrics = ["mae", "rmse"]
    elif isinstance(collect_metrics, str):
        if not collect_metrics.lower() in METRICS.keys():
            raise ValueError("Received unsupported argument for collect_metrics.")
        collect_metrics = [collect_metrics]
    elif isinstance(collect_metrics, list):
        if not all([m.lower() in METRICS.keys() for m in collect_metrics]):
            raise ValueError("Received unsupported argument for collect_metrics.")
    elif collect_metrics is not False:
        raise ValueError("Received unsupported argument for collect_metrics.")

    # Build the metrics collection (None when collect_metrics resolved to False).
    self.metrics = None
    if isinstance(collect_metrics, list):
        self.metrics = metrics.MetricsCollection(
            metrics=[metrics.LossMetric(self.config_train.loss_func)]
            + [METRICS[m.lower()]() for m in collect_metrics],
            value_metrics=[metrics.ValueMetric("RegLoss")],
        )

    # AR
    self.config_ar = configure.from_kwargs(configure.AR, kwargs)
    self.n_lags = self.config_ar.n_lags
    if n_lags == 0 and n_forecasts > 1:
        # Without auto-regression every future step is computed the same way,
        # so multi-step forecasting degenerates to a single step.
        self.n_forecasts = 1
        log.warning(
            "Changing n_forecasts to 1. Without lags, the forecast can be "
            "computed for any future time, independent of lagged values"
        )

    # Model
    self.config_model = configure.from_kwargs(configure.Model, kwargs)

    # Trend
    self.config_trend = configure.from_kwargs(configure.Trend, kwargs)

    # Seasonality
    self.season_config = configure.AllSeason(
        mode=seasonality_mode,
        reg_lambda=seasonality_reg,
        yearly_arg=yearly_seasonality,
        weekly_arg=weekly_seasonality,
        daily_arg=daily_seasonality,
    )
    self.config_train.reg_lambda_season = self.season_config.reg_lambda

    # Events
    self.events_config = None
    self.country_holidays_config = None

    # Extra Regressors
    self.config_covar = None
    self.regressors_config = None

    # set during fit()
    self.data_freq = None

    # Set during _train()
    self.fitted = False
    self.data_params = None
    self.optimizer = None
    self.scheduler = None
    self.model = None

    # set during prediction
    self.future_periods = None
    # later set by user (optional)
    self.highlight_forecast_step_n = None
    self.true_ar_weights = None
def add_lagged_regressor(self, names, regularization=None, normalize="auto", only_last_value=False):
    """Register one or more covariate time series as additional lagged regressors.

    The dataframe later passed to `fit` and `predict` must contain a column of the
    given name for each registered covariate. Unless binary, a covariate is
    normalized when normalize is enabled.

    Args:
        names (string or list): name of the regressor/list of regressors.
        regularization (float): optional scale for regularization strength
        normalize (bool): optional, whether to normalize this regressor prior to fitting.
            if 'auto', binary regressors will not be normalized.
        only_last_value (bool):
            False (default) use same number of lags as auto-regression
            True: only use last known value as input

    Returns:
        NeuralProphet object
    """
    # Covariates can only be configured before training and require AR lags.
    if self.fitted:
        raise Exception("Covariates must be added prior to model fitting.")
    if self.n_lags == 0:
        raise Exception("Covariates must be set jointly with Auto-Regression.")
    name_list = names if isinstance(names, list) else [names]
    for covar_name in name_list:
        self._validate_column_name(covar_name)
        if self.config_covar is None:
            # Lazily create the covariate registry on first use.
            self.config_covar = OrderedDict({})
        self.config_covar[covar_name] = configure.Covar(
            reg_lambda=regularization, normalize=normalize, as_scalar=only_last_value
        )
    return self
def add_future_regressor(self, name, regularization=None, normalize="auto", mode="additive"):
    """Register a regressor whose future values are known in advance (scalar per time step).

    The dataframe later passed to `fit` and `predict` must contain a column with
    the given name. Unless binary, the regressor is normalized when normalize
    is enabled.

    Args:
        name (string): name of the regressor.
        regularization (float): optional scale for regularization strength
        normalize (bool): optional, whether to normalize this regressor prior to fitting.
            if 'auto', binary regressors will not be normalized.
        mode (str): 'additive' (default) or 'multiplicative'.

    Returns:
        NeuralProphet object
    """
    if self.fitted:
        raise Exception("Regressors must be added prior to model fitting.")
    if regularization is not None:
        if regularization < 0:
            raise ValueError("regularization must be >= 0")
        elif regularization == 0:
            # A zero strength means "no regularization".
            regularization = None
    self._validate_column_name(name)
    if self.regressors_config is None:
        # Lazily create the regressor registry on first use.
        self.regressors_config = {}
    self.regressors_config[name] = configure.Regressor(reg_lambda=regularization, normalize=normalize, mode=mode)
    return self
def add_events(self, events, lower_window=0, upper_window=0, regularization=None, mode="additive"):
    """Register user-specified events with their windows and regularization.

    Args:
        events (str, list): name or list of names of user specified events
        lower_window (int): the lower window for the events in the list of events
        upper_window (int): the upper window for the events in the list of events
        regularization (float): optional scale for regularization strength
        mode (str): 'additive' (default) or 'multiplicative'.

    Returns:
        NeuralProphet object
    """
    if self.fitted:
        raise Exception("Events must be added prior to model fitting.")
    if self.events_config is None:
        # Lazily create the event registry on first use.
        self.events_config = OrderedDict({})
    if regularization is not None:
        if regularization < 0:
            raise ValueError("regularization must be >= 0")
        elif regularization == 0:
            # A zero strength means "no regularization".
            regularization = None
    event_names = events if isinstance(events, list) else [events]
    for event_name in event_names:
        self._validate_column_name(event_name)
        self.events_config[event_name] = configure.Event(
            lower_window=lower_window, upper_window=upper_window, reg_lambda=regularization, mode=mode
        )
    return self
def add_country_holidays(self, country_name, lower_window=0, upper_window=0, regularization=None, mode="additive"):
    """Include the holidays of a given country as events.

    Creates the corresponding holiday configuration (windows, regularization, mode)
    and initializes the country's holiday set.

    Args:
        country_name (string): name of the country
        lower_window (int): the lower window for all the country holidays
        upper_window (int): the upper window for all the country holidays
        regularization (float): optional scale for regularization strength
        mode (str): 'additive' (default) or 'multiplicative'.

    Returns:
        NeuralProphet object
    """
    if self.fitted:
        raise Exception("Country must be specified prior to model fitting.")
    if regularization is not None:
        if regularization < 0:
            raise ValueError("regularization must be >= 0")
        elif regularization == 0:
            # A zero strength means "no regularization".
            regularization = None
    self.country_holidays_config = configure.Holidays(
        country=country_name,
        lower_window=lower_window,
        upper_window=upper_window,
        reg_lambda=regularization,
        mode=mode,
    )
    # Resolve the actual holiday dates for the configured country.
    self.country_holidays_config.init_holidays()
    return self
def add_seasonality(self, name, period, fourier_order):
    """Register a custom seasonal component.

    More Fourier components let the seasonality change more quickly (at the risk
    of overfitting). Regularization and mode (additive/multiplicative) are taken
    from the main init settings.

    Args:
        name (string): name of the seasonality component.
        period (float): number of days in one period.
        fourier_order (int): number of Fourier components to use.

    Returns:
        The NeuralProphet object.
    """
    if self.fitted:
        raise Exception("Seasonality must be added prior to model fitting.")
    if name in ("daily", "weekly", "yearly"):
        log.error("Please use inbuilt daily, weekly, or yearly seasonality or set another name.")
    # Built-in seasonality names may not be overwritten; validation enforces this.
    self._validate_column_name(name, seasons=True)
    if fourier_order <= 0:
        raise ValueError("Fourier Order must be > 0")
    self.season_config.append(name=name, period=period, resolution=fourier_order, arg="custom")
    return self
def fit(self, df, freq="auto", validation_df=None, progress="bar", minimal=False):
    """Train the model, optionally evaluating on validation data after each epoch.

    Args:
        df (pd.DataFrame, dict): pd.DataFrame or dict of dataframes containing column 'ds', 'y' with all data
        freq (str): Data step sizes. Frequency of data recording.
            Any valid frequency for pd.date_range, such as '5min', 'D', 'MS' or 'auto' (default) to automatically set frequency.
        validation_df (pd.DataFrame, dict): if provided, model performance is evaluated
            after each training epoch over this data.
        progress (str): Method of progress display: ["bar", "print", "plot", "plot-all", "none"]
            "bar": display updating progress bar (tqdm)
            "print": print out progress (fallback option)
            "plot": plot a live updating graph of the training loss,
                requires [live] install or livelossplot package installed.
            "plot-all": "plot" extended to all recorded metrics.
        minimal (bool): whether to train without any printouts or metrics collection

    Returns:
        metrics with training and potentially evaluation metrics
    """
    train_dict, _ = df_utils.prep_copy_df_dict(df)
    if self.fitted is True:
        log.error("Model has already been fitted. Re-fitting may break or produce different results.")
    # Validate, infer the data frequency, and impute gaps before training.
    train_dict = self._check_dataframe(train_dict, check_y=True, exogenous=True)
    self.data_freq = df_utils.infer_frequency(train_dict, n_lags=self.n_lags, freq=freq)
    train_dict = self._handle_missing_data(train_dict, freq=self.data_freq)
    # Validation data is only usable when metrics are collected and not in minimal mode.
    if validation_df is not None and (self.metrics is None or minimal):
        log.warning("Ignoring validation_df because no metrics set or minimal training set.")
        validation_df = None
    if validation_df is not None:
        val_dict, _ = df_utils.prep_copy_df_dict(validation_df)
        val_dict = self._check_dataframe(val_dict, check_y=False, exogenous=False)
        val_dict = self._handle_missing_data(val_dict, freq=self.data_freq)
        metrics_df = self._train(train_dict, df_val_dict=val_dict, progress=progress)
    elif minimal:
        self._train_minimal(train_dict, progress_bar=progress == "bar")
        metrics_df = None
    else:
        metrics_df = self._train(train_dict, progress=progress)
    self.fitted = True
    return metrics_df
def predict(self, df, decompose=True, raw=False):
    """Run the fitted model to make predictions.

    All data required for prediction must already be present in the dataframe.
    To predict into the unknown future with future regressors or events, prepare
    the data with make_future_dataframe first.

    Args:
        df (pd.DataFrame, dict): dataframe or dict of dataframes containing column 'ds', 'y' with data and
            other external variables
        decompose (bool): Whether to add individual components of forecast to the dataframe
        raw (bool): Whether return the raw forecasts sorted by forecast start date
            False (default): returns forecasts sorted by target (highlighting forecast age)

    Returns:
        if raw:
            df_raw (pd.DataFrame): columns 'ds', 'y', and ['step<i>']
                where step<i> refers to the i-step-ahead prediction *made at* this row's datetime.
        else:
            df_forecast (pd.DataFrame, dict): columns 'ds', 'y', 'trend' and ['yhat<i>']
                where yhat<i> refers to the i-step-ahead prediction for this row's datetime.
    """
    if raw:
        log.warning("Raw forecasts are incompatible with plotting utilities")
    if self.fitted is False:
        raise ValueError("Model has not been fitted. Predictions will be random.")
    df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)
    # Extend each dataframe into the future where possible, then prepare and normalize.
    df_dict, periods_added = self._maybe_extend_df(df_dict)
    df_dict = self._prepare_dataframe_to_predict(df_dict)
    df_dict = self._normalize(df_dict)
    for df_name, df_part in df_dict.items():
        dates, predicted, components = self._predict_raw(df_part, df_name, include_components=decompose)
        if not raw:
            forecast = self._reshape_raw_predictions_to_forecst_df(df_part, predicted, components)
            # Drop any rows that were only appended to enable forecasting.
            if periods_added[df_name] > 0:
                forecast = forecast[: -periods_added[df_name]]
        else:
            forecast = self._convert_raw_predictions_to_raw_df(dates, predicted, components)
            if periods_added[df_name] > 0:
                forecast = forecast[:-1]
        df_dict[df_name] = forecast
    df = df_utils.maybe_get_single_df_from_df_dict(df_dict, received_unnamed_df)
    return df
def test(self, df):
"""Evaluate model on holdout data.
Args:
df (pd.DataFrame,dict): dataframe or dict of dataframes containing column 'ds', 'y' with with holdout data
Returns:
df with evaluation metrics
"""
df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)
if self.fitted is False:
log.warning("Model has not been fitted. Test results will be random.")
df_dict = self._check_dataframe(df_dict, check_y=True, exogenous=True)
_ = df_utils.infer_frequency(df_dict, n_lags=self.n_lags, freq=self.data_freq)
df_dict = self._handle_missing_data(df_dict, freq=self.data_freq)
loader = self._init_val_loader(df_dict)
val_metrics_df = self._evaluate(loader)
if not self.config_normalization.global_normalization:
log.warning("Note that the metrics are displayed in normalized scale because of local normalization.")
return val_metrics_df
def split_df(self, df, freq="auto", valid_p=0.2, local_split=False):
"""Splits timeseries df into train and validation sets.
Prevents leakage of targets. Sharing/Overbleed of inputs can be configured.
Also performs basic data checks and fills in missing data.
Args:
df (pd.DataFrame, dict): dataframe or dict of dataframes containing column 'ds', 'y' with all data
freq (str):Data step sizes. Frequency of data recording,
Any valid frequency for pd.date_range, such as '5min', 'D', 'MS' or 'auto' (default) to automatically set frequency.
valid_p (float): fraction of data to use for holdout validation set
Targets will still never be shared.
local_split (bool): Each dataframe will be split according to valid_p locally (in case of dict of dataframes)
Returns:
df_train (pd.DataFrame,dict): training data
df_val (pd.DataFrame,dict): validation data
"""
df, received_unnamed_df = df_utils.prep_copy_df_dict(df)
df = self._check_dataframe(df, check_y=False, exogenous=False)
freq = df_utils.infer_frequency(df, n_lags=self.n_lags, freq=freq)
df = self._handle_missing_data(df, freq=freq, predicting=False)
df_train, df_val = df_utils.split_df(
df,
n_lags=self.n_lags,
n_forecasts=self.n_forecasts,
valid_p=valid_p,
inputs_overbleed=True,
local_split=local_split,
)
df_train = df_utils.maybe_get_single_df_from_df_dict(df_train, received_unnamed_df)
df_val = df_utils.maybe_get_single_df_from_df_dict(df_val, received_unnamed_df)
return df_train, df_val
def crossvalidation_split_df(self, df, freq="auto", k=5, fold_pct=0.1, fold_overlap_pct=0.5):
"""Splits timeseries data in k folds for crossvalidation.
Args:
df (pd.DataFrame): data
freq (str):Data step sizes. Frequency of data recording,
Any valid frequency for pd.date_range, such as '5min', 'D', 'MS' or 'auto' (default) to automatically set frequency.
k: number of CV folds
fold_pct: percentage of overall samples to be in each fold
fold_overlap_pct: percentage of overlap between the validation folds.
Returns:
list of k tuples [(df_train, df_val), ...] where:
df_train (pd.DataFrame): training data
df_val (pd.DataFrame): validation data
"""
if isinstance(df, dict):
raise NotImplementedError("Crossvalidation not implemented for multiple dataframes")
df = df.copy(deep=True)
df = self._check_dataframe(df, check_y=False, exogenous=False)
freq = df_utils.infer_frequency(df, n_lags=self.n_lags, freq=freq)
df = self._handle_missing_data(df, freq=freq, predicting=False)
folds = df_utils.crossvalidation_split_df(
df,
n_lags=self.n_lags,
n_forecasts=self.n_forecasts,
k=k,
fold_pct=fold_pct,
fold_overlap_pct=fold_overlap_pct,
)
return folds
def double_crossvalidation_split_df(self, df, freq="auto", k=5, valid_pct=0.10, test_pct=0.10):
"""Splits timeseries data in two sets of k folds for crossvalidation on training and testing data.
Args:
df (pd.DataFrame): data
freq (str):Data step sizes. Frequency of data recording,
Any valid frequency for pd.date_range, such as '5min', 'D', 'MS' or 'auto' (default) to automatically set frequency.
k (int): number of CV folds
valid_pct (float): percentage of overall samples to be in validation
test_pct (float): percentage of overall samples to be in test
Returns:
tuple of folds_val, folds_test, where each are same as crossvalidation_split_df returns
"""
if isinstance(df, dict):
raise NotImplementedError("Double crossvalidation not implemented for multiple dataframes")
df = df.copy(deep=True)
df = self._check_dataframe(df, check_y=False, exogenous=False)
freq = df_utils.infer_frequency(df, n_lags=self.n_lags, freq=freq)
df = self._handle_missing_data(df, freq=freq, predicting=False)
folds_val, folds_test = df_utils.double_crossvalidation_split_df(
df,
n_lags=self.n_lags,
n_forecasts=self.n_forecasts,
k=k,
valid_pct=valid_pct,
test_pct=test_pct,
)
return folds_val, folds_test
def create_df_with_events(self, df, events_df):
"""
Create a concatenated dataframe with the time series data along with the events data expanded.
Args:
df (dict, pd.DataFrame): containing column 'ds' and 'y'
events_df (dict, pd.DataFrame): containing column 'ds' and 'event'
Returns:
df (dict, pd.DataFrame): with columns 'y', 'ds' and other user specified events
"""
if self.events_config is None:
raise Exception(
"The events configs should be added to the NeuralProphet object (add_events fn)"
"before creating the data with events features"
)
df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)
df_dict = self._check_dataframe(df_dict, check_y=True, exogenous=False)
if isinstance(events_df, pd.DataFrame):
events_df_i = events_df.copy(deep=True)
for df_name, df_i in df_dict.items():
if isinstance(events_df, dict):
events_df_i = events_df[df_name].copy(deep=True)
for name in events_df_i["event"].unique():
assert name in self.events_config
df_out = df_utils.convert_events_to_features(
df_i,
events_config=self.events_config,
events_df=events_df_i,
)
df_dict[df_name] = df_out.reset_index(drop=True)
df = df_utils.maybe_get_single_df_from_df_dict(df_dict, received_unnamed_df)
return df
def make_future_dataframe(self, df, events_df=None, regressors_df=None, periods=None, n_historic_predictions=False):
df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)
df_dict_events, received_unnamed_events_df = df_utils.prep_copy_df_dict(events_df)
df_dict_regressors, received_unnamed_regressors_df = df_utils.prep_copy_df_dict(regressors_df)
if received_unnamed_events_df:
df_dict_events = {key: df_dict_events["__df__"] for key in df_dict.keys()}
elif df_dict_events is None:
df_dict_events = {key: None for key in df_dict.keys()}
else:
df_utils.compare_dict_keys(df_dict, df_dict_events, "dataframes", "events")
if received_unnamed_regressors_df:
df_dict_regressors = {key: df_dict_regressors["__df__"] for key in df_dict.keys()}
elif df_dict_regressors is None:
df_dict_regressors = {key: None for key in df_dict.keys()}
else:
df_utils.compare_dict_keys(df_dict, df_dict_regressors, "dataframes", "regressors")
df_future_dataframe = {}
for key in df_dict.keys():
df_future_dataframe[key] = self._make_future_dataframe(
df=df_dict[key],
events_df=df_dict_events[key],
regressors_df=df_dict_regressors[key],
periods=periods,
n_historic_predictions=n_historic_predictions,
)
df_future = df_utils.maybe_get_single_df_from_df_dict(df_future_dataframe, received_unnamed_df)
return df_future
def predict_trend(self, df):
"""Predict only trend component of the model.
Args:
df (pd.DataFrame, dict): dataframe or dict of dataframes containing column 'ds', prediction dates
Returns:
df (dict, pd.DataFrame): trend on prediction dates.
"""
df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)
df_dict = self._check_dataframe(df_dict, check_y=False, exogenous=False)
df_dict = self._normalize(df_dict)
for df_name, df in df_dict.items():
t = torch.from_numpy(np.expand_dims(df["t"].values, 1))
trend = self.model.trend(t).squeeze().detach().numpy()
data_params = self.config_normalization.get_data_params(df_name)
trend = trend * data_params["y"].scale + data_params["y"].shift
df_dict[df_name] = pd.DataFrame({"ds": df["ds"], "trend": trend})
df = df_utils.maybe_get_single_df_from_df_dict(df_dict, received_unnamed_df)
return df
    def predict_seasonal_components(self, df):
        """Predict seasonality components
        Args:
            df (pd.DataFrame, dict): dataframe or dict of dataframes containing column 'ds', prediction dates
        Returns:
            df (pd.DataFrame, dict): seasonal components with columns of name <seasonality component name>
        """
        df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)
        df_dict = self._check_dataframe(df_dict, check_y=False, exogenous=False)
        df_dict = self._normalize(df_dict)
        for df_name, df in df_dict.items():
            # build a prediction-mode dataset so seasonality features exist for every row
            dataset = time_dataset.TimeDataset(
                df,
                name=df_name,
                season_config=self.season_config,
                # n_lags=0,
                # n_forecasts=1,
                predict_mode=True,
            )
            loader = DataLoader(dataset, batch_size=min(4096, len(df)), shuffle=False, drop_last=False)
            # collect one array of predictions per configured seasonality
            predicted = {}
            for name in self.season_config.periods:
                predicted[name] = list()
            for inputs, _, _ in loader:
                for name in self.season_config.periods:
                    features = inputs["seasonalities"][name]
                    # evaluate only the seasonality sub-module for this component
                    y_season = torch.squeeze(self.model.seasonality(features=features, name=name))
                    predicted[name].append(y_season.data.numpy())
            for name in self.season_config.periods:
                # stitch the per-batch arrays back into one series
                predicted[name] = np.concatenate(predicted[name])
                if self.season_config.mode == "additive":
                    # additive components are rescaled to the original y units;
                    # multiplicative components stay as relative factors
                    data_params = self.config_normalization.get_data_params(df_name)
                    predicted[name] = predicted[name] * data_params["y"].scale
            df_dict[df_name] = pd.DataFrame({"ds": df["ds"], **predicted})
        df = df_utils.maybe_get_single_df_from_df_dict(df_dict, received_unnamed_df)
        return df
    def set_true_ar_for_eval(self, true_ar_weights):
        """Configures model to evaluate closeness of AR weights to true weights.

        Only stores the ground-truth weights on the instance; presumably they are
        consumed later during evaluation/metrics — confirm against callers.

        Args:
            true_ar_weights (np.array): True AR-parameters, if known.
        """
        self.true_ar_weights = true_ar_weights
def highlight_nth_step_ahead_of_each_forecast(self, step_number=None):
"""Set which forecast step to focus on for metrics evaluation and plotting.
Args:
step_number (int): i-th step ahead forecast to use for statistics and plotting.
default: None.
"""
if step_number is not None:
assert step_number <= self.n_forecasts
self.highlight_forecast_step_n = step_number
return self
def plot(self, fcst, ax=None, xlabel="ds", ylabel="y", figsize=(10, 6)):
"""Plot the NeuralProphet forecast, including history.
Args:
fcst (pd.DataFrame): output of self.predict.
ax (matplotlib axes): Optional, matplotlib axes on which to plot.
xlabel (string): label name on X-axis
ylabel (string): label name on Y-axis
figsize (tuple): width, height in inches. default: (10, 6)
Returns:
A matplotlib figure.
"""
if isinstance(fcst, dict):
log.error("Receiced more than one DataFrame. Use a for loop for many dataframes.")
if self.n_lags > 0:
num_forecasts = sum(fcst["yhat1"].notna())
if num_forecasts < self.n_forecasts:
log.warning(
"Too few forecasts to plot a line per forecast step." "Plotting a line per forecast origin instead."
)
return self.plot_last_forecast(
fcst,
ax=ax,
xlabel=xlabel,
ylabel=ylabel,
figsize=figsize,
include_previous_forecasts=num_forecasts - 1,
plot_history_data=True,
)
return plot(
fcst=fcst,
ax=ax,
xlabel=xlabel,
ylabel=ylabel,
figsize=figsize,
highlight_forecast=self.highlight_forecast_step_n,
)
def plot_last_forecast(
self,
fcst,
ax=None,
xlabel="ds",
ylabel="y",
figsize=(10, 6),
include_previous_forecasts=0,
plot_history_data=None,
):
"""Plot the NeuralProphet forecast, including history.
Args:
fcst (pd.DataFrame): output of self.predict.
ax (matplotlib axes): Optional, matplotlib axes on which to plot.
xlabel (string): label name on X-axis
ylabel (string): label name on Y-axis
figsize (tuple): width, height in inches. default: (10, 6)
include_previous_forecasts (int): number of previous forecasts to include in plot
plot_history_data
Returns:
A matplotlib figure.
"""
if self.n_lags == 0:
raise ValueError("Use the standard plot function for models without lags.")
if isinstance(fcst, dict):
log.error("Receiced more than one DataFrame. Use a for loop for many dataframes.")
if plot_history_data is None:
fcst = fcst[-(include_previous_forecasts + self.n_forecasts + self.n_lags) :]
elif plot_history_data is False:
fcst = fcst[-(include_previous_forecasts + self.n_forecasts) :]
elif plot_history_data is True:
fcst = fcst
fcst = utils.fcst_df_to_last_forecast(fcst, n_last=1 + include_previous_forecasts)
return plot(
fcst=fcst,
ax=ax,
xlabel=xlabel,
ylabel=ylabel,
figsize=figsize,
highlight_forecast=self.highlight_forecast_step_n,
line_per_origin=True,
)
def plot_components(self, fcst, figsize=None, residuals=False):
"""Plot the NeuralProphet forecast components.
Args:
fcst (pd.DataFrame): output of self.predict
figsize (tuple): width, height in inches.
None (default): automatic (10, 3 * npanel)
Returns:
A matplotlib figure.
"""
if isinstance(fcst, dict):
log.error("Receiced more than one DataFrame. Use a for loop for many dataframes.")
return plot_components(
m=self,
fcst=fcst,
figsize=figsize,
forecast_in_focus=self.highlight_forecast_step_n,
residuals=residuals,
)
def plot_parameters(self, weekly_start=0, yearly_start=0, figsize=None, df_name=None):
"""Plot the NeuralProphet forecast components.
Args:
weekly_start (int): specifying the start day of the weekly seasonality plot.
0 (default) starts the week on Sunday. 1 shifts by 1 day to Monday, and so on.
yearly_start (int): specifying the start day of the yearly seasonality plot.
0 (default) starts the year on Jan 1. 1 shifts by 1 day to Jan 2, and so on.
df_name: name of dataframe to refer to data params from original keys of train dataframes (used for local normalization in global modeling)
figsize (tuple): width, height in inches.
None (default): automatic (10, 3 * npanel)
Returns:
A matplotlib figure.
"""
return plot_parameters(
m=self,
forecast_in_focus=self.highlight_forecast_step_n,
weekly_start=weekly_start,
yearly_start=yearly_start,
figsize=figsize,
df_name=df_name,
)
    def _init_model(self):
        """Build Pytorch model with configured hyperparamters.

        Must be called after seasonalities/holidays are finalized
        (see the ordering note in _init_train_loader).

        Returns:
            TimeNet model
        """
        # forward every component configuration so TimeNet can build
        # the matching trend/season/covariate/event/regressor modules
        self.model = time_net.TimeNet(
            config_trend=self.config_trend,
            config_season=self.season_config,
            config_covar=self.config_covar,
            config_regressors=self.regressors_config,
            config_events=self.events_config,
            config_holidays=self.country_holidays_config,
            n_forecasts=self.n_forecasts,
            n_lags=self.n_lags,
            num_hidden_layers=self.config_model.num_hidden_layers,
            d_hidden=self.config_model.d_hidden,
        )
        log.debug(self.model)
        return self.model
    def _create_dataset(self, df_dict, predict_mode):
        """Construct dataset from dataframe.
        (Configured Hyperparameters can be overridden by explicitly supplying them.
        Useful to predict a single model component.)
        Args:
            df_dict (dict): containing pd.DataFrames of original and normalized columns 'ds', 'y', 't', 'y_scaled'
            predict_mode (bool): False includes target values.
                True does not include targets but includes entire dataset as input
        Returns:
            TimeDataset
        """
        # all component configs are forwarded so the dataset can build the
        # matching input features (lags, seasonalities, events, regressors)
        return time_dataset.GlobalTimeDataset(
            df_dict,
            predict_mode=predict_mode,
            n_lags=self.n_lags,
            n_forecasts=self.n_forecasts,
            season_config=self.season_config,
            events_config=self.events_config,
            country_holidays_config=self.country_holidays_config,
            covar_config=self.config_covar,
            regressors_config=self.regressors_config,
        )
def __handle_missing_data(self, df, freq, predicting):
"""Checks, auto-imputes and normalizes new data
Args:
df (pd.DataFrame): raw data with columns 'ds' and 'y'
freq (str): data frequency
predicting (bool): when no lags, allow NA values in 'y' of forecast series or 'y' to miss completely
Returns:
pre-processed df
"""
if self.n_lags == 0 and not predicting:
# we can drop rows with NA in y
sum_na = sum(df["y"].isna())
if sum_na > 0:
df = df[df["y"].notna()]
log.info("dropped {} NAN row in 'y'".format(sum_na))
# add missing dates for autoregression modelling
if self.n_lags > 0:
df, missing_dates = df_utils.add_missing_dates_nan(df, freq=freq)
if missing_dates > 0:
if self.impute_missing:
log.info("{} missing dates added.".format(missing_dates))
else:
raise ValueError(
"{} missing dates found. Please preprocess data manually or set impute_missing to True.".format(
missing_dates
)
)
if self.regressors_config is not None:
# if future regressors, check that they are not nan at end, else drop
# we ignore missing events, as those will be filled in with zeros.
reg_nan_at_end = 0
for col in self.regressors_config.keys():
col_nan_at_end = 0
while len(df) > col_nan_at_end and df[col].isnull().iloc[-(1 + col_nan_at_end)]:
col_nan_at_end += 1
reg_nan_at_end = max(reg_nan_at_end, col_nan_at_end)
if reg_nan_at_end > 0:
# drop rows at end due to missing future regressors
df = df[:-reg_nan_at_end]
log.info("Dropped {} rows at end due to missing future regressor values.".format(reg_nan_at_end))
df_end_to_append = None
nan_at_end = 0
while len(df) > nan_at_end and df["y"].isnull().iloc[-(1 + nan_at_end)]:
nan_at_end += 1
if nan_at_end > 0:
if predicting:
# allow nans at end - will re-add at end
if self.n_forecasts > 1 and self.n_forecasts < nan_at_end:
# check that not more than n_forecasts nans, else drop surplus
df = df[: -(nan_at_end - self.n_forecasts)]
# correct new length:
nan_at_end = self.n_forecasts
log.info(
"Detected y to have more NaN values than n_forecast can predict. "
"Dropped {} rows at end.".format(nan_at_end - self.n_forecasts)
)
df_end_to_append = df[-nan_at_end:]
df = df[:-nan_at_end]
else:
# training - drop nans at end
df = df[:-nan_at_end]
log.info(
"Dropped {} consecutive nans at end. "
"Training data can only be imputed up to last observation.".format(nan_at_end)
)
# impute missing values
data_columns = []
if self.n_lags > 0:
data_columns.append("y")
if self.config_covar is not None:
data_columns.extend(self.config_covar.keys())
if self.regressors_config is not None:
data_columns.extend(self.regressors_config.keys())
if self.events_config is not None:
data_columns.extend(self.events_config.keys())
for column in data_columns:
sum_na = sum(df[column].isnull())
if sum_na > 0:
if self.impute_missing:
# use 0 substitution for holidays and events missing values
if self.events_config is not None and column in self.events_config.keys():
df[column].fillna(0, inplace=True)
remaining_na = 0
else:
df.loc[:, column], remaining_na = df_utils.fill_linear_then_rolling_avg(
df[column],
limit_linear=self.impute_limit_linear,
rolling=self.impute_rolling,
)
log.info("{} NaN values in column {} were auto-imputed.".format(sum_na - remaining_na, column))
if remaining_na > 0:
raise ValueError(
"More than {} consecutive missing values encountered in column {}. "
"{} NA remain. Please preprocess data manually.".format(
2 * self.impute_limit_linear + self.impute_rolling, column, remaining_na
)
)
else: # fail because set to not impute missing
raise ValueError(
"Missing values found. " "Please preprocess data manually or set impute_missing to True."
)
if df_end_to_append is not None:
df = df.append(df_end_to_append)
return df
def _handle_missing_data(self, df, freq, predicting=False):
"""Checks, auto-imputes and normalizes new data
Args:
df (dict, pd.DataFrame): dict of dataframes of dataframes containing column 'ds', 'y' with all data
freq (str): data frequency
predicting (bool): when no lags, allow NA values in 'y' of forecast series or 'y' to miss completely
Returns:
pre-processed df
"""
df_is_dict = True
if isinstance(df, pd.DataFrame):
df_is_dict = False
df = {"__df__": df}
elif not isinstance(df, dict):
raise ValueError("Please insert valid df type (i.e. pd.DataFrame, dict)")
df_handled_missing_dict = {}
for key in df:
df_handled_missing_dict[key] = self.__handle_missing_data(df[key], freq, predicting)
if not df_is_dict:
df_handled_missing_dict = df_handled_missing_dict["__df__"]
return df_handled_missing_dict
def _check_dataframe(self, df, check_y=True, exogenous=True):
"""Performs basic data sanity checks and ordering
Prepare dataframe for fitting or predicting.
Args:
df (pd.DataFrame, dict): dataframe or dict of dataframes containing column 'ds'
check_y (bool): if df must have series values
set to True if training or predicting with autoregression
exogenous (bool): whether to check covariates, regressors and events column names
Returns:
pd.DataFrame or dict of pd.DataFrame
"""
df_is_dict = True
if isinstance(df, pd.DataFrame):
df_is_dict = False
df = {"__df__": df}
elif not isinstance(df, dict):
raise ValueError("Please insert valid df type (i.e. pd.DataFrame, dict)")
checked_df = {}
for key, df_i in df.items():
checked_df[key] = df_utils.check_single_dataframe(
df=df_i,
check_y=check_y,
covariates=self.config_covar if exogenous else None,
regressors=self.regressors_config if exogenous else None,
events=self.events_config if exogenous else None,
)
if not df_is_dict:
checked_df = checked_df["__df__"]
return checked_df
def _validate_column_name(self, name, events=True, seasons=True, regressors=True, covariates=True):
"""Validates the name of a seasonality, event, or regressor.
Args:
name (str):
events (bool): check if name already used for event
seasons (bool): check if name already used for seasonality
regressors (bool): check if name already used for regressor
"""
reserved_names = [
"trend",
"additive_terms",
"daily",
"weekly",
"yearly",
"events",
"holidays",
"zeros",
"extra_regressors_additive",
"yhat",
"extra_regressors_multiplicative",
"multiplicative_terms",
]
rn_l = [n + "_lower" for n in reserved_names]
rn_u = [n + "_upper" for n in reserved_names]
reserved_names.extend(rn_l)
reserved_names.extend(rn_u)
reserved_names.extend(["ds", "y", "cap", "floor", "y_scaled", "cap_scaled"])
if name in reserved_names:
raise ValueError("Name {name!r} is reserved.".format(name=name))
if events and self.events_config is not None:
if name in self.events_config.keys():
raise ValueError("Name {name!r} already used for an event.".format(name=name))
if events and self.country_holidays_config is not None:
if name in self.country_holidays_config.holiday_names:
raise ValueError(
"Name {name!r} is a holiday name in {country_holidays}.".format(
name=name, country_holidays=self.country_holidays_config.country
)
)
if seasons and self.season_config is not None:
if name in self.season_config.periods:
raise ValueError("Name {name!r} already used for a seasonality.".format(name=name))
if covariates and self.config_covar is not None:
if name in self.config_covar:
raise ValueError("Name {name!r} already used for an added covariate.".format(name=name))
if regressors and self.regressors_config is not None:
if name in self.regressors_config.keys():
raise ValueError("Name {name!r} already used for an added regressor.".format(name=name))
def _normalize(self, df_dict):
"""Apply data scales.
Applies data scaling factors to df using data_params.
Args:
df_dict (dict): dict of pd.Dataframes each df with columns 'ds', 'y', (and potentially more regressors)
Returns:
df_dict: dict of pd.DataFrame, normalized
"""
for df_name, df_i in df_dict.items():
data_params = self.config_normalization.get_data_params(df_name)
df_dict[df_name] = df_utils.normalize(df_i, data_params)
return df_dict
    def _init_train_loader(self, df_dict):
        """Executes data preparation steps and initiates training procedure.

        Order matters here: normalization params must exist before normalizing,
        seasonalities must be finalized before the dataset and model are built.

        Args:
            df_dict (dict): dict of pd.DataFrame containing column 'ds', 'y' with training data
        Returns:
            torch DataLoader
        """
        if not isinstance(df_dict, dict):
            raise ValueError("df_dict must be a dict of pd.DataFrames.")
        # if not self.fitted:
        self.config_normalization.init_data_params(
            df_dict=df_dict,
            covariates_config=self.config_covar,
            regressor_config=self.regressors_config,
            events_config=self.events_config,
        )
        df_dict = self._normalize(df_dict)
        # if not self.fitted:
        if self.config_trend.changepoints is not None:
            # scale user-specified changepoint times
            self.config_trend.changepoints = self._normalize(
                {"__df__": pd.DataFrame({"ds": pd.Series(self.config_trend.changepoints)})}
            )["__df__"]["t"].values
        # merge all series into one frame (deduplicated on 'ds') to auto-detect seasonalities
        df_merged, _ = df_utils.join_dataframes(df_dict)
        df_merged = df_merged.sort_values("ds")
        df_merged.drop_duplicates(inplace=True, keep="first", subset=["ds"])
        self.season_config = utils.set_auto_seasonalities(df_merged, season_config=self.season_config)
        if self.country_holidays_config is not None:
            self.country_holidays_config.init_holidays(df_merged)
        dataset = self._create_dataset(df_dict, predict_mode=False)  # needs to be called after set_auto_seasonalities
        self.config_train.set_auto_batch_epoch(n_data=len(dataset))
        loader = DataLoader(dataset, batch_size=self.config_train.batch_size, shuffle=True)
        # if not self.fitted:
        self.model = self._init_model()  # needs to be called after set_auto_seasonalities
        if self.config_train.learning_rate is None:
            # run an lr-range test when no learning rate was configured
            self.config_train.learning_rate = self.config_train.find_learning_rate(self.model, dataset)
            log.info("lr-range-test selected learning rate: {:.2E}".format(self.config_train.learning_rate))
        self.optimizer = self.config_train.get_optimizer(self.model.parameters())
        self.scheduler = self.config_train.get_scheduler(self.optimizer, steps_per_epoch=len(loader))
        return loader
def _init_val_loader(self, df_dict):
"""Executes data preparation steps and initiates evaluation procedure.
Args:
df_dict (dict): dict of pd.DataFrame containing column 'ds', 'y' with validation data
Returns:
torch DataLoader
"""
df_dict = self._normalize(df_dict)
dataset = self._create_dataset(df_dict, predict_mode=False)
loader = DataLoader(dataset, batch_size=min(1024, len(dataset)), shuffle=False, drop_last=False)
return loader
def _get_time_based_sample_weight(self, t):
weight = torch.ones_like(t)
if self.config_train.newer_samples_weight > 1.0:
end_w = self.config_train.newer_samples_weight
start_t = self.config_train.newer_samples_start
time = (t.detach() - start_t) / (1.0 - start_t)
time = torch.maximum(torch.zeros_like(time), time)
time = torch.minimum(torch.ones_like(time), time) # time = 0 to 1
time = torch.pi * (time - 1.0) # time = -pi to 0
time = 0.5 * torch.cos(time) + 0.5 # time = 0 to 1
# scales end to be end weight times bigger than start weight
# with end weight being 1.0
weight = (1.0 + time * (end_w - 1.0)) / end_w
return weight
    def _train_epoch(self, e, loader):
        """Make one complete iteration over all samples in dataloader and update model after each batch.

        Args:
            e (int): current epoch number
            loader (torch DataLoader): Training Dataloader
        Returns:
            computed (saved) epoch metrics, or None when metrics are disabled
        """
        self.model.train()
        for i, (inputs, targets, meta) in enumerate(loader):
            # Run forward calculation
            predicted = self.model.forward(inputs)
            # Compute loss. no reduction.
            loss = self.config_train.loss_func(predicted, targets)
            # Weigh newer samples more.
            loss = loss * self._get_time_based_sample_weight(t=inputs["time"])
            loss = loss.mean()
            # Regularize. (i / len(loader) is this epoch's iteration progress in [0, 1))
            loss, reg_loss = self._add_batch_regualarizations(loss, e, i / float(len(loader)))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # scheduler is stepped per batch, not per epoch
            self.scheduler.step()
            if self.metrics is not None:
                self.metrics.update(
                    predicted=predicted.detach(), target=targets.detach(), values={"Loss": loss, "RegLoss": reg_loss}
                )
        if self.metrics is not None:
            return self.metrics.compute(save=True)
        else:
            return None
    def _add_batch_regualarizations(self, loss, e, iter_progress):
        """Add regularization terms to loss, if applicable.

        Args:
            loss (torch Tensor, scalar): current batch loss
            e (int): current epoch number
            iter_progress (float): this epoch's progress of iterating over dataset [0, 1]
        Returns:
            loss, reg_loss
        """
        # delay_weight ramps regularization in over training; 0 disables it entirely
        delay_weight = self.config_train.get_reg_delay_weight(e, iter_progress)
        reg_loss = torch.zeros(1, dtype=torch.float, requires_grad=False)
        if delay_weight > 0:
            # Add regularization of AR weights - sparsify
            if self.model.n_lags > 0 and self.config_ar.reg_lambda is not None:
                reg_ar = self.config_ar.regularize(self.model.ar_weights)
                reg_ar = torch.sum(reg_ar).squeeze() / self.n_forecasts
                reg_loss += self.config_ar.reg_lambda * reg_ar
            # Regularize trend to be smoother/sparse
            l_trend = self.config_trend.trend_reg
            if self.config_trend.n_changepoints > 0 and l_trend is not None and l_trend > 0:
                reg_trend = utils.reg_func_trend(
                    weights=self.model.get_trend_deltas,
                    threshold=self.config_train.trend_reg_threshold,
                )
                reg_loss += l_trend * reg_trend
            # Regularize seasonality: sparsify fourier term coefficients
            l_season = self.config_train.reg_lambda_season
            if self.model.season_dims is not None and l_season is not None and l_season > 0:
                for name in self.model.season_params.keys():
                    reg_season = utils.reg_func_season(self.model.season_params[name])
                    reg_loss += l_season * reg_season
            # Regularize events: sparsify events features coefficients
            if self.events_config is not None or self.country_holidays_config is not None:
                reg_events_loss = utils.reg_func_events(self.events_config, self.country_holidays_config, self.model)
                reg_loss += reg_events_loss
            # Regularize regressors: sparsify regressor features coefficients
            if self.regressors_config is not None:
                reg_regressor_loss = utils.reg_func_regressors(self.regressors_config, self.model)
                reg_loss += reg_regressor_loss
        # scale the summed regularization by the delay ramp before adding to loss
        reg_loss = delay_weight * reg_loss
        loss = loss + reg_loss
        return loss, reg_loss
def _evaluate_epoch(self, loader, val_metrics):
"""Evaluates model performance.
Args:
loader (torch DataLoader): instantiated Validation Dataloader (with TimeDataset)
val_metrics (MetricsCollection): validation metrics to be computed.
Returns:
dict with evaluation metrics
"""
with torch.no_grad():
self.model.eval()
for inputs, targets, meta in loader:
predicted = self.model.forward(inputs)
val_metrics.update(predicted=predicted.detach(), target=targets.detach())
val_metrics = val_metrics.compute(save=True)
return val_metrics
def _train(self, df_dict, df_val_dict=None, progress="bar"):
"""Execute model training procedure for a configured number of epochs.
Args:
df_dict (dict): dict of pd.DataFrames containing column 'ds', 'y' with training data
df_val_dict (dict): dict of pd.DataFrames containing column 'ds', 'y' with validation data
progress (str): Method of progress display: ["bar", "print", "plot", "plot-all", "none"]
"bar": display updating progress bar (tqdm)
"print" print out progress (fallback option)
"plot": plot a live updating graph of the training loss,
requires [live] install or livelossplot package installed.
"plot-all": "plot" extended to all recorded metrics.
Returns:
metrics (pd.DataFrame): df with metrics
"""
# parse progress arg
progress_bar = False
progress_print = False
plot_live_loss = False
plot_live_all_metrics = False
if progress.lower() == "bar":
progress_bar = True
elif progress.lower() == "print":
progress_print = True
elif progress.lower() == "plot":
plot_live_loss = True
elif progress.lower() in ["plot-all", "plotall", "plot all"]:
plot_live_loss = True
plot_live_all_metrics = True
elif not progress.lower() == "none":
raise ValueError("received unexpected value for progress {}".format(progress))
if self.metrics is None:
log.info("No progress prints or plots possible because metrics are deactivated.")
if df_val_dict is not None:
log.warning("Ignoring supplied df_val as no metrics are specified.")
if plot_live_loss or plot_live_all_metrics:
log.warning("Can not plot live loss as no metrics are specified.")
progress_bar = True
if progress_print:
log.warning("Can not print progress as no metrics are specified.")
return self._train_minimal(df_dict, progress_bar=progress_bar)
# set up data loader
loader = self._init_train_loader(df_dict)
# set up Metrics
if self.highlight_forecast_step_n is not None:
self.metrics.add_specific_target(target_pos=self.highlight_forecast_step_n - 1)
if not self.config_normalization.global_normalization:
log.warning("When Global modeling with local normalization, metrics are displayed in normalized scale.")
else:
if not self.config_normalization.normalize == "off":
self.metrics.set_shift_scale(
(
self.config_normalization.global_data_params["y"].shift,
self.config_normalization.global_data_params["y"].scale,
)
)
validate = df_val_dict is not None
if validate:
val_loader = self._init_val_loader(df_val_dict)
val_metrics = metrics.MetricsCollection([m.new() for m in self.metrics.batch_metrics])
# set up printing and plotting
if plot_live_loss:
try:
from livelossplot import PlotLosses
live_out = ["MatplotlibPlot"]
if not progress_bar:
live_out.append("ExtremaPrinter")
live_loss = PlotLosses(outputs=live_out)
plot_live_loss = True
except:
log.warning(
"To plot live loss, please install neuralprophet[live]."
"Using pip: 'pip install neuralprophet[live]'"
"Or install the missing package manually: 'pip install livelossplot'",
exc_info=True,
)
plot_live_loss = False
progress_bar = True
if progress_bar:
training_loop = tqdm(
range(self.config_train.epochs),
total=self.config_train.epochs,
leave=log.getEffectiveLevel() <= 20,
)
else:
training_loop = range(self.config_train.epochs)
start = time.time()
# run training loop
for e in training_loop:
metrics_live = OrderedDict({})
self.metrics.reset()
if validate:
val_metrics.reset()
# run epoch
epoch_metrics = self._train_epoch(e, loader)
# collect metrics
if validate:
val_epoch_metrics = self._evaluate_epoch(val_loader, val_metrics)
print_val_epoch_metrics = {k + "_val": v for k, v in val_epoch_metrics.items()}
else:
val_epoch_metrics = None
print_val_epoch_metrics = OrderedDict({})
# print metrics
if progress_bar:
training_loop.set_description(f"Epoch[{(e+1)}/{self.config_train.epochs}]")
training_loop.set_postfix(ordered_dict=epoch_metrics, **print_val_epoch_metrics)
elif progress_print:
metrics_string = utils.print_epoch_metrics(epoch_metrics, e=e, val_metrics=val_epoch_metrics)
if e == 0:
log.info(metrics_string.splitlines()[0])
log.info(metrics_string.splitlines()[1])
else:
log.info(metrics_string.splitlines()[1])
# plot metrics
if plot_live_loss:
metrics_train = list(epoch_metrics)
metrics_live["log-{}".format(metrics_train[0])] = np.log(epoch_metrics[metrics_train[0]])
if plot_live_all_metrics and len(metrics_train) > 1:
for i in range(1, len(metrics_train)):
metrics_live["{}".format(metrics_train[i])] = epoch_metrics[metrics_train[i]]
if validate:
metrics_val = list(val_epoch_metrics)
metrics_live["val_log-{}".format(metrics_val[0])] = np.log(val_epoch_metrics[metrics_val[0]])
if plot_live_all_metrics and len(metrics_val) > 1:
for i in range(1, len(metrics_val)):
metrics_live["val_{}".format(metrics_val[i])] = val_epoch_metrics[metrics_val[i]]
live_loss.update(metrics_live)
if e % (1 + self.config_train.epochs // 20) == 0 or e + 1 == self.config_train.epochs:
live_loss.send()
# return metrics as df
log.debug("Train Time: {:8.3f}".format(time.time() - start))
log.debug("Total Batches: {}".format(self.metrics.total_updates))
metrics_df = self.metrics.get_stored_as_df()
if validate:
metrics_df_val = val_metrics.get_stored_as_df()
for col in metrics_df_val.columns:
metrics_df["{}_val".format(col)] = metrics_df_val[col]
return metrics_df
def _train_minimal(self, df_dict, progress_bar=False):
    """Run the bare training loop for the configured number of epochs.

    No metrics are collected and nothing is returned; only the model
    parameters are updated via ``_train_epoch``.

    Args:
        df_dict (dict): dict of pd.DataFrames containing column 'ds', 'y' with training data
        progress_bar (bool): show a tqdm progress bar over epochs
    Returns:
        None
    """
    loader = self._init_train_loader(df_dict)
    epochs = self.config_train.epochs
    if progress_bar:
        # keep the bar on screen only when the log level is INFO or lower
        training_loop = tqdm(range(epochs), total=epochs, leave=log.getEffectiveLevel() <= 20)
    else:
        training_loop = range(epochs)
    for epoch in training_loop:
        if progress_bar:
            training_loop.set_description(f"Epoch[{(epoch+1)}/{epochs}]")
        _ = self._train_epoch(epoch, loader)
def _eval_true_ar(self):
    """Compute the sTPE between learned AR weights and the known true AR weights.

    Requires an autoregressive model (``n_lags > 0``). When ``n_lags > 1``,
    the forecast step to evaluate must be selected via
    ``highlight_forecast_step_n``.

    Returns:
        sTPE (float): symmetric total percentage error of the AR weights
    Raises:
        ValueError: if ``n_lags > 1`` and no forecast step is highlighted
    """
    assert self.n_lags > 0
    if self.highlight_forecast_step_n is None:
        if self.n_lags > 1:
            raise ValueError("Please define forecast_lag for sTPE computation")
        forecast_pos = 1
    else:
        forecast_pos = self.highlight_forecast_step_n
    weights = self.model.ar_weights.detach().numpy()
    # select the row of the chosen forecast step; reversed, presumably to
    # align lag ordering with true_ar_weights — confirm against utils helper
    weights = weights[forecast_pos - 1, :][::-1]
    sTPE = utils.symmetric_total_percentage_error(self.true_ar_weights, weights)
    # bugfix: the previous call passed print-style positional args to log.info
    # without %-placeholders, so logging never rendered the values.
    log.info("AR parameters: %s\nModel weights: %s", self.true_ar_weights, weights)
    return sTPE
def _evaluate(self, loader):
    """Evaluates model performance.

    Args:
        loader (torch DataLoader): instantiated Validation Dataloader (with TimeDataset)
    Returns:
        df with evaluation metrics
    """
    # fresh metric instances so training accumulators are untouched
    fresh = [batch_metric.new() for batch_metric in self.metrics.batch_metrics]
    collected = metrics.MetricsCollection(fresh)
    if self.highlight_forecast_step_n is not None:
        collected.add_specific_target(target_pos=self.highlight_forecast_step_n - 1)
    # run a single evaluation epoch
    metrics_dict = self._evaluate_epoch(loader, collected)
    # append sTPE when the true AR weights are known
    if self.true_ar_weights is not None:
        metrics_dict["sTPE"] = self._eval_true_ar()
    log.info("Validation metrics: {}".format(utils.print_epoch_metrics(metrics_dict)))
    return collected.get_stored_as_df()
def _make_future_dataframe(self, df, events_df, regressors_df, periods, n_historic_predictions):
    """Assemble the dataframe handed to predict(): recent history plus future rows.

    Args:
        df (pd.DataFrame): history with column 'ds' (plus 'y' and exogenous columns)
        events_df (pd.DataFrame): future values of user specified events (may be None)
        regressors_df (pd.DataFrame): future values of user specified regressors (may be None)
        periods (int): number of future steps to append; None selects a default
            (1 without auto-regression, else n_forecasts)
        n_historic_predictions (bool or int): how many historic rows to keep for
            in-sample forecasts; True keeps all available, False/0 keeps none
    Returns:
        pd.DataFrame: trimmed history followed by the appended future rows
    Raises:
        ValueError: if both history and future would be empty, or required
            future regressor values are missing, or history is insufficient
    """
    if periods == 0 and n_historic_predictions is True:
        # nothing to extend; predict() could be called on the history directly
        log.warning(
            "Not extending df into future as no periods specified." "You can call predict directly instead."
        )
    df = df.copy(deep=True)
    # called for its validation/logging side effect only; result unused
    _ = df_utils.infer_frequency(df, n_lags=self.n_lags, freq=self.data_freq)
    last_date = pd.to_datetime(df["ds"].copy(deep=True).dropna()).sort_values().max()
    if events_df is not None:
        events_df = events_df.copy(deep=True).reset_index(drop=True)
    if regressors_df is not None:
        regressors_df = regressors_df.copy(deep=True).reset_index(drop=True)
    n_lags = 0 if self.n_lags is None else self.n_lags
    if periods is None:
        periods = 1 if n_lags == 0 else self.n_forecasts
    else:
        assert periods >= 0
    # normalize n_historic_predictions to an int row count
    if isinstance(n_historic_predictions, bool):
        if n_historic_predictions:
            n_historic_predictions = len(df) - n_lags
        else:
            n_historic_predictions = 0
    elif not isinstance(n_historic_predictions, int):
        log.error("non-integer value for n_historic_predictions set to zero.")
        n_historic_predictions = 0
    if periods == 0 and n_historic_predictions == 0:
        raise ValueError("Set either history or future to contain more than zero values.")
    # check for external regressors known in future
    if self.regressors_config is not None and periods > 0:
        if regressors_df is None:
            raise ValueError("Future values of all user specified regressors not provided")
        else:
            for regressor in self.regressors_config.keys():
                if regressor not in regressors_df.columns:
                    raise ValueError("Future values of user specified regressor {} not provided".format(regressor))
    if len(df) < n_lags:
        raise ValueError("Insufficient data for a prediction")
    elif len(df) < n_lags + n_historic_predictions:
        # not enough history for the requested in-sample forecasts; shrink count
        log.warning(
            "Insufficient data for {} historic forecasts, reduced to {}.".format(
                n_historic_predictions, len(df) - n_lags
            )
        )
        n_historic_predictions = len(df) - n_lags
    # keep only the tail rows needed for lags plus historic predictions
    if (n_historic_predictions + n_lags) == 0:
        df = pd.DataFrame(columns=df.columns)
    else:
        df = df[-(n_lags + n_historic_predictions) :]
    if len(df) > 0:
        if len(df.columns) == 1 and "ds" in df:
            # a datestamp-only frame is only valid without auto-regression
            assert n_lags == 0
            df = self._check_dataframe(df, check_y=False, exogenous=False)
        else:
            df = self._check_dataframe(df, check_y=n_lags > 0, exogenous=True)
    # future data
    # check for external events known in future
    if self.events_config is not None and periods > 0 and events_df is None:
        log.warning(
            "Future values not supplied for user specified events. "
            "All events being treated as not occurring in future"
        )
    if n_lags > 0:
        if periods > 0 and periods != self.n_forecasts:
            # with auto-regression the forecast horizon is fixed by n_forecasts
            periods = self.n_forecasts
            log.warning(
                "Number of forecast steps is defined by n_forecasts. " "Adjusted to {}.".format(self.n_forecasts)
            )
    if periods > 0:
        future_df = df_utils.make_future_df(
            df_columns=df.columns,
            last_date=last_date,
            periods=periods,
            freq=self.data_freq,
            events_config=self.events_config,
            events_df=events_df,
            regressor_config=self.regressors_config,
            regressors_df=regressors_df,
        )
        if len(df) > 0:
            df = df.append(future_df)
        else:
            df = future_df
    df.reset_index(drop=True, inplace=True)
    return df
def _get_maybe_extend_periods(self, df):
n_lags = 0 if self.n_lags is None else self.n_lags
periods_add = 0
nan_at_end = 0
while len(df) > nan_at_end and df["y"].isnull().iloc[-(1 + nan_at_end)]:
nan_at_end += 1
if n_lags > 0:
if self.regressors_config is None:
# if dataframe has already been extended into future,
# don't extend beyond n_forecasts.
periods_add = max(0, self.n_forecasts - nan_at_end)
else:
# can not extend as we lack future regressor values.
periods_add = 0
return periods_add
def _maybe_extend_df(self, df_dict):
    """Extend each dataframe into the future where possible.

    Args:
        df_dict (dict): dict of pd.DataFrames, each with columns 'ds', 'y'
    Returns:
        tuple: (df_dict with frames extended in place,
                dict mapping df name -> number of rows appended)
    """
    periods_add = {}
    for name, frame in df_dict.items():
        # called for its validation/logging side effect only; result unused
        _ = df_utils.infer_frequency(frame, n_lags=self.n_lags, freq=self.data_freq)
        # to get all forecasteable values with df given, maybe extend into future:
        n_extra = self._get_maybe_extend_periods(frame)
        periods_add[name] = n_extra
        if n_extra > 0:
            # This does not include future regressors or events.
            # periods should be 0 if those are configured.
            last_date = pd.to_datetime(frame["ds"].copy(deep=True)).sort_values().max()
            future_rows = df_utils.make_future_df(
                df_columns=frame.columns,
                last_date=last_date,
                periods=n_extra,
                freq=self.data_freq,
            )
            frame = frame.append(future_rows)
            frame.reset_index(drop=True, inplace=True)
        df_dict[name] = frame
    return df_dict, periods_add
def _prepare_dataframe_to_predict(self, df_dict):
    """Validate and pre-process every dataframe in df_dict for prediction.

    Each frame is deep-copied, checked for shape and sufficiency, and its
    missing values (except trailing NaNs) filled; the cleaned frames replace
    the originals in df_dict.

    Args:
        df_dict (dict): dict of raw pd.DataFrames with column 'ds' (and 'y')
    Returns:
        dict: df_dict with validated, gap-filled frames
    Raises:
        ValueError: on already-normalized input, insufficient rows, or a
            datestamp-only frame when auto-regression needs 'y' values
    """
    for name in df_dict:
        frame = df_dict[name].copy(deep=True)
        # called for its validation/logging side effect only; result unused
        _ = df_utils.infer_frequency(frame, n_lags=self.n_lags, freq=self.data_freq)
        # reject frames that already went through pre-processing
        already_processed = "y_scaled" in frame.columns or "t" in frame.columns
        if already_processed:
            raise ValueError(
                "DataFrame has already been normalized. " "Please provide raw dataframe or future dataframe."
            )
        # Checks
        n_lags = 0 if self.n_lags is None else self.n_lags
        if len(frame) == 0 or len(frame) < n_lags:
            raise ValueError("Insufficient data to make predictions.")
        ds_only = len(frame.columns) == 1 and "ds" in frame
        if ds_only:
            if n_lags != 0:
                raise ValueError("only datestamps provided but y values needed for auto-regression.")
            frame = self._check_dataframe(frame, check_y=False, exogenous=False)
        else:
            frame = self._check_dataframe(frame, check_y=n_lags > 0, exogenous=False)
        # fill in missing nans except for nans at end
        frame = self._handle_missing_data(frame, freq=self.data_freq, predicting=True)
        frame.reset_index(drop=True, inplace=True)
        df_dict[name] = frame
    return df_dict
def _predict_raw(self, df, df_name, include_components=False):
    """Runs the model to make predictions.

    Predictions are returned in raw vector format without decomposition.
    Predictions are given on a forecast origin basis, not on a target basis.

    Args:
        df (pandas DataFrame): Dataframe with columns 'ds' datestamps, 'y' time series values and
            other external variables
        df_name (str): name of the data params from which the current dataframe refers to (only in case of local_normalization)
        include_components (bool): Whether to return individual components of forecast
    Returns:
        dates (pd.Series): timestamps referring to the start of the predictions.
        predicted (np.array): Array containing the forecasts
        components (Dict[np.array]): Dictionary of components containing an array
            of each components contribution to the forecast
    """
    if isinstance(df, dict):
        # bugfix: corrected typo "Receiced" in user-facing error message
        raise ValueError("Received more than one DataFrame. Use a for loop for many dataframes.")
    if "y_scaled" not in df.columns or "t" not in df.columns:
        # bugfix: error previously referenced a non-existent method name
        raise ValueError("Received unprepared dataframe to predict. " "Please call _prepare_dataframe_to_predict.")
    dataset = self._create_dataset(df_dict={df_name: df}, predict_mode=True)
    loader = DataLoader(dataset, batch_size=min(1024, len(df)), shuffle=False, drop_last=False)
    # dates mark each forecast origin; trailing origins without a full
    # n_forecasts horizon are dropped
    if self.n_forecasts > 1:
        dates = df["ds"].iloc[self.n_lags : -self.n_forecasts + 1]
    else:
        dates = df["ds"].iloc[self.n_lags :]
    predicted_vectors = list()
    component_vectors = None
    with torch.no_grad():
        self.model.eval()
        for inputs, _, _ in loader:
            predicted = self.model.forward(inputs)
            predicted_vectors.append(predicted.detach().numpy())
            if include_components:
                components = self.model.compute_components(inputs)
                if component_vectors is None:
                    component_vectors = {name: [value.detach().numpy()] for name, value in components.items()}
                else:
                    for name, value in components.items():
                        component_vectors[name].append(value.detach().numpy())
    predicted = np.concatenate(predicted_vectors)
    # undo normalization: predictions are produced in scaled space
    data_params = self.config_normalization.get_data_params(df_name)
    scale_y, shift_y = data_params["y"].scale, data_params["y"].shift
    predicted = predicted * scale_y + shift_y
    if include_components:
        components = {name: np.concatenate(value) for name, value in component_vectors.items()}
        for name, value in components.items():
            # multiplicative components stay in relative units; only
            # additive components are rescaled to the data's units
            if "multiplicative" in name:
                continue
            elif "event_" in name:
                event_name = name.split("_")[1]
                if self.events_config is not None and event_name in self.events_config:
                    if self.events_config[event_name].mode == "multiplicative":
                        continue
                elif (
                    self.country_holidays_config is not None
                    and event_name in self.country_holidays_config.holiday_names
                ):
                    if self.country_holidays_config.mode == "multiplicative":
                        continue
            elif "season" in name and self.season_config.mode == "multiplicative":
                continue
            # scale additive components
            components[name] = value * scale_y
            if "trend" in name:
                # only the trend absorbs the shift term
                components[name] += shift_y
    else:
        components = None
    return dates, predicted, components
def _convert_raw_predictions_to_raw_df(self, dates, predicted, components=None):
"""Turns forecast-origin-wise predictions into forecast-target-wise predictions.
Args:
dates (pd.Series): timestamps referring to the start of the predictions.
predicted (np.array): Array containing the forecasts
components (Dict[np.array]): Dictionary of components containing an array
of each components' contribution to the forecast
Returns:
df_raw (pandas DataFrame): columns 'ds', 'y', and ['step<i>']
where step<i> refers to the i-step-ahead prediction *made at* this row's datetime.
e.g. the first forecast step0 is the prediction for this timestamp,
the step1 is for the timestamp after, ...
... step3 is the prediction for 3 steps into the future,
predicted using information up to (excluding) this datetime.
"""
if isinstance(dates, dict):
raise ValueError("Receiced more than one DataFrame. Use a for loop for many dataframes.")
predicted_names = ["step{}".format(i) for i in range(self.n_forecasts)]
all_data = predicted
all_names = predicted_names
if components is not None:
for comp_name, comp_data in components.items():
all_data = np.concatenate((all_data, comp_data), 1)
all_names += ["{}{}".format(comp_name, i) for i in range(self.n_forecasts)]
df_raw = pd.DataFrame(data=all_data, columns=all_names)
df_raw.insert(0, "ds", dates.values)
return df_raw
def _reshape_raw_predictions_to_forecst_df(self, df, predicted, components): # DOES NOT ACCEPT DICT
"""Turns forecast-origin-wise predictions into forecast-target-wise predictions.
Args:
df (pd.DataFrame): input dataframe
predicted (np.array): Array containing the forecasts
components (Dict[np.array]): Dictionary of components containing an array
of each components' contribution to the forecast
Returns:
df_forecast (pd.DataFrame): columns 'ds', 'y', 'trend' and ['yhat<i>']
where yhat<i> refers to the i-step-ahead prediction for this row's datetime.
e.g. yhat3 is the prediction for this datetime, predicted 3 steps ago, "3 steps old".
"""
if isinstance(df, dict):
raise ValueError("Receiced more than one DataFrame. Use a for loop for many dataframes.")
cols = ["ds", "y"] # cols to keep from df
df_forecast = pd.concat((df[cols],), axis=1)
# create a line for each forecast_lag
# 'yhat<i>' is the forecast for 'y' at 'ds' from i steps ago.
for forecast_lag in range(1, self.n_forecasts + 1):
forecast = predicted[:, forecast_lag - 1]
pad_before = self.n_lags + forecast_lag - 1
pad_after = self.n_forecasts - forecast_lag
yhat = np.concatenate(([None] * pad_before, forecast, [None] * pad_after))
df_forecast["yhat{}".format(forecast_lag)] = yhat
df_forecast["residual{}".format(forecast_lag)] = yhat - df_forecast["y"]
if components is None:
return df_forecast
# else add components
lagged_components = [
"ar",
]
if self.config_covar is not None:
for name in self.config_covar.keys():
lagged_components.append("lagged_regressor_{}".format(name))
for comp in lagged_components:
if comp in components:
for forecast_lag in range(1, self.n_forecasts + 1):
forecast = components[comp][:, forecast_lag - 1]
pad_before = self.n_lags + forecast_lag - 1
pad_after = self.n_forecasts - forecast_lag
yhat = np.concatenate(([None] * pad_before, forecast, [None] * pad_after))
df_forecast["{}{}".format(comp, forecast_lag)] = yhat
# only for non-lagged components
for comp in components:
if comp not in lagged_components:
forecast_0 = components[comp][0, :]
forecast_rest = components[comp][1:, self.n_forecasts - 1]
yhat = np.concatenate(([None] * self.n_lags, forecast_0, forecast_rest))
df_forecast[comp] = yhat
return df_forecast
| [
"logging.getLogger",
"neuralprophet.df_utils.join_dataframes",
"neuralprophet.utils.fcst_df_to_last_forecast",
"numpy.log",
"neuralprophet.metrics.ValueMetric",
"neuralprophet.df_utils.fill_linear_then_rolling_avg",
"torch.cos",
"torch.sum",
"neuralprophet.df_utils.crossvalidation_split_df",
"neur... | [((523, 557), 'logging.getLogger', 'logging.getLogger', (['"""NP.forecaster"""'], {}), "('NP.forecaster')\n", (540, 557), False, 'import logging\n'), ((7985, 8195), 'neuralprophet.configure.Normalization', 'configure.Normalization', ([], {'normalize': 'normalize', 'global_normalization': 'global_normalization', 'global_time_normalization': 'global_time_normalization', 'unknown_data_normalization': 'unknown_data_normalization'}), '(normalize=normalize, global_normalization=\n global_normalization, global_time_normalization=\n global_time_normalization, unknown_data_normalization=\n unknown_data_normalization)\n', (8008, 8195), False, 'from neuralprophet import configure\n'), ((8441, 8487), 'neuralprophet.configure.from_kwargs', 'configure.from_kwargs', (['configure.Train', 'kwargs'], {}), '(configure.Train, kwargs)\n', (8462, 8487), False, 'from neuralprophet import configure\n'), ((9614, 9657), 'neuralprophet.configure.from_kwargs', 'configure.from_kwargs', (['configure.AR', 'kwargs'], {}), '(configure.AR, kwargs)\n', (9635, 9657), False, 'from neuralprophet import configure\n'), ((10020, 10066), 'neuralprophet.configure.from_kwargs', 'configure.from_kwargs', (['configure.Model', 'kwargs'], {}), '(configure.Model, kwargs)\n', (10041, 10066), False, 'from neuralprophet import configure\n'), ((10112, 10158), 'neuralprophet.configure.from_kwargs', 'configure.from_kwargs', (['configure.Trend', 'kwargs'], {}), '(configure.Trend, kwargs)\n', (10133, 10158), False, 'from neuralprophet import configure\n'), ((10211, 10381), 'neuralprophet.configure.AllSeason', 'configure.AllSeason', ([], {'mode': 'seasonality_mode', 'reg_lambda': 'seasonality_reg', 'yearly_arg': 'yearly_seasonality', 'weekly_arg': 'weekly_seasonality', 'daily_arg': 'daily_seasonality'}), '(mode=seasonality_mode, reg_lambda=seasonality_reg,\n yearly_arg=yearly_seasonality, weekly_arg=weekly_seasonality, daily_arg\n =daily_seasonality)\n', (10230, 10381), False, 'from neuralprophet import 
configure\n'), ((14209, 14287), 'neuralprophet.configure.Regressor', 'configure.Regressor', ([], {'reg_lambda': 'regularization', 'normalize': 'normalize', 'mode': 'mode'}), '(reg_lambda=regularization, normalize=normalize, mode=mode)\n', (14228, 14287), False, 'from neuralprophet import configure\n'), ((16944, 17080), 'neuralprophet.configure.Holidays', 'configure.Holidays', ([], {'country': 'country_name', 'lower_window': 'lower_window', 'upper_window': 'upper_window', 'reg_lambda': 'regularization', 'mode': 'mode'}), '(country=country_name, lower_window=lower_window,\n upper_window=upper_window, reg_lambda=regularization, mode=mode)\n', (16962, 17080), False, 'from neuralprophet import configure\n'), ((19890, 19920), 'neuralprophet.df_utils.prep_copy_df_dict', 'df_utils.prep_copy_df_dict', (['df'], {}), '(df)\n', (19916, 19920), False, 'from neuralprophet import df_utils\n'), ((20164, 20228), 'neuralprophet.df_utils.infer_frequency', 'df_utils.infer_frequency', (['df_dict'], {'n_lags': 'self.n_lags', 'freq': 'freq'}), '(df_dict, n_lags=self.n_lags, freq=freq)\n', (20188, 20228), False, 'from neuralprophet import df_utils\n'), ((22895, 22925), 'neuralprophet.df_utils.prep_copy_df_dict', 'df_utils.prep_copy_df_dict', (['df'], {}), '(df)\n', (22921, 22925), False, 'from neuralprophet import df_utils\n'), ((23792, 23863), 'neuralprophet.df_utils.maybe_get_single_df_from_df_dict', 'df_utils.maybe_get_single_df_from_df_dict', (['df_dict', 'received_unnamed_df'], {}), '(df_dict, received_unnamed_df)\n', (23833, 23863), False, 'from neuralprophet import df_utils\n'), ((24191, 24221), 'neuralprophet.df_utils.prep_copy_df_dict', 'df_utils.prep_copy_df_dict', (['df'], {}), '(df)\n', (24217, 24221), False, 'from neuralprophet import df_utils\n'), ((24429, 24503), 'neuralprophet.df_utils.infer_frequency', 'df_utils.infer_frequency', (['df_dict'], {'n_lags': 'self.n_lags', 'freq': 'self.data_freq'}), '(df_dict, n_lags=self.n_lags, freq=self.data_freq)\n', (24453, 24503), 
False, 'from neuralprophet import df_utils\n'), ((25931, 25961), 'neuralprophet.df_utils.prep_copy_df_dict', 'df_utils.prep_copy_df_dict', (['df'], {}), '(df)\n', (25957, 25961), False, 'from neuralprophet import df_utils\n'), ((26048, 26107), 'neuralprophet.df_utils.infer_frequency', 'df_utils.infer_frequency', (['df'], {'n_lags': 'self.n_lags', 'freq': 'freq'}), '(df, n_lags=self.n_lags, freq=freq)\n', (26072, 26107), False, 'from neuralprophet import df_utils\n'), ((26207, 26347), 'neuralprophet.df_utils.split_df', 'df_utils.split_df', (['df'], {'n_lags': 'self.n_lags', 'n_forecasts': 'self.n_forecasts', 'valid_p': 'valid_p', 'inputs_overbleed': '(True)', 'local_split': 'local_split'}), '(df, n_lags=self.n_lags, n_forecasts=self.n_forecasts,\n valid_p=valid_p, inputs_overbleed=True, local_split=local_split)\n', (26224, 26347), False, 'from neuralprophet import df_utils\n'), ((26446, 26518), 'neuralprophet.df_utils.maybe_get_single_df_from_df_dict', 'df_utils.maybe_get_single_df_from_df_dict', (['df_train', 'received_unnamed_df'], {}), '(df_train, received_unnamed_df)\n', (26487, 26518), False, 'from neuralprophet import df_utils\n'), ((26536, 26606), 'neuralprophet.df_utils.maybe_get_single_df_from_df_dict', 'df_utils.maybe_get_single_df_from_df_dict', (['df_val', 'received_unnamed_df'], {}), '(df_val, received_unnamed_df)\n', (26577, 26606), False, 'from neuralprophet import df_utils\n'), ((27695, 27754), 'neuralprophet.df_utils.infer_frequency', 'df_utils.infer_frequency', (['df'], {'n_lags': 'self.n_lags', 'freq': 'freq'}), '(df, n_lags=self.n_lags, freq=freq)\n', (27719, 27754), False, 'from neuralprophet import df_utils\n'), ((27843, 27994), 'neuralprophet.df_utils.crossvalidation_split_df', 'df_utils.crossvalidation_split_df', (['df'], {'n_lags': 'self.n_lags', 'n_forecasts': 'self.n_forecasts', 'k': 'k', 'fold_pct': 'fold_pct', 'fold_overlap_pct': 'fold_overlap_pct'}), '(df, n_lags=self.n_lags, n_forecasts=self.\n n_forecasts, k=k, fold_pct=fold_pct, 
fold_overlap_pct=fold_overlap_pct)\n', (27876, 27994), False, 'from neuralprophet import df_utils\n'), ((29135, 29194), 'neuralprophet.df_utils.infer_frequency', 'df_utils.infer_frequency', (['df'], {'n_lags': 'self.n_lags', 'freq': 'freq'}), '(df, n_lags=self.n_lags, freq=freq)\n', (29159, 29194), False, 'from neuralprophet import df_utils\n'), ((29299, 29442), 'neuralprophet.df_utils.double_crossvalidation_split_df', 'df_utils.double_crossvalidation_split_df', (['df'], {'n_lags': 'self.n_lags', 'n_forecasts': 'self.n_forecasts', 'k': 'k', 'valid_pct': 'valid_pct', 'test_pct': 'test_pct'}), '(df, n_lags=self.n_lags,\n n_forecasts=self.n_forecasts, k=k, valid_pct=valid_pct, test_pct=test_pct)\n', (29339, 29442), False, 'from neuralprophet import df_utils\n'), ((30294, 30324), 'neuralprophet.df_utils.prep_copy_df_dict', 'df_utils.prep_copy_df_dict', (['df'], {}), '(df)\n', (30320, 30324), False, 'from neuralprophet import df_utils\n'), ((31022, 31093), 'neuralprophet.df_utils.maybe_get_single_df_from_df_dict', 'df_utils.maybe_get_single_df_from_df_dict', (['df_dict', 'received_unnamed_df'], {}), '(df_dict, received_unnamed_df)\n', (31063, 31093), False, 'from neuralprophet import df_utils\n'), ((31273, 31303), 'neuralprophet.df_utils.prep_copy_df_dict', 'df_utils.prep_copy_df_dict', (['df'], {}), '(df)\n', (31299, 31303), False, 'from neuralprophet import df_utils\n'), ((31357, 31394), 'neuralprophet.df_utils.prep_copy_df_dict', 'df_utils.prep_copy_df_dict', (['events_df'], {}), '(events_df)\n', (31383, 31394), False, 'from neuralprophet import df_utils\n'), ((31456, 31497), 'neuralprophet.df_utils.prep_copy_df_dict', 'df_utils.prep_copy_df_dict', (['regressors_df'], {}), '(regressors_df)\n', (31482, 31497), False, 'from neuralprophet import df_utils\n'), ((32592, 32679), 'neuralprophet.df_utils.maybe_get_single_df_from_df_dict', 'df_utils.maybe_get_single_df_from_df_dict', (['df_future_dataframe', 'received_unnamed_df'], {}), '(df_future_dataframe,\n 
received_unnamed_df)\n', (32633, 32679), False, 'from neuralprophet import df_utils\n'), ((33048, 33078), 'neuralprophet.df_utils.prep_copy_df_dict', 'df_utils.prep_copy_df_dict', (['df'], {}), '(df)\n', (33074, 33078), False, 'from neuralprophet import df_utils\n'), ((33626, 33697), 'neuralprophet.df_utils.maybe_get_single_df_from_df_dict', 'df_utils.maybe_get_single_df_from_df_dict', (['df_dict', 'received_unnamed_df'], {}), '(df_dict, received_unnamed_df)\n', (33667, 33697), False, 'from neuralprophet import df_utils\n'), ((34107, 34137), 'neuralprophet.df_utils.prep_copy_df_dict', 'df_utils.prep_copy_df_dict', (['df'], {}), '(df)\n', (34133, 34137), False, 'from neuralprophet import df_utils\n'), ((35539, 35610), 'neuralprophet.df_utils.maybe_get_single_df_from_df_dict', 'df_utils.maybe_get_single_df_from_df_dict', (['df_dict', 'received_unnamed_df'], {}), '(df_dict, received_unnamed_df)\n', (35580, 35610), False, 'from neuralprophet import df_utils\n'), ((37686, 37810), 'neuralprophet.plot_forecast.plot', 'plot', ([], {'fcst': 'fcst', 'ax': 'ax', 'xlabel': 'xlabel', 'ylabel': 'ylabel', 'figsize': 'figsize', 'highlight_forecast': 'self.highlight_forecast_step_n'}), '(fcst=fcst, ax=ax, xlabel=xlabel, ylabel=ylabel, figsize=figsize,\n highlight_forecast=self.highlight_forecast_step_n)\n', (37690, 37810), False, 'from neuralprophet.plot_forecast import plot, plot_components\n'), ((39251, 39326), 'neuralprophet.utils.fcst_df_to_last_forecast', 'utils.fcst_df_to_last_forecast', (['fcst'], {'n_last': '(1 + include_previous_forecasts)'}), '(fcst, n_last=1 + include_previous_forecasts)\n', (39281, 39326), False, 'from neuralprophet import utils\n'), ((39342, 39488), 'neuralprophet.plot_forecast.plot', 'plot', ([], {'fcst': 'fcst', 'ax': 'ax', 'xlabel': 'xlabel', 'ylabel': 'ylabel', 'figsize': 'figsize', 'highlight_forecast': 'self.highlight_forecast_step_n', 'line_per_origin': '(True)'}), '(fcst=fcst, ax=ax, xlabel=xlabel, ylabel=ylabel, figsize=figsize,\n 
highlight_forecast=self.highlight_forecast_step_n, line_per_origin=True)\n', (39346, 39488), False, 'from neuralprophet.plot_forecast import plot, plot_components\n'), ((40099, 40226), 'neuralprophet.plot_forecast.plot_components', 'plot_components', ([], {'m': 'self', 'fcst': 'fcst', 'figsize': 'figsize', 'forecast_in_focus': 'self.highlight_forecast_step_n', 'residuals': 'residuals'}), '(m=self, fcst=fcst, figsize=figsize, forecast_in_focus=self.\n highlight_forecast_step_n, residuals=residuals)\n', (40114, 40226), False, 'from neuralprophet.plot_forecast import plot, plot_components\n'), ((41167, 41336), 'neuralprophet.plot_model_parameters.plot_parameters', 'plot_parameters', ([], {'m': 'self', 'forecast_in_focus': 'self.highlight_forecast_step_n', 'weekly_start': 'weekly_start', 'yearly_start': 'yearly_start', 'figsize': 'figsize', 'df_name': 'df_name'}), '(m=self, forecast_in_focus=self.highlight_forecast_step_n,\n weekly_start=weekly_start, yearly_start=yearly_start, figsize=figsize,\n df_name=df_name)\n', (41182, 41336), False, 'from neuralprophet.plot_model_parameters import plot_parameters\n'), ((41580, 41982), 'neuralprophet.time_net.TimeNet', 'time_net.TimeNet', ([], {'config_trend': 'self.config_trend', 'config_season': 'self.season_config', 'config_covar': 'self.config_covar', 'config_regressors': 'self.regressors_config', 'config_events': 'self.events_config', 'config_holidays': 'self.country_holidays_config', 'n_forecasts': 'self.n_forecasts', 'n_lags': 'self.n_lags', 'num_hidden_layers': 'self.config_model.num_hidden_layers', 'd_hidden': 'self.config_model.d_hidden'}), '(config_trend=self.config_trend, config_season=self.\n season_config, config_covar=self.config_covar, config_regressors=self.\n regressors_config, config_events=self.events_config, config_holidays=\n self.country_holidays_config, n_forecasts=self.n_forecasts, n_lags=self\n .n_lags, num_hidden_layers=self.config_model.num_hidden_layers,\n d_hidden=self.config_model.d_hidden)\n', 
(41596, 41982), False, 'from neuralprophet import time_net\n'), ((42729, 43060), 'neuralprophet.time_dataset.GlobalTimeDataset', 'time_dataset.GlobalTimeDataset', (['df_dict'], {'predict_mode': 'predict_mode', 'n_lags': 'self.n_lags', 'n_forecasts': 'self.n_forecasts', 'season_config': 'self.season_config', 'events_config': 'self.events_config', 'country_holidays_config': 'self.country_holidays_config', 'covar_config': 'self.config_covar', 'regressors_config': 'self.regressors_config'}), '(df_dict, predict_mode=predict_mode, n_lags=\n self.n_lags, n_forecasts=self.n_forecasts, season_config=self.\n season_config, events_config=self.events_config,\n country_holidays_config=self.country_holidays_config, covar_config=self\n .config_covar, regressors_config=self.regressors_config)\n', (42759, 43060), False, 'from neuralprophet import time_dataset\n'), ((54865, 54898), 'neuralprophet.df_utils.join_dataframes', 'df_utils.join_dataframes', (['df_dict'], {}), '(df_dict)\n', (54889, 54898), False, 'from neuralprophet import df_utils\n'), ((55054, 55127), 'neuralprophet.utils.set_auto_seasonalities', 'utils.set_auto_seasonalities', (['df_merged'], {'season_config': 'self.season_config'}), '(df_merged, season_config=self.season_config)\n', (55082, 55127), False, 'from neuralprophet import utils\n'), ((55453, 55527), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'self.config_train.batch_size', 'shuffle': '(True)'}), '(dataset, batch_size=self.config_train.batch_size, shuffle=True)\n', (55463, 55527), False, 'from torch.utils.data import DataLoader\n'), ((56719, 56737), 'torch.ones_like', 'torch.ones_like', (['t'], {}), '(t)\n', (56734, 56737), False, 'import torch\n'), ((59233, 59287), 'torch.zeros', 'torch.zeros', (['(1)'], {'dtype': 'torch.float', 'requires_grad': '(False)'}), '(1, dtype=torch.float, requires_grad=False)\n', (59244, 59287), False, 'import torch\n'), ((66192, 66203), 'time.time', 'time.time', ([], {}), '()\n', (66201, 66203), 
False, 'import time\n'), ((70287, 70356), 'neuralprophet.utils.symmetric_total_percentage_error', 'utils.symmetric_total_percentage_error', (['self.true_ar_weights', 'weights'], {}), '(self.true_ar_weights, weights)\n', (70325, 70356), False, 'from neuralprophet import utils\n'), ((71691, 71760), 'neuralprophet.df_utils.infer_frequency', 'df_utils.infer_frequency', (['df'], {'n_lags': 'self.n_lags', 'freq': 'self.data_freq'}), '(df, n_lags=self.n_lags, freq=self.data_freq)\n', (71715, 71760), False, 'from neuralprophet import df_utils\n'), ((80897, 80930), 'numpy.concatenate', 'np.concatenate', (['predicted_vectors'], {}), '(predicted_vectors)\n', (80911, 80930), True, 'import numpy as np\n'), ((83934, 83980), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'all_data', 'columns': 'all_names'}), '(data=all_data, columns=all_names)\n', (83946, 83980), True, 'import pandas as pd\n'), ((85037, 85067), 'pandas.concat', 'pd.concat', (['(df[cols],)'], {'axis': '(1)'}), '((df[cols],), axis=1)\n', (85046, 85067), True, 'import pandas as pd\n'), ((12675, 12770), 'neuralprophet.configure.Covar', 'configure.Covar', ([], {'reg_lambda': 'regularization', 'normalize': 'normalize', 'as_scalar': 'only_last_value'}), '(reg_lambda=regularization, normalize=normalize, as_scalar=\n only_last_value)\n', (12690, 12770), False, 'from neuralprophet import configure\n'), ((15224, 15239), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (15235, 15239), False, 'from collections import OrderedDict\n'), ((15656, 15767), 'neuralprophet.configure.Event', 'configure.Event', ([], {'lower_window': 'lower_window', 'upper_window': 'upper_window', 'reg_lambda': 'regularization', 'mode': 'mode'}), '(lower_window=lower_window, upper_window=upper_window,\n reg_lambda=regularization, mode=mode)\n', (15671, 15767), False, 'from neuralprophet import configure\n'), ((20809, 20850), 'neuralprophet.df_utils.prep_copy_df_dict', 'df_utils.prep_copy_df_dict', (['validation_df'], {}), 
'(validation_df)\n', (20835, 20850), False, 'from neuralprophet import df_utils\n'), ((30786, 30888), 'neuralprophet.df_utils.convert_events_to_features', 'df_utils.convert_events_to_features', (['df_i'], {'events_config': 'self.events_config', 'events_df': 'events_df_i'}), '(df_i, events_config=self.events_config,\n events_df=events_df_i)\n', (30821, 30888), False, 'from neuralprophet import df_utils\n'), ((33566, 33612), 'pandas.DataFrame', 'pd.DataFrame', (["{'ds': df['ds'], 'trend': trend}"], {}), "({'ds': df['ds'], 'trend': trend})\n", (33578, 33612), True, 'import pandas as pd\n'), ((34328, 34427), 'neuralprophet.time_dataset.TimeDataset', 'time_dataset.TimeDataset', (['df'], {'name': 'df_name', 'season_config': 'self.season_config', 'predict_mode': '(True)'}), '(df, name=df_name, season_config=self.season_config,\n predict_mode=True)\n', (34352, 34427), False, 'from neuralprophet import time_dataset\n'), ((35482, 35525), 'pandas.DataFrame', 'pd.DataFrame', (["{'ds': df['ds'], **predicted}"], {}), "({'ds': df['ds'], **predicted})\n", (35494, 35525), True, 'import pandas as pd\n'), ((43957, 44002), 'neuralprophet.df_utils.add_missing_dates_nan', 'df_utils.add_missing_dates_nan', (['df'], {'freq': 'freq'}), '(df, freq=freq)\n', (43987, 44002), False, 'from neuralprophet import df_utils\n'), ((50438, 50665), 'neuralprophet.df_utils.check_single_dataframe', 'df_utils.check_single_dataframe', ([], {'df': 'df_i', 'check_y': 'check_y', 'covariates': '(self.config_covar if exogenous else None)', 'regressors': '(self.regressors_config if exogenous else None)', 'events': '(self.events_config if exogenous else None)'}), '(df=df_i, check_y=check_y, covariates=self.\n config_covar if exogenous else None, regressors=self.regressors_config if\n exogenous else None, events=self.events_config if exogenous else None)\n', (50469, 50665), False, 'from neuralprophet import df_utils\n'), ((53733, 53770), 'neuralprophet.df_utils.normalize', 'df_utils.normalize', (['df_i', 
'data_params'], {}), '(df_i, data_params)\n', (53751, 53770), False, 'from neuralprophet import df_utils\n'), ((61603, 61618), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (61616, 61618), False, 'import torch\n'), ((66291, 66306), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (66302, 66306), False, 'from collections import OrderedDict\n'), ((73814, 73846), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'df.columns'}), '(columns=df.columns)\n', (73826, 73846), True, 'import pandas as pd\n'), ((74890, 75133), 'neuralprophet.df_utils.make_future_df', 'df_utils.make_future_df', ([], {'df_columns': 'df.columns', 'last_date': 'last_date', 'periods': 'periods', 'freq': 'self.data_freq', 'events_config': 'self.events_config', 'events_df': 'events_df', 'regressor_config': 'self.regressors_config', 'regressors_df': 'regressors_df'}), '(df_columns=df.columns, last_date=last_date, periods\n =periods, freq=self.data_freq, events_config=self.events_config,\n events_df=events_df, regressor_config=self.regressors_config,\n regressors_df=regressors_df)\n', (74913, 75133), False, 'from neuralprophet import df_utils\n'), ((76242, 76311), 'neuralprophet.df_utils.infer_frequency', 'df_utils.infer_frequency', (['df'], {'n_lags': 'self.n_lags', 'freq': 'self.data_freq'}), '(df, n_lags=self.n_lags, freq=self.data_freq)\n', (76266, 76311), False, 'from neuralprophet import df_utils\n'), ((77296, 77365), 'neuralprophet.df_utils.infer_frequency', 'df_utils.infer_frequency', (['df'], {'n_lags': 'self.n_lags', 'freq': 'self.data_freq'}), '(df, n_lags=self.n_lags, freq=self.data_freq)\n', (77320, 77365), False, 'from neuralprophet import df_utils\n'), ((80217, 80232), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (80230, 80232), False, 'import torch\n'), ((85430, 85497), 'numpy.concatenate', 'np.concatenate', (['([None] * pad_before, forecast, [None] * pad_after)'], {}), '(([None] * pad_before, forecast, [None] * pad_after))\n', (85444, 85497), True, 
'import numpy as np\n'), ((12621, 12636), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (12632, 12636), False, 'from collections import OrderedDict\n'), ((31754, 31829), 'neuralprophet.df_utils.compare_dict_keys', 'df_utils.compare_dict_keys', (['df_dict', 'df_dict_events', '"""dataframes"""', '"""events"""'], {}), "(df_dict, df_dict_events, 'dataframes', 'events')\n", (31780, 31829), False, 'from neuralprophet import df_utils\n'), ((32106, 32193), 'neuralprophet.df_utils.compare_dict_keys', 'df_utils.compare_dict_keys', (['df_dict', 'df_dict_regressors', '"""dataframes"""', '"""regressors"""'], {}), "(df_dict, df_dict_regressors, 'dataframes',\n 'regressors')\n", (32132, 32193), False, 'from neuralprophet import df_utils\n'), ((33280, 33313), 'numpy.expand_dims', 'np.expand_dims', (["df['t'].values", '(1)'], {}), "(df['t'].values, 1)\n", (33294, 33313), True, 'import numpy as np\n'), ((35197, 35228), 'numpy.concatenate', 'np.concatenate', (['predicted[name]'], {}), '(predicted[name])\n', (35211, 35228), True, 'import numpy as np\n'), ((57007, 57029), 'torch.zeros_like', 'torch.zeros_like', (['time'], {}), '(time)\n', (57023, 57029), False, 'import torch\n'), ((57070, 57091), 'torch.ones_like', 'torch.ones_like', (['time'], {}), '(time)\n', (57085, 57091), False, 'import torch\n'), ((59889, 60000), 'neuralprophet.utils.reg_func_trend', 'utils.reg_func_trend', ([], {'weights': 'self.model.get_trend_deltas', 'threshold': 'self.config_train.trend_reg_threshold'}), '(weights=self.model.get_trend_deltas, threshold=self.\n config_train.trend_reg_threshold)\n', (59909, 60000), False, 'from neuralprophet import utils\n'), ((60728, 60815), 'neuralprophet.utils.reg_func_events', 'utils.reg_func_events', (['self.events_config', 'self.country_holidays_config', 'self.model'], {}), '(self.events_config, self.country_holidays_config,\n self.model)\n', (60749, 60815), False, 'from neuralprophet import utils\n'), ((61023, 61084), 
'neuralprophet.utils.reg_func_regressors', 'utils.reg_func_regressors', (['self.regressors_config', 'self.model'], {}), '(self.regressors_config, self.model)\n', (61048, 61084), False, 'from neuralprophet import utils\n'), ((65399, 65427), 'livelossplot.PlotLosses', 'PlotLosses', ([], {'outputs': 'live_out'}), '(outputs=live_out)\n', (65409, 65427), False, 'from livelossplot import PlotLosses\n'), ((66816, 66831), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (66827, 66831), False, 'from collections import OrderedDict\n'), ((67629, 67668), 'numpy.log', 'np.log', (['epoch_metrics[metrics_train[0]]'], {}), '(epoch_metrics[metrics_train[0]])\n', (67635, 67668), True, 'import numpy as np\n'), ((71201, 71244), 'neuralprophet.utils.print_epoch_metrics', 'utils.print_epoch_metrics', (['val_metrics_dict'], {}), '(val_metrics_dict)\n', (71226, 71244), False, 'from neuralprophet import utils\n'), ((76759, 76882), 'neuralprophet.df_utils.make_future_df', 'df_utils.make_future_df', ([], {'df_columns': 'df.columns', 'last_date': 'last_date', 'periods': 'periods_add[df_name]', 'freq': 'self.data_freq'}), '(df_columns=df.columns, last_date=last_date, periods\n =periods_add[df_name], freq=self.data_freq)\n', (76782, 76882), False, 'from neuralprophet import df_utils\n'), ((81192, 81213), 'numpy.concatenate', 'np.concatenate', (['value'], {}), '(value)\n', (81206, 81213), True, 'import numpy as np\n'), ((83783, 83823), 'numpy.concatenate', 'np.concatenate', (['(all_data, comp_data)', '(1)'], {}), '((all_data, comp_data), 1)\n', (83797, 83823), True, 'import numpy as np\n'), ((86743, 86808), 'numpy.concatenate', 'np.concatenate', (['([None] * self.n_lags, forecast_0, forecast_rest)'], {}), '(([None] * self.n_lags, forecast_0, forecast_rest))\n', (86757, 86808), True, 'import numpy as np\n'), ((57204, 57219), 'torch.cos', 'torch.cos', (['time'], {}), '(time)\n', (57213, 57219), False, 'import torch\n'), ((60423, 60476), 'neuralprophet.utils.reg_func_season', 
'utils.reg_func_season', (['self.model.season_params[name]'], {}), '(self.model.season_params[name])\n', (60444, 60476), False, 'from neuralprophet import utils\n'), ((67144, 67220), 'neuralprophet.utils.print_epoch_metrics', 'utils.print_epoch_metrics', (['epoch_metrics'], {'e': 'e', 'val_metrics': 'val_epoch_metrics'}), '(epoch_metrics, e=e, val_metrics=val_epoch_metrics)\n', (67169, 67220), False, 'from neuralprophet import utils\n'), ((68058, 68099), 'numpy.log', 'np.log', (['val_epoch_metrics[metrics_val[0]]'], {}), '(val_epoch_metrics[metrics_val[0]])\n', (68064, 68099), True, 'import numpy as np\n'), ((68608, 68619), 'time.time', 'time.time', ([], {}), '()\n', (68617, 68619), False, 'import time\n'), ((86331, 86398), 'numpy.concatenate', 'np.concatenate', (['([None] * pad_before, forecast, [None] * pad_after)'], {}), '(([None] * pad_before, forecast, [None] * pad_after))\n', (86345, 86398), True, 'import numpy as np\n'), ((9528, 9558), 'neuralprophet.metrics.ValueMetric', 'metrics.ValueMetric', (['"""RegLoss"""'], {}), "('RegLoss')\n", (9547, 9558), False, 'from neuralprophet import metrics\n'), ((47379, 47501), 'neuralprophet.df_utils.fill_linear_then_rolling_avg', 'df_utils.fill_linear_then_rolling_avg', (['df[column]'], {'limit_linear': 'self.impute_limit_linear', 'rolling': 'self.impute_rolling'}), '(df[column], limit_linear=self.\n impute_limit_linear, rolling=self.impute_rolling)\n', (47416, 47501), False, 'from neuralprophet import df_utils\n'), ((9381, 9428), 'neuralprophet.metrics.LossMetric', 'metrics.LossMetric', (['self.config_train.loss_func'], {}), '(self.config_train.loss_func)\n', (9399, 9428), False, 'from neuralprophet import metrics\n'), ((59554, 59571), 'torch.sum', 'torch.sum', (['reg_ar'], {}), '(reg_ar)\n', (59563, 59571), False, 'import torch\n'), ((54760, 54801), 'pandas.Series', 'pd.Series', (['self.config_trend.changepoints'], {}), '(self.config_trend.changepoints)\n', (54769, 54801), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
Benchmarks for primitive/low-level array operations.
"""
__all__ = [
"bench_astype",
"bench_astype_numba",
"bench_astype_numpy",
"bench_bool_index",
"bench_bool_index_numpy",
"bench_compare_ops",
"bench_compare_ops_numpy",
"bench_mbget",
"bench_mbget_numba",
# comparisons
"compare_astype",
"compare_bool_index",
"compare_compare_ops",
"compare_mbget",
]
import itertools
import logging
import operator
from typing import List
import numpy as np
from numpy.random import default_rng
import numba as nb
from .benchmark import _timestamp_funcs
from .rand_data import rand_array, rand_fancyindex
from .runner import create_comparison_dataset, create_trial_dataset
from ..rt_enum import TypeRegister, NumpyCharTypes
from ..rt_dataset import Dataset
from ..rt_numpy import empty
from ..rt_utils import mbget, _mbget_2dims #, mbget_numba
logger = logging.getLogger(__name__)
"""The logger for this module."""
timestamper = _timestamp_funcs["get_nano_time"]
"""The timestamping function to use in benchmarks."""
# TODO: Additional benchmarks which would be useful for riptable development and comparison to other frameworks:
# * mbget vs. numpy fancy indexing (only on valid array indices -- -len(arr) <= x < len(arr))
# * mbget vs. numba-based equivalent to look for compiled code optimizations + thread scaling
# * indexing with a boolean mask (riptable vs. numba)
# * array conversion (i.e. elementwise type conversion) (arr1.astype(np.float32))
# * make sure to include the self-conversion case so that we look for optimizations there (like just calling memcpy)
# * equality and comparisons
# * elementwise array equality (arr1 == arr2)
# * array vs. scalar equality (arr == 123, arr == "foo", arr != '', etc.)
# * elementwise array comparison (arr1 < arr2)
# * array vs. scalar comparison (arr1 < 1.23, arr > 123, etc.)
# * it would also be useful (for demonstration purposes) to demo here how much faster these operations
# are on a string categorical compared to a normal array of strings (like the Categorical's .expand_array).
# * conversion-assignment, e.g. result[:] = arr[:]
def mbget_numba(aValues, aIndex) -> np.ndarray:
    """
    Numba-based re-implementation of the 'mbget' fancy-indexing function,
    used for performance comparison against the riptide_cpp implementation.

    Parameters
    ----------
    aValues : list, tuple or np.ndarray
        Source array to gather from; numeric or byte/unicode string dtype.
    aIndex : list, tuple or np.ndarray
        Integer indices (negative indices wrap around); out-of-bounds entries
        produce the invalid/default value in the output.

    Returns
    -------
    np.ndarray
        One element per entry of `aIndex`, with the same dtype (and array
        subclass) as `aValues`.
    """
    # Coerce plain python sequences to arrays before validating.
    if isinstance(aValues, (list, tuple)):
        aValues = TypeRegister.FastArray(aValues)
    if isinstance(aIndex, (list, tuple)):
        aIndex = TypeRegister.FastArray(aIndex)

    # Validate argument types and dtypes.
    if not (isinstance(aValues, np.ndarray) and isinstance(aIndex, np.ndarray)):
        raise TypeError(f"Values and index must be numpy arrays. Got {type(aValues)} {type(aIndex)}")
    elif aValues.dtype.char == 'O':
        raise TypeError(f"mbget does not support object types")
    elif aIndex.dtype.char not in NumpyCharTypes.AllInteger:
        raise TypeError(f"indices provided to mbget must be an integer type not {aIndex.dtype}")

    # 2-dimensional inputs are delegated to the shared helper.
    if aValues.ndim == 2:
        return _mbget_2dims(aValues, aIndex)

    # TODO: probably need special code or parameter to set custom default value for NAN_TIME
    numeric_chars = NumpyCharTypes.AllInteger + NumpyCharTypes.AllFloat
    if aValues.dtype.char in numeric_chars:
        gathered = _mbget_numeric(aValues, aIndex)
    elif aValues.dtype.char in "SU":
        gathered = _mbget_string(aValues, aIndex)
    else:
        raise Exception(f"mbget can't operate on an array of this type: {aValues.dtype}")

    # Re-wrap the plain result in the same array subclass as the input.
    return TypeRegister.newclassfrominstance(gathered, aValues)
def _mbget_numeric(aValues, aIndex) -> np.ndarray:
    """Gather `aValues[aIndex]` for numeric dtypes, filling out-of-bounds slots with the invalid value."""
    out = empty(len(aIndex), dtype=aValues.dtype)
    # Dispatch on signed vs. unsigned index dtype up front.
    # See comment in _mbget_string for why the two kernels are kept separate.
    if aIndex.dtype.kind == 'u':
        kernel = _mbget_numeric_unsigned_impl
    else:
        kernel = _mbget_numeric_signed_impl
    kernel(aValues, aIndex, out, out.inv)
    return out
@nb.njit(cache=True, parallel=True, nogil=True)
def _mbget_numeric_signed_impl(aValues, aIndex, result, default_val):
    # Parallel gather kernel for signed index dtypes: result[i] = aValues[aIndex[i]],
    # writing default_val for any index outside [-len(aValues), len(aValues)).
    num_elmnts = len(aValues)
    for i in nb.prange(aIndex.shape[0]):
        # This has one less branch (in the code) than the riptide_cpp implementation of mbget,
        # because numba handles the negative/wraparound indexing for us. So the conditional logic
        # to handle the negative indexing is still there; it may or may not be in the generated
        # machine code depending on how numba chooses to generate it.
        index = aIndex[i]
        result[i] = aValues[index] if -num_elmnts <= index < num_elmnts else default_val
@nb.njit(cache=True, parallel=True, nogil=True)
def _mbget_numeric_unsigned_impl(aValues, aIndex, result, default_val):
    # Parallel gather kernel for unsigned index dtypes: result[i] = aValues[aIndex[i]],
    # writing default_val for any index >= len(aValues).
    num_elmnts = len(aValues)
    for i in nb.prange(aIndex.shape[0]):
        # Unsigned indices can never be negative, so (unlike the signed kernel above)
        # only the upper bound needs checking here. Keeping this as a separate
        # specialization also keeps numba from inserting extra type conversions
        # (see the comment in _mbget_string for details).
        index = aIndex[i]
        result[i] = aValues[index] if index < num_elmnts else default_val
# No default-value parameter here: these kernels handle byte strings only,
# and a missing entry is simply zero-filled (the empty string).
def _mbget_string(aValues, aIndex) -> np.ndarray:
    """Gather `aValues[aIndex]` for byte-string dtypes; out-of-bounds slots become empty strings."""
    out = empty(len(aIndex), dtype=aValues.dtype)
    bytes_per_item = aValues.dtype.itemsize // 1  # ASCII
    # Dispatch on signed vs. unsigned index dtype. This is both for performance
    # reasons and also for correctness: running unsigned integer indices through
    # the signed implementation makes numba do extra/unnecessary conversions so
    # the performance is poor; for that same reason, numba fails with an error on
    # the uint64 type since it tries to cast the index value to a float before
    # using it as an array index (which isn't allowed).
    # TODO: This decision could probably be pushed into the numba JIT-specialized
    #   generic so we don't need to choose here?
    if aIndex.dtype.kind == 'u':
        kernel = _mbget_string_unsigned_impl
    else:
        kernel = _mbget_string_signed_impl
    # The kernels operate on raw bytes, so view both buffers as uint8.
    kernel(aValues.view(np.uint8), aIndex, out.view(np.uint8), bytes_per_item)
    return out
@nb.njit(cache=True, parallel=True, nogil=True)
def _mbget_string_signed_impl(aValues, aIndex, result, itemsize): # byte array
    # Parallel gather over a fixed-width byte-string array viewed as uint8:
    # copies `itemsize` bytes per fetched string. Out-of-bounds indices
    # (outside [-numstrings, numstrings)) produce a zero-filled (empty) string.
    numstrings = aValues.shape[0] // itemsize
    for i in nb.prange(aIndex.shape[0]):
        index = aIndex[i]
        if -numstrings <= index < numstrings:
            # Resolve negative (wraparound) indices manually, since the copy
            # below works with explicit byte offsets rather than numpy indexing.
            str_idx = index if index >= 0 else numstrings + aIndex[i]
            for j in range(itemsize):
                result[itemsize * i + j] = aValues[itemsize * str_idx + j]
        else:
            for j in range(itemsize):
                result[itemsize * i + j] = 0
@nb.njit(cache=True, parallel=True, nogil=True)
def _mbget_string_unsigned_impl(aValues, aIndex, result, itemsize): # byte array
    # Parallel gather over a fixed-width byte-string array viewed as uint8.
    # Unsigned indices can never be negative, so only the upper bound is
    # checked; out-of-bounds indices produce a zero-filled (empty) string.
    numstrings = aValues.shape[0] // itemsize
    for i in nb.prange(aIndex.shape[0]):
        index = aIndex[i]
        if index < numstrings:
            for j in range(itemsize):
                result[itemsize * i + j] = aValues[itemsize * index + j]
        else:
            for j in range(itemsize):
                result[itemsize * i + j] = 0
def astype_numba(arr, dst_dtype):
    """
    Elementwise type conversion (like ``ndarray.astype``) implemented with a
    numba kernel, for comparison against the riptable/numpy implementations.

    Only numeric-to-numeric conversions are supported (numba can only convert
    between arrays of numeric types -- see ``_astype_numba``).

    Parameters
    ----------
    arr : np.ndarray
        Source array; must have a numeric dtype.
    dst_dtype : np.dtype or dtype-like
        Target numeric dtype. Accepts anything ``np.dtype()`` accepts
        (e.g. the ``np.int16`` / ``np.float64`` classes the benchmark sweeps
        in this module pass around), not just ``np.dtype`` instances.

    Returns
    -------
    np.ndarray
        New array of length ``arr.shape[0]`` with dtype ``dst_dtype``.
    """
    # Coerce dtype-like inputs to a real np.dtype; the original code accessed
    # dst_dtype.char directly, which raises AttributeError for dtype *classes*
    # like np.int16 rather than the intended error (or the working path).
    dst_dtype = np.dtype(dst_dtype)
    # only supports numeric-to-numeric type conversions
    if arr.dtype.char in "SU" or dst_dtype.char in "SU":
        raise Exception(f"Only numeric-to-numeric type conversions are supported.")
    result = empty(arr.shape[0], dtype=dst_dtype)
    _astype_numba(arr, result)
    return result
# numba seems to emit poor quality code for this simple loop, and the performance is
# massively worse when parallel=True is specified. (Tested with numba 0.48, 0.50.1)
# Manually splitting the loop so the input data is chunked does not improve the performance either.
@nb.njit(cache=True, parallel=False, nogil=True)
def _astype_numba(arr, result):
    # Elementwise copy-convert kernel: `result` must be preallocated with the
    # target dtype; each assignment performs the numeric conversion implicitly.
    for i in nb.prange(len(arr)):
        # conversion occurs implicitly, and numba only supports conversion
        # between arrays of numeric types.
        result[i] = arr[i]
def bench_bool_index(**kwargs) -> Dataset:
    """
    Benchmark riptable boolean-mask indexing (``data_array[mask]``).

    Sweeps over data dtype, array length, and the fraction of True values in
    the mask; each parameter combination is timed for ``iters`` invocations.

    Returns
    -------
    Dataset
        One row per timed invocation: elapsed nanoseconds plus the parameter
        values used for that trial.
    """
    # Fixed parameters which apply to all of the trials in this benchmark.
    warmup_iters = 0
    iters = 21
    # Parameters we'll sweep over for the benchmark.
    rng_seeds = [12345]
    data_dtypes = [
        np.int16,
        np.int32,
        np.float64,
        # TODO: Enable these additional data types; they're somewhat slow though, so we'd only want to
        #   run them under a 'detailed' / 'long-running' scenario
        # np.dtype('S4'),
        # np.dtype('S10'),
        # np.dtype('<U8')
    ]
    data_lengths = [
        100,
        1_000,
        10_000,
        100_000,
        1_000_000,
        10_000_000,
        # TODO: Add 100M, 1G and 2G -- these need to be optional since smaller machines will run out of memory
        #   and also take longer than typical trials
    ]
    # Fraction of mask elements expected to be True (0.0 selects nothing, 1.0 everything).
    true_ratios = [
        0.0,
        0.2,
        0.4,
        0.6,
        0.8,
        1.0
    ]
    setup_params = itertools.product(
        rng_seeds,
        data_dtypes,
        data_lengths,
        true_ratios
    )
    # Datasets containing timing data and parameters from the trials in this benchmark.
    benchmark_data: List[Dataset] = []
    for (
        rng_seed,
        data_dtype,
        data_length,
        true_ratio,
    ) in setup_params:
        # HACK: Until we have a better approach for supporting non-rectangular parameter spaces,
        #   or otherwise being able to skip certain combinations of parameters (e.g. because
        #   they're invalid, non-realistic, or otherwise don't make sense).
        # if np.iinfo(index_dtype).max < data_length:
        #     continue
        #
        # Setup phase. The data here is used for both the warmup and the real, timed function invocations.
        #
        # Make sure to re-initialize the RNG each time so we get a repeatable result.
        rng = default_rng(rng_seed)
        data_array = rand_array(rng, data_length, dtype=np.dtype(data_dtype))
        # Uniform [0, 1) samples compared against true_ratio give a mask whose
        # expected True-fraction equals true_ratio.
        mask = rng.random(data_length) < true_ratio
        # Sweep over other parameters that aren't required by the setup phase.
        other_params = [None]
        for _ in other_params:
            # Allocate an array to hold the raw timing data.
            # TODO: Change to use TimeSpan?
            timing_data = empty(iters, dtype=np.int64)
            for is_warmup in (True, False):
                loop_count = warmup_iters if is_warmup else iters
                for i in range(loop_count):
                    start_time_ns = timestamper()
                    ### The actual function invocation ###
                    _ = data_array[mask]
                    ### Store the timing results (if this was a real invocation).
                    call_nanos = timestamper() - start_time_ns
                    if not is_warmup:
                        timing_data[i] = call_nanos
            # Create a mini Dataset with the timing results for this run.
            # Capture the timing results along with the other options used for the function invocations.
            trial_data = create_trial_dataset(
                timing_data,
                {
                    # Setup parameters
                    "rng_seed": rng_seed,
                    "data_dtype": np.dtype(data_dtype),
                    "data_length": data_length,
                    "true_ratio": true_ratio,
                    # Other parameters
                    # (None)
                },
            )
            benchmark_data.append(trial_data)
    # hstack all of the individual Datasets together into one large Dataset and return it.
    return Dataset.hstack(benchmark_data, destroy=True)
def bench_bool_index_numpy(**kwargs) -> Dataset:
    """
    Benchmark plain-numpy boolean-mask indexing (``data_array[mask]``).

    Same parameter sweep as :func:`bench_bool_index`, but the data array is
    unwrapped to its underlying ``np.ndarray`` (via ``._np``) first, so the
    timings serve as the numpy baseline for comparison.

    Returns
    -------
    Dataset
        One row per timed invocation: elapsed nanoseconds plus the parameter
        values used for that trial.
    """
    # Fixed parameters which apply to all of the trials in this benchmark.
    warmup_iters = 0
    iters = 21
    # Parameters we'll sweep over for the benchmark.
    rng_seeds = [12345]
    data_dtypes = [
        np.int16,
        np.int32,
        np.float64,
        # TODO: Enable these additional data types; they're somewhat slow though, so we'd only want to
        #   run them under a 'detailed' / 'long-running' scenario
        # np.dtype('S4'),
        # np.dtype('S10'),
        # np.dtype('<U8')
    ]
    data_lengths = [
        100,
        1_000,
        10_000,
        100_000,
        1_000_000,
        10_000_000,
        # TODO: Add 100M, 1G and 2G -- these need to be optional since smaller machines will run out of memory
        #   and also take longer than typical trials
    ]
    # Fraction of mask elements expected to be True (0.0 selects nothing, 1.0 everything).
    true_ratios = [
        0.0,
        0.2,
        0.4,
        0.6,
        0.8,
        1.0
    ]
    setup_params = itertools.product(
        rng_seeds,
        data_dtypes,
        data_lengths,
        true_ratios
    )
    # Datasets containing timing data and parameters from the trials in this benchmark.
    benchmark_data: List[Dataset] = []
    for (
        rng_seed,
        data_dtype,
        data_length,
        true_ratio,
    ) in setup_params:
        # HACK: Until we have a better approach for supporting non-rectangular parameter spaces,
        #   or otherwise being able to skip certain combinations of parameters (e.g. because
        #   they're invalid, non-realistic, or otherwise don't make sense).
        # if np.iinfo(index_dtype).max < data_length:
        #     continue
        #
        # Setup phase. The data here is used for both the warmup and the real, timed function invocations.
        #
        # Make sure to re-initialize the RNG each time so we get a repeatable result.
        rng = default_rng(rng_seed)
        data_array = rand_array(rng, data_length, dtype=np.dtype(data_dtype))
        # Unwrap to the raw numpy ndarray so we time numpy, not riptable.
        if hasattr(data_array, "_np"):
            data_array = data_array._np
        mask = rng.random(data_length) < true_ratio
        # Sweep over other parameters that aren't required by the setup phase.
        other_params = [None]
        for _ in other_params:
            # Allocate an array to hold the raw timing data.
            # TODO: Change to use TimeSpan?
            timing_data = empty(iters, dtype=np.int64)
            for is_warmup in (True, False):
                loop_count = warmup_iters if is_warmup else iters
                for i in range(loop_count):
                    start_time_ns = timestamper()
                    ### The actual function invocation ###
                    _ = data_array[mask]
                    ### Store the timing results (if this was a real invocation).
                    call_nanos = timestamper() - start_time_ns
                    if not is_warmup:
                        timing_data[i] = call_nanos
            # Create a mini Dataset with the timing results for this run.
            # Capture the timing results along with the other options used for the function invocations.
            trial_data = create_trial_dataset(
                timing_data,
                {
                    # Setup parameters
                    "rng_seed": rng_seed,
                    "data_dtype": np.dtype(data_dtype),
                    "data_length": data_length,
                    "true_ratio": true_ratio,
                    # Other parameters
                    # (None)
                },
            )
            benchmark_data.append(trial_data)
    # hstack all of the individual Datasets together into one large Dataset and return it.
    return Dataset.hstack(benchmark_data, destroy=True)
def bench_mbget(**kwargs) -> Dataset:
    """
    Benchmark riptable's ``mbget`` fancy-indexing function.

    Sweeps over data dtype, index dtype, data/index lengths, and the fraction
    of invalid entries; invalid (out-of-range) indices exercise mbget's
    default-value handling.

    Returns
    -------
    Dataset
        One row per timed invocation: elapsed nanoseconds plus the parameter
        values used for that trial.
    """
    # TODO: Add additional dimensions:
    #   * number of threads
    #   * recycler on/off
    #   * different key multiplicity distributions (in the rand_fancyindex function)
    #   * different amounts of 'sortedness' of the fancy index (from the rand_fancyindex function)
    # Fixed parameters which apply to all of the trials in this benchmark.
    warmup_iters = 0
    iters = 21  # This duration of this function is (usually) fairly short, so the performance is prone to random noise -- using more iterations helps
    # Parameters we'll sweep over for the benchmark.
    rng_seeds = [12345]
    data_dtypes = [
        np.int16,
        np.int32,
        np.float64,
        # TODO: Enable these additional data types; they're somewhat slow though, so we'd only want to
        #   run them under a 'detailed' / 'long-running' scenario
        np.dtype('S11'),
    ]
    index_dtypes = [
        np.int8,
        np.uint8,
        np.int16,
        np.uint16,
        np.int32,
        np.uint32,
        np.int64,
        np.uint64,
        # TODO: Add float32 / float64 once rand_fancyindex() supports them
    ]
    data_lengths = [
        100,
        1_000,
        10_000,
        100_000,
        1_000_000,
        10_000_000,
        # TODO: Add 100M, 1G and 2G -- these need to be optional since smaller machines will run out of memory
        #   and also take longer than typical trials
    ]
    index_lengths = [10, 100, 1_000, 10_000, 100_000, 1_000_000, 10_000_000]
    invalid_ratios = [
        0.0,
        0.01,
        0.1,
        # TODO: Enable these additional values for the 'detailed' scenario
        # 0.5,
        # 0.9,
    ]
    setup_params = itertools.product(
        rng_seeds,
        data_dtypes,
        index_dtypes,
        data_lengths,
        index_lengths,
        invalid_ratios,
    )
    # Datasets containing timing data and parameters from the trials in this benchmark.
    benchmark_data: List[Dataset] = []
    for (
        rng_seed,
        data_dtype,
        index_dtype,
        data_length,
        index_length,
        invalid_ratio,
    ) in setup_params:
        # HACK: Until we have a better approach for supporting non-rectangular parameter spaces,
        #   or otherwise being able to skip certain combinations of parameters (e.g. because
        #   they're invalid, non-realistic, or otherwise don't make sense).
        # Skip combinations where the index dtype can't address the whole data array.
        if np.iinfo(index_dtype).max < data_length:
            continue
        #
        # Setup phase. The data here is used for both the warmup and the real, timed function invocations.
        #
        # Make sure to re-initialize the RNG each time so we get a repeatable result.
        rng = default_rng(rng_seed)
        data_array = rand_array(rng, data_length, dtype=np.dtype(data_dtype), invalid_ratio=invalid_ratio)
        fancyindex = rand_fancyindex(
            rng,
            index_length,
            dtype=np.dtype(index_dtype),
            source_arr_len=data_length,
            invalid_ratio=invalid_ratio,
        )
        # Sweep over other parameters that aren't required by the setup phase.
        other_params = [None]
        for _ in other_params:
            # Allocate an array to hold the raw timing data.
            # TODO: Change to use TimeSpan?
            timing_data = empty(iters, dtype=np.int64)
            for is_warmup in (True, False):
                loop_count = warmup_iters if is_warmup else iters
                for i in range(loop_count):
                    start_time_ns = timestamper()
                    ### The actual function invocation ###
                    mbget(data_array, fancyindex)
                    ### Store the timing results (if this was a real invocation).
                    call_nanos = timestamper() - start_time_ns
                    if not is_warmup:
                        timing_data[i] = call_nanos
            # Create a mini Dataset with the timing results for this run.
            # Capture the timing results along with the other options used for the function invocations.
            trial_data = create_trial_dataset(
                timing_data,
                {
                    # Setup parameters
                    "rng_seed": rng_seed,
                    "data_dtype": np.dtype(data_dtype),
                    "index_dtype": np.dtype(index_dtype),
                    "data_length": data_length,
                    "index_length": index_length,
                    "invalid_ratio": invalid_ratio,
                    # Other parameters
                    # (None)
                },
            )
            benchmark_data.append(trial_data)
    # hstack all of the individual Datasets together into one large Dataset and return it.
    return Dataset.hstack(benchmark_data, destroy=True)
def bench_mbget_numba(**kwargs) -> Dataset:
    """
    Benchmark the numba re-implementation of ``mbget`` (:func:`mbget_numba`).

    Same parameter sweep as :func:`bench_mbget`, so the resulting Datasets can
    be compared directly against the riptide_cpp implementation's timings.

    Returns
    -------
    Dataset
        One row per timed invocation: elapsed nanoseconds plus the parameter
        values used for that trial.
    """
    # TODO: Add additional dimensions:
    #   * number of threads
    #   * recycler on/off
    #   * different key multiplicity distributions (in the rand_fancyindex function)
    #   * different amounts of 'sortedness' of the fancy index (from the rand_fancyindex function)
    # Fixed parameters which apply to all of the trials in this benchmark.
    # One warmup iteration here (unlike bench_mbget) so numba's JIT compilation
    # on the first call is not included in the timed iterations.
    warmup_iters = 1
    iters = 21  # This duration of this function is (usually) fairly short, so the performance is prone to random noise -- using more iterations helps
    # Parameters we'll sweep over for the benchmark.
    rng_seeds = [12345]
    data_dtypes = [
        np.int16,
        np.int32,
        np.float64,
        # TODO: Enable these additional data types; they're somewhat slow though, so we'd only want to
        #   run them under a 'detailed' / 'long-running' scenario
        np.dtype('S11'),
    ]
    index_dtypes = [
        np.int8,
        np.uint8,
        np.int16,
        np.uint16,
        np.int32,
        np.uint32,
        np.int64,
        np.uint64,
        # TODO: Add float32 / float64 once rand_fancyindex() supports them
    ]
    data_lengths = [
        100,
        1_000,
        10_000,
        100_000,
        1_000_000,
        10_000_000,
        # TODO: Add 100M, 1G and 2G -- these need to be optional since smaller machines will run out of memory
        #   and also take longer than typical trials
    ]
    index_lengths = [10, 100, 1_000, 10_000, 100_000, 1_000_000, 10_000_000]
    invalid_ratios = [
        0.0,
        0.01,
        0.1,
        # TODO: Enable these additional values for the 'detailed' scenario
        # 0.5,
        # 0.9,
    ]
    setup_params = itertools.product(
        rng_seeds,
        data_dtypes,
        index_dtypes,
        data_lengths,
        index_lengths,
        invalid_ratios,
    )
    # Datasets containing timing data and parameters from the trials in this benchmark.
    benchmark_data: List[Dataset] = []
    for (
        rng_seed,
        data_dtype,
        index_dtype,
        data_length,
        index_length,
        invalid_ratio,
    ) in setup_params:
        # HACK: Until we have a better approach for supporting non-rectangular parameter spaces,
        #   or otherwise being able to skip certain combinations of parameters (e.g. because
        #   they're invalid, non-realistic, or otherwise don't make sense).
        # Skip combinations where the index dtype can't address the whole data array.
        if np.iinfo(index_dtype).max < data_length:
            continue
        #
        # Setup phase. The data here is used for both the warmup and the real, timed function invocations.
        #
        # Make sure to re-initialize the RNG each time so we get a repeatable result.
        rng = default_rng(rng_seed)
        data_array = rand_array(rng, data_length, dtype=np.dtype(data_dtype), invalid_ratio=invalid_ratio)
        fancyindex = rand_fancyindex(
            rng,
            index_length,
            dtype=np.dtype(index_dtype),
            source_arr_len=data_length,
            invalid_ratio=invalid_ratio,
        )
        # Sweep over other parameters that aren't required by the setup phase.
        other_params = [None]
        for _ in other_params:
            # Allocate an array to hold the raw timing data.
            # TODO: Change to use TimeSpan?
            timing_data = empty(iters, dtype=np.int64)
            for is_warmup in (True, False):
                loop_count = warmup_iters if is_warmup else iters
                for i in range(loop_count):
                    start_time_ns = timestamper()
                    ### The actual function invocation ###
                    mbget_numba(data_array, fancyindex)
                    ### Store the timing results (if this was a real invocation).
                    call_nanos = timestamper() - start_time_ns
                    if not is_warmup:
                        timing_data[i] = call_nanos
            # Create a mini Dataset with the timing results for this run.
            # Capture the timing results along with the other options used for the function invocations.
            trial_data = create_trial_dataset(
                timing_data,
                {
                    # Setup parameters
                    "rng_seed": rng_seed,
                    "data_dtype": np.dtype(data_dtype),
                    "index_dtype": np.dtype(index_dtype),
                    "data_length": data_length,
                    "index_length": index_length,
                    "invalid_ratio": invalid_ratio,
                    # Other parameters
                    # (None)
                },
            )
            benchmark_data.append(trial_data)
    # hstack all of the individual Datasets together into one large Dataset and return it.
    return Dataset.hstack(benchmark_data, destroy=True)
def bench_astype(**kwargs):
    """
    Benchmark riptable elementwise type conversion (``arr.astype(dst_dtype)``).

    Sweeps over source dtype, destination dtype (including the self-conversion
    case), array length, and invalid-value ratio.

    Returns
    -------
    Dataset
        One row per timed invocation: elapsed nanoseconds plus the parameter
        values used for that trial.
    """
    # TODO: Add additional dimensions:
    #   * number of threads
    #   * recycler on/off
    #   * different key multiplicity distributions (in the rand_fancyindex function)
    #   * different amounts of 'sortedness' of the fancy index (from the rand_fancyindex function)
    # Fixed parameters which apply to all of the trials in this benchmark.
    warmup_iters = 0
    iters = 21  # This duration of this function is (usually) fairly short, so the performance is prone to random noise -- using more iterations helps
    # Parameters we'll sweep over for the benchmark.
    rng_seeds = [12345]
    src_dtypes = [
        np.int16,
        np.int32,
        np.float64,
        # np.dtype('S11'),
    ]
    dst_dtypes = [
        np.int16,
        np.int32,
        np.float64,
        # np.dtype('S11'),
    ]
    data_lengths = [
        100,
        1_000,
        10_000,
        100_000,
        1_000_000,
        10_000_000,
        100_000_000
        # TODO: Add 100M, 1G and 2G -- these need to be optional since smaller machines will run out of memory
        #   and also take longer than typical trials
    ]
    invalid_ratios = [
        0.0,
        0.01,
        0.1,
        # TODO: Enable these additional values for the 'detailed' scenario
        # 0.5,
        # 0.9,
    ]
    setup_params = itertools.product(
        rng_seeds,
        src_dtypes,
        dst_dtypes,
        data_lengths,
        invalid_ratios,
    )
    # Datasets containing timing data and parameters from the trials in this benchmark.
    benchmark_data: List[Dataset] = []
    for (
        rng_seed,
        src_dtype,
        dst_dtype,
        data_length,
        invalid_ratio,
    ) in setup_params:
        # Setup phase. The data here is used for both the warmup and the real, timed function invocations.
        #
        # Make sure to re-initialize the RNG each time so we get a repeatable result.
        rng = default_rng(rng_seed)
        data_array = rand_array(rng, data_length, dtype=np.dtype(src_dtype), invalid_ratio=invalid_ratio)
        # Sweep over other parameters that aren't required by the setup phase.
        other_params = [None]
        for _ in other_params:
            # Allocate an array to hold the raw timing data.
            # TODO: Change to use TimeSpan?
            timing_data = empty(iters, dtype=np.int64)
            for is_warmup in (True, False):
                loop_count = warmup_iters if is_warmup else iters
                for i in range(loop_count):
                    start_time_ns = timestamper()
                    ### The actual function invocation ###
                    data_array.astype(dtype=dst_dtype)
                    ### Store the timing results (if this was a real invocation).
                    call_nanos = timestamper() - start_time_ns
                    if not is_warmup:
                        timing_data[i] = call_nanos
            # Create a mini Dataset with the timing results for this run.
            # Capture the timing results along with the other options used for the function invocations.
            trial_data = create_trial_dataset(
                timing_data,
                {
                    # Setup parameters
                    "rng_seed": rng_seed,
                    "src_dtype": np.dtype(src_dtype),
                    "dst_dtype": np.dtype(dst_dtype),
                    "data_length": data_length,
                    "invalid_ratio": invalid_ratio,
                    # Other parameters
                    # (None)
                },
            )
            benchmark_data.append(trial_data)
    # hstack all of the individual Datasets together into one large Dataset and return it.
    return Dataset.hstack(benchmark_data, destroy=True)
def bench_astype_numpy(**kwargs):
    """Benchmark numpy's ``ndarray.astype`` over a sweep of dtypes, lengths and invalid ratios.

    Returns a Dataset holding one group of timing samples (nanoseconds) per
    parameter combination; ``kwargs`` are accepted for interface parity with
    the other benchmark entry points and are ignored.
    """
    # TODO: Add additional dimensions:
    #   * number of threads
    #   * recycler on/off
    #   * different key multiplicity distributions (in the rand_fancyindex function)
    #   * different amounts of 'sortedness' of the fancy index (from the rand_fancyindex function)
    # Fixed trial settings; this operation is usually short, so extra
    # iterations help damp out random timing noise.
    warmup_iters = 0
    iters = 21
    # The parameter grid swept by this benchmark.
    rng_seeds = [12345]
    src_dtypes = [np.int16, np.int32, np.float64]  # np.dtype('S11') disabled for now
    dst_dtypes = [np.int16, np.int32, np.float64]  # np.dtype('S11') disabled for now
    data_lengths = [
        100, 1_000, 10_000, 100_000, 1_000_000, 10_000_000, 100_000_000,
        # TODO: Add 1G and 2G -- these need to be optional since smaller
        # machines will run out of memory and also take longer than typical trials
    ]
    invalid_ratios = [
        0.0, 0.01, 0.1,
        # TODO: Enable 0.5 / 0.9 for the 'detailed' scenario
    ]
    param_grid = itertools.product(
        rng_seeds, src_dtypes, dst_dtypes, data_lengths, invalid_ratios)
    # One small timing Dataset per trial; hstacked into the final result.
    trial_datasets: List[Dataset] = []
    for rng_seed, src_dtype, dst_dtype, data_length, invalid_ratio in param_grid:
        # Setup phase (shared by warmup and timed invocations). Re-seed the
        # RNG each trial so the benchmark is repeatable.
        rng = default_rng(rng_seed)
        data_array = rand_array(rng, data_length, dtype=np.dtype(src_dtype), invalid_ratio=invalid_ratio)
        # Unwrap to the raw numpy array when rand_array handed back a wrapper.
        if hasattr(data_array, '_np'):
            data_array = data_array._np
        # Raw per-iteration timings in nanoseconds.
        # TODO: Change to use TimeSpan?
        sample_nanos = empty(iters, dtype=np.int64)
        for is_warmup in (True, False):
            repeat = warmup_iters if is_warmup else iters
            for i in range(repeat):
                t0 = timestamper()
                data_array.astype(dtype=dst_dtype)  # the operation under test
                elapsed = timestamper() - t0
                # Only real (non-warmup) invocations are recorded.
                if not is_warmup:
                    sample_nanos[i] = elapsed
        # Capture the timings together with the parameters that produced them.
        trial_datasets.append(create_trial_dataset(
            sample_nanos,
            {
                "rng_seed": rng_seed,
                "src_dtype": np.dtype(src_dtype),
                "dst_dtype": np.dtype(dst_dtype),
                "data_length": data_length,
                "invalid_ratio": invalid_ratio,
            },
        ))
    # Stitch the per-trial Datasets into one large Dataset.
    return Dataset.hstack(trial_datasets, destroy=True)
def bench_astype_numba(**kwargs):
    """Benchmark the numba-accelerated ``astype_numba`` over a dtype/length/invalid-ratio sweep.

    Returns a Dataset holding one group of timing samples (nanoseconds) per
    parameter combination; ``kwargs`` are accepted for interface parity with
    the other benchmark entry points and are ignored.
    """
    # TODO: Add additional dimensions:
    #   * number of threads
    #   * recycler on/off
    #   * different key multiplicity distributions (in the rand_fancyindex function)
    #   * different amounts of 'sortedness' of the fancy index (from the rand_fancyindex function)
    # Fixed parameters which apply to all of the trials in this benchmark.
    warmup_iters = 1  # one warmup pass lets numba JIT-compile before timing
    iters = 21  # short operation; extra iterations damp out random timing noise
    # Parameters we'll sweep over for the benchmark.
    rng_seeds = [12345]
    src_dtypes = [
        np.int16,
        np.int32,
        np.float64,
    ]
    dst_dtypes = [
        np.int16,
        np.int32,
        np.float64,
    ]
    data_lengths = [
        100,
        1_000,
        10_000,
        100_000,
        1_000_000,
        10_000_000,
        100_000_000,
        # TODO: Add 1G and 2G -- these need to be optional since smaller machines will run out of memory
        #       and also take longer than typical trials
    ]
    invalid_ratios = [
        0.0,
        0.01,
        0.1,
        # TODO: Enable these additional values for the 'detailed' scenario
        # 0.5,
        # 0.9,
    ]
    setup_params = itertools.product(
        rng_seeds,
        src_dtypes,
        dst_dtypes,
        data_lengths,
        invalid_ratios,
    )
    # Datasets containing timing data and parameters from the trials in this benchmark.
    benchmark_data: List[Dataset] = []
    for (
        rng_seed,
        src_dtype,
        dst_dtype,
        data_length,
        invalid_ratio,
    ) in setup_params:
        # Setup phase. The data here is used for both the warmup and the real, timed function invocations.
        #
        # Make sure to re-initialize the RNG each time so we get a repeatable result.
        rng = default_rng(rng_seed)
        data_array = rand_array(rng, data_length, dtype=np.dtype(src_dtype), invalid_ratio=invalid_ratio)
        # BUGFIX: build the destination dtype once, outside the timed region.
        # Previously np.dtype(dst_dtype) was constructed inside every timed
        # iteration, adding a constant overhead to each measured sample.
        dst_np_dtype = np.dtype(dst_dtype)
        # Allocate an array to hold the raw timing data.
        # TODO: Change to use TimeSpan?
        timing_data = empty(iters, dtype=np.int64)
        for is_warmup in (True, False):
            loop_count = warmup_iters if is_warmup else iters
            for i in range(loop_count):
                start_time_ns = timestamper()
                ### The actual function invocation ###
                astype_numba(data_array, dst_np_dtype)
                ### Store the timing results (if this was a real invocation).
                call_nanos = timestamper() - start_time_ns
                if not is_warmup:
                    timing_data[i] = call_nanos
        # Create a mini Dataset with the timing results for this run.
        # Capture the timing results along with the other options used for the function invocations.
        trial_data = create_trial_dataset(
            timing_data,
            {
                # Setup parameters
                "rng_seed": rng_seed,
                "src_dtype": np.dtype(src_dtype),
                "dst_dtype": dst_np_dtype,
                "data_length": data_length,
                "invalid_ratio": invalid_ratio,
            },
        )
        benchmark_data.append(trial_data)
    # hstack all of the individual Datasets together into one large Dataset and return it.
    return Dataset.hstack(benchmark_data, destroy=True)
def bench_compare_ops(**kwargs):
    """Benchmark elementwise comparison operators over dtype/length/invalid-ratio sweeps.

    Times each of the six rich-comparison operators applied to two freshly
    generated arrays and returns a Dataset of per-trial timing samples
    (nanoseconds). ``kwargs`` are accepted for interface parity and ignored.
    """
    # TODO: Add additional dimensions:
    #   * number of threads
    #   * recycler on/off
    #   * different key multiplicity distributions (in the rand_fancyindex function)
    #   * different amounts of 'sortedness' of the fancy index (from the rand_fancyindex function)
    # Fixed trial settings; this operation is usually short, so extra
    # iterations help damp out random timing noise.
    warmup_iters = 0
    iters = 21
    # The parameter grid swept by this benchmark.
    rng_seeds = [12345]
    arr1_dtypes = [np.int16, np.int32, np.float64]
    arr2_dtypes = [np.int16, np.int32, np.float64]
    data_lengths = [
        100, 1_000, 10_000, 100_000, 1_000_000, 10_000_000,
        # TODO: Add 100M, 1G and 2G -- these need to be optional since smaller
        # machines will run out of memory and also take longer than typical trials
    ]
    invalid_ratios = [
        0.0, 0.01, 0.1,
        # TODO: Enable 0.5 / 0.9 for the 'detailed' scenario
    ]
    ops = [operator.eq, operator.ne, operator.lt, operator.le, operator.ge, operator.gt]
    param_grid = itertools.product(
        rng_seeds, arr1_dtypes, arr2_dtypes, data_lengths, invalid_ratios, ops)
    # One small timing Dataset per trial; hstacked into the final result.
    trial_datasets: List[Dataset] = []
    for rng_seed, arr1_dtype, arr2_dtype, data_length, invalid_ratio, op in param_grid:
        # Setup phase (shared by warmup and timed invocations). Re-seed the
        # RNG each trial so the benchmark is repeatable.
        rng = default_rng(rng_seed)
        arr1 = rand_array(rng, data_length, dtype=np.dtype(arr1_dtype), invalid_ratio=invalid_ratio)
        arr2 = rand_array(rng, data_length, dtype=np.dtype(arr2_dtype), invalid_ratio=invalid_ratio)
        # Raw per-iteration timings in nanoseconds.
        # TODO: Change to use TimeSpan?
        sample_nanos = empty(iters, dtype=np.int64)
        for is_warmup in (True, False):
            repeat = warmup_iters if is_warmup else iters
            for i in range(repeat):
                t0 = timestamper()
                op(arr1, arr2)  # the operation under test
                elapsed = timestamper() - t0
                # Only real (non-warmup) invocations are recorded.
                if not is_warmup:
                    sample_nanos[i] = elapsed
        # Capture the timings together with the parameters that produced them.
        trial_datasets.append(create_trial_dataset(
            sample_nanos,
            {
                "rng_seed": rng_seed,
                "arr1_dtype": np.dtype(arr1_dtype),
                "arr2_dtype": np.dtype(arr2_dtype),
                "operation": op.__name__,
                "data_length": data_length,
                "invalid_ratio": invalid_ratio,
            },
        ))
    # Stitch the per-trial Datasets into one large Dataset.
    return Dataset.hstack(trial_datasets, destroy=True)
def bench_compare_ops_numpy(**kwargs):
    """Benchmark elementwise comparison operators on raw numpy arrays.

    Sweeps operand dtypes, array lengths and the six rich-comparison
    operators and returns a Dataset of per-trial timing samples
    (nanoseconds). ``kwargs`` are accepted for interface parity with the
    other benchmark entry points and are ignored.
    """
    # TODO: Add additional dimensions:
    #   * number of threads
    #   * recycler on/off
    #   * different key multiplicity distributions (in the rand_fancyindex function)
    #   * different amounts of 'sortedness' of the fancy index (from the rand_fancyindex function)
    # Fixed parameters which apply to all of the trials in this benchmark.
    warmup_iters = 0
    iters = 21  # short operation; extra iterations damp out random timing noise
    # Parameters we'll sweep over for the benchmark.
    rng_seeds = [12345]
    arr1_dtypes = [
        np.int16,
        np.int32,
        np.float64,
    ]
    arr2_dtypes = [
        np.int16,
        np.int32,
        np.float64,
    ]
    data_lengths = [
        100,
        1_000,
        10_000,
        100_000,
        1_000_000,
        10_000_000,
        # TODO: Add 100M, 1G and 2G -- these need to be optional since smaller machines will run out of memory
        #       and also take longer than typical trials
    ]
    ops = [
        operator.eq,
        operator.ne,
        operator.lt,
        operator.le,
        operator.ge,
        operator.gt,
    ]
    setup_params = itertools.product(
        rng_seeds,
        arr1_dtypes,
        arr2_dtypes,
        data_lengths,
        ops,
    )
    # Datasets containing timing data and parameters from the trials in this benchmark.
    benchmark_data: List[Dataset] = []
    for (
        rng_seed,
        arr1_dtype,
        arr2_dtype,
        data_length,
        op,
    ) in setup_params:
        # Setup phase. The data here is used for both the warmup and the real, timed function invocations.
        #
        # Make sure to re-initialize the RNG each time so we get a repeatable result.
        rng = default_rng(rng_seed)
        arr1 = rand_array(rng, data_length, dtype=np.dtype(arr1_dtype))
        arr2 = rand_array(rng, data_length, dtype=np.dtype(arr2_dtype))
        # BUGFIX: unwrap to the raw numpy array only when rand_array returned a
        # wrapper object; the previous unconditional `._np` access raised
        # AttributeError when rand_array handed back a plain ndarray. This
        # matches the guard used in bench_astype_numpy.
        if hasattr(arr1, '_np'):
            arr1 = arr1._np
        if hasattr(arr2, '_np'):
            arr2 = arr2._np
        # Allocate an array to hold the raw timing data.
        # TODO: Change to use TimeSpan?
        timing_data = empty(iters, dtype=np.int64)
        for is_warmup in (True, False):
            loop_count = warmup_iters if is_warmup else iters
            for i in range(loop_count):
                start_time_ns = timestamper()
                ### The actual function invocation ###
                op(arr1, arr2)
                ### Store the timing results (if this was a real invocation).
                call_nanos = timestamper() - start_time_ns
                if not is_warmup:
                    timing_data[i] = call_nanos
        # Create a mini Dataset with the timing results for this run.
        # Capture the timing results along with the other options used for the function invocations.
        trial_data = create_trial_dataset(
            timing_data,
            {
                # Setup parameters
                "rng_seed": rng_seed,
                "arr1_dtype": np.dtype(arr1_dtype),
                "arr2_dtype": np.dtype(arr2_dtype),
                "operation": op.__name__,
                "data_length": data_length,
            },
        )
        benchmark_data.append(trial_data)
    # hstack all of the individual Datasets together into one large Dataset and return it.
    return Dataset.hstack(benchmark_data, destroy=True)
def compare_mbget():
    """Run the mbget benchmark variants and bundle their results for comparison."""
    results = {
        "mbget": bench_mbget(),
        "mbget_numba": bench_mbget_numba(),
    }
    return create_comparison_dataset(results)
def compare_astype():
    """Run the astype benchmark variants and bundle their results for comparison."""
    results = {
        "astype": bench_astype(),
        "astype_numpy": bench_astype_numpy(),
        "astype_numba": bench_astype_numba(),
    }
    return create_comparison_dataset(results)
def compare_bool_index():
    """Run the boolean-indexing benchmark variants and bundle their results for comparison."""
    results = {
        "bool_index": bench_bool_index(),
        "bool_index_numpy": bench_bool_index_numpy(),
    }
    return create_comparison_dataset(results)
def compare_compare_ops():
    """Run the comparison-operator benchmark variants and bundle their results for comparison."""
    results = {
        "compare_ops": bench_compare_ops(),
        "compare_ops_numpy": bench_compare_ops_numpy(),
    }
    return create_comparison_dataset(results)
"logging.getLogger",
"numpy.random.default_rng",
"itertools.product",
"numba.njit",
"numpy.iinfo",
"numba.prange",
"numpy.dtype"
] | [((967, 994), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (984, 994), False, 'import logging\n'), ((4286, 4332), 'numba.njit', 'nb.njit', ([], {'cache': '(True)', 'parallel': '(True)', 'nogil': '(True)'}), '(cache=True, parallel=True, nogil=True)\n', (4293, 4332), True, 'import numba as nb\n'), ((4963, 5009), 'numba.njit', 'nb.njit', ([], {'cache': '(True)', 'parallel': '(True)', 'nogil': '(True)'}), '(cache=True, parallel=True, nogil=True)\n', (4970, 5009), True, 'import numba as nb\n'), ((6705, 6751), 'numba.njit', 'nb.njit', ([], {'cache': '(True)', 'parallel': '(True)', 'nogil': '(True)'}), '(cache=True, parallel=True, nogil=True)\n', (6712, 6751), True, 'import numba as nb\n'), ((7290, 7336), 'numba.njit', 'nb.njit', ([], {'cache': '(True)', 'parallel': '(True)', 'nogil': '(True)'}), '(cache=True, parallel=True, nogil=True)\n', (7297, 7336), True, 'import numba as nb\n'), ((8402, 8449), 'numba.njit', 'nb.njit', ([], {'cache': '(True)', 'parallel': '(False)', 'nogil': '(True)'}), '(cache=True, parallel=False, nogil=True)\n', (8409, 8449), True, 'import numba as nb\n'), ((4449, 4475), 'numba.prange', 'nb.prange', (['aIndex.shape[0]'], {}), '(aIndex.shape[0])\n', (4458, 4475), True, 'import numba as nb\n'), ((5128, 5154), 'numba.prange', 'nb.prange', (['aIndex.shape[0]'], {}), '(aIndex.shape[0])\n', (5137, 5154), True, 'import numba as nb\n'), ((6894, 6920), 'numba.prange', 'nb.prange', (['aIndex.shape[0]'], {}), '(aIndex.shape[0])\n', (6903, 6920), True, 'import numba as nb\n'), ((7481, 7507), 'numba.prange', 'nb.prange', (['aIndex.shape[0]'], {}), '(aIndex.shape[0])\n', (7490, 7507), True, 'import numba as nb\n'), ((9559, 9627), 'itertools.product', 'itertools.product', (['rng_seeds', 'data_dtypes', 'data_lengths', 'true_ratios'], {}), '(rng_seeds, data_dtypes, data_lengths, true_ratios)\n', (9576, 9627), False, 'import itertools\n'), ((13271, 13339), 'itertools.product', 'itertools.product', (['rng_seeds', 'data_dtypes', 
'data_lengths', 'true_ratios'], {}), '(rng_seeds, data_dtypes, data_lengths, true_ratios)\n', (13288, 13339), False, 'import itertools\n'), ((17953, 18057), 'itertools.product', 'itertools.product', (['rng_seeds', 'data_dtypes', 'index_dtypes', 'data_lengths', 'index_lengths', 'invalid_ratios'], {}), '(rng_seeds, data_dtypes, index_dtypes, data_lengths,\n index_lengths, invalid_ratios)\n', (17970, 18057), False, 'import itertools\n'), ((22956, 23060), 'itertools.product', 'itertools.product', (['rng_seeds', 'data_dtypes', 'index_dtypes', 'data_lengths', 'index_lengths', 'invalid_ratios'], {}), '(rng_seeds, data_dtypes, index_dtypes, data_lengths,\n index_lengths, invalid_ratios)\n', (22973, 23060), False, 'import itertools\n'), ((27595, 27681), 'itertools.product', 'itertools.product', (['rng_seeds', 'src_dtypes', 'dst_dtypes', 'data_lengths', 'invalid_ratios'], {}), '(rng_seeds, src_dtypes, dst_dtypes, data_lengths,\n invalid_ratios)\n', (27612, 27681), False, 'import itertools\n'), ((31541, 31627), 'itertools.product', 'itertools.product', (['rng_seeds', 'src_dtypes', 'dst_dtypes', 'data_lengths', 'invalid_ratios'], {}), '(rng_seeds, src_dtypes, dst_dtypes, data_lengths,\n invalid_ratios)\n', (31558, 31627), False, 'import itertools\n'), ((35510, 35596), 'itertools.product', 'itertools.product', (['rng_seeds', 'src_dtypes', 'dst_dtypes', 'data_lengths', 'invalid_ratios'], {}), '(rng_seeds, src_dtypes, dst_dtypes, data_lengths,\n invalid_ratios)\n', (35527, 35596), False, 'import itertools\n'), ((39539, 39632), 'itertools.product', 'itertools.product', (['rng_seeds', 'arr1_dtypes', 'arr2_dtypes', 'data_lengths', 'invalid_ratios', 'ops'], {}), '(rng_seeds, arr1_dtypes, arr2_dtypes, data_lengths,\n invalid_ratios, ops)\n', (39556, 39632), False, 'import itertools\n'), ((43544, 43617), 'itertools.product', 'itertools.product', (['rng_seeds', 'arr1_dtypes', 'arr2_dtypes', 'data_lengths', 'ops'], {}), '(rng_seeds, arr1_dtypes, arr2_dtypes, data_lengths, ops)\n', 
(43561, 43617), False, 'import itertools\n'), ((10528, 10549), 'numpy.random.default_rng', 'default_rng', (['rng_seed'], {}), '(rng_seed)\n', (10539, 10549), False, 'from numpy.random import default_rng\n'), ((14240, 14261), 'numpy.random.default_rng', 'default_rng', (['rng_seed'], {}), '(rng_seed)\n', (14251, 14261), False, 'from numpy.random import default_rng\n'), ((17083, 17098), 'numpy.dtype', 'np.dtype', (['"""S11"""'], {}), "('S11')\n", (17091, 17098), True, 'import numpy as np\n'), ((19001, 19022), 'numpy.random.default_rng', 'default_rng', (['rng_seed'], {}), '(rng_seed)\n', (19012, 19022), False, 'from numpy.random import default_rng\n'), ((22086, 22101), 'numpy.dtype', 'np.dtype', (['"""S11"""'], {}), "('S11')\n", (22094, 22101), True, 'import numpy as np\n'), ((24026, 24047), 'numpy.random.default_rng', 'default_rng', (['rng_seed'], {}), '(rng_seed)\n', (24037, 24047), False, 'from numpy.random import default_rng\n'), ((28245, 28266), 'numpy.random.default_rng', 'default_rng', (['rng_seed'], {}), '(rng_seed)\n', (28256, 28266), False, 'from numpy.random import default_rng\n'), ((32191, 32212), 'numpy.random.default_rng', 'default_rng', (['rng_seed'], {}), '(rng_seed)\n', (32202, 32212), False, 'from numpy.random import default_rng\n'), ((36160, 36181), 'numpy.random.default_rng', 'default_rng', (['rng_seed'], {}), '(rng_seed)\n', (36171, 36181), False, 'from numpy.random import default_rng\n'), ((40224, 40245), 'numpy.random.default_rng', 'default_rng', (['rng_seed'], {}), '(rng_seed)\n', (40235, 40245), False, 'from numpy.random import default_rng\n'), ((44176, 44197), 'numpy.random.default_rng', 'default_rng', (['rng_seed'], {}), '(rng_seed)\n', (44187, 44197), False, 'from numpy.random import default_rng\n'), ((10609, 10629), 'numpy.dtype', 'np.dtype', (['data_dtype'], {}), '(data_dtype)\n', (10617, 10629), True, 'import numpy as np\n'), ((14321, 14341), 'numpy.dtype', 'np.dtype', (['data_dtype'], {}), '(data_dtype)\n', (14329, 14341), True, 'import 
numpy as np\n'), ((18704, 18725), 'numpy.iinfo', 'np.iinfo', (['index_dtype'], {}), '(index_dtype)\n', (18712, 18725), True, 'import numpy as np\n'), ((19082, 19102), 'numpy.dtype', 'np.dtype', (['data_dtype'], {}), '(data_dtype)\n', (19090, 19102), True, 'import numpy as np\n'), ((19236, 19257), 'numpy.dtype', 'np.dtype', (['index_dtype'], {}), '(index_dtype)\n', (19244, 19257), True, 'import numpy as np\n'), ((23729, 23750), 'numpy.iinfo', 'np.iinfo', (['index_dtype'], {}), '(index_dtype)\n', (23737, 23750), True, 'import numpy as np\n'), ((24107, 24127), 'numpy.dtype', 'np.dtype', (['data_dtype'], {}), '(data_dtype)\n', (24115, 24127), True, 'import numpy as np\n'), ((24261, 24282), 'numpy.dtype', 'np.dtype', (['index_dtype'], {}), '(index_dtype)\n', (24269, 24282), True, 'import numpy as np\n'), ((28326, 28345), 'numpy.dtype', 'np.dtype', (['src_dtype'], {}), '(src_dtype)\n', (28334, 28345), True, 'import numpy as np\n'), ((32272, 32291), 'numpy.dtype', 'np.dtype', (['src_dtype'], {}), '(src_dtype)\n', (32280, 32291), True, 'import numpy as np\n'), ((36241, 36260), 'numpy.dtype', 'np.dtype', (['src_dtype'], {}), '(src_dtype)\n', (36249, 36260), True, 'import numpy as np\n'), ((40299, 40319), 'numpy.dtype', 'np.dtype', (['arr1_dtype'], {}), '(arr1_dtype)\n', (40307, 40319), True, 'import numpy as np\n'), ((40401, 40421), 'numpy.dtype', 'np.dtype', (['arr2_dtype'], {}), '(arr2_dtype)\n', (40409, 40421), True, 'import numpy as np\n'), ((11947, 11967), 'numpy.dtype', 'np.dtype', (['data_dtype'], {}), '(data_dtype)\n', (11955, 11967), True, 'import numpy as np\n'), ((15740, 15760), 'numpy.dtype', 'np.dtype', (['data_dtype'], {}), '(data_dtype)\n', (15748, 15760), True, 'import numpy as np\n'), ((20625, 20645), 'numpy.dtype', 'np.dtype', (['data_dtype'], {}), '(data_dtype)\n', (20633, 20645), True, 'import numpy as np\n'), ((20683, 20704), 'numpy.dtype', 'np.dtype', (['index_dtype'], {}), '(index_dtype)\n', (20691, 20704), True, 'import numpy as np\n'), ((25656, 
25676), 'numpy.dtype', 'np.dtype', (['data_dtype'], {}), '(data_dtype)\n', (25664, 25676), True, 'import numpy as np\n'), ((25714, 25735), 'numpy.dtype', 'np.dtype', (['index_dtype'], {}), '(index_dtype)\n', (25722, 25735), True, 'import numpy as np\n'), ((29652, 29671), 'numpy.dtype', 'np.dtype', (['src_dtype'], {}), '(src_dtype)\n', (29660, 29671), True, 'import numpy as np\n'), ((29707, 29726), 'numpy.dtype', 'np.dtype', (['dst_dtype'], {}), '(dst_dtype)\n', (29715, 29726), True, 'import numpy as np\n'), ((33679, 33698), 'numpy.dtype', 'np.dtype', (['src_dtype'], {}), '(src_dtype)\n', (33687, 33698), True, 'import numpy as np\n'), ((33734, 33753), 'numpy.dtype', 'np.dtype', (['dst_dtype'], {}), '(dst_dtype)\n', (33742, 33753), True, 'import numpy as np\n'), ((37578, 37597), 'numpy.dtype', 'np.dtype', (['src_dtype'], {}), '(src_dtype)\n', (37586, 37597), True, 'import numpy as np\n'), ((37633, 37652), 'numpy.dtype', 'np.dtype', (['dst_dtype'], {}), '(dst_dtype)\n', (37641, 37652), True, 'import numpy as np\n'), ((41708, 41728), 'numpy.dtype', 'np.dtype', (['arr1_dtype'], {}), '(arr1_dtype)\n', (41716, 41728), True, 'import numpy as np\n'), ((41765, 41785), 'numpy.dtype', 'np.dtype', (['arr2_dtype'], {}), '(arr2_dtype)\n', (41773, 41785), True, 'import numpy as np\n'), ((44251, 44271), 'numpy.dtype', 'np.dtype', (['arr1_dtype'], {}), '(arr1_dtype)\n', (44259, 44271), True, 'import numpy as np\n'), ((44328, 44348), 'numpy.dtype', 'np.dtype', (['arr2_dtype'], {}), '(arr2_dtype)\n', (44336, 44348), True, 'import numpy as np\n'), ((45610, 45630), 'numpy.dtype', 'np.dtype', (['arr1_dtype'], {}), '(arr1_dtype)\n', (45618, 45630), True, 'import numpy as np\n'), ((45667, 45687), 'numpy.dtype', 'np.dtype', (['arr2_dtype'], {}), '(arr2_dtype)\n', (45675, 45687), True, 'import numpy as np\n'), ((36919, 36938), 'numpy.dtype', 'np.dtype', (['dst_dtype'], {}), '(dst_dtype)\n', (36927, 36938), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import tensorflow as tf
import tensorflow_probability
import mvg_distributions.covariance_representations as cov_rep
from mvg_distributions.sqrt_gamma_gaussian import SqrtGammaGaussian, SparseSqrtGammaGaussian
from mvg_distributions.test.test_losses_base import LossesTestBase
tfd = tensorflow_probability.distributions
tfb = tensorflow_probability.bijectors
class TestSqrtGammaGaussian(LossesTestBase):
    """Checks that SqrtGammaGaussian matches a TFP Cholesky-transformed Wishart.

    The fixture builds a reference distribution (a Wishart pushed through an
    inverted CholeskyOuterProduct bijector) and the custom SqrtGammaGaussian
    with the same degrees of freedom and diagonal scale, then compares their
    log-probabilities and samples.
    """
    def setUp(self):
        # Build the shared fixture: a random Cholesky factor `x`, its
        # PrecisionConvCholFilters wrapper, the TFP reference distribution,
        # and our custom distribution under test.
        super().setUp()
        self.x, self.x_cov_obj, self.sqrt_w_tfd, self.sqrt_gamma_gaussian = self._create_single_sqrt_wishart_pair()
    def _create_single_sqrt_wishart_pair(self, add_sparse_gamma=False):
        """Build matched (reference, custom) square-root Wishart distributions.

        Args:
            add_sparse_gamma: when True, also build a SparseSqrtGammaGaussian
                with the same parameters and include it in the returned tuple.

        Returns:
            (x, x_cov_obj, sqrt_wishart_tfd, sqrt_gamma_gaussian) and, when
            add_sparse_gamma is True, the sparse distribution as a fifth item.
        """
        # Create a random scale matrix for the Wishart distribution
        diag_precision_prior = np.abs(np.random.normal(size=(self.batch_size, self.features_size)))
        diag_precision_prior = diag_precision_prior.astype(self.dtype.as_numpy_dtype)
        precision_prior = np.zeros(shape=(self.batch_size, self.features_size, self.features_size),
                                   dtype=self.dtype.as_numpy_dtype)
        # Place the random positive values on the diagonal of each batch matrix.
        for i in range(self.batch_size):
            precision_prior[i][np.diag_indices_from(precision_prior[i])] = diag_precision_prior[i]
        log_diag_precision_prior = np.log(diag_precision_prior)
        # Create a random vector of degrees of freedom, whose values must be larger than features_size
        df = np.random.uniform(low=self.features_size, high=self.features_size * 10, size=self.batch_size)
        df = df.astype(self.dtype.as_numpy_dtype)
        # Create a square root Wishart distribution using bijectors
        # (Invert(CholeskyOuterProduct) maps a Wishart sample to its Cholesky factor).
        wishart = tfd.Wishart(scale=precision_prior, df=df)
        cholesky_bijector = tfb.Invert(tfb.CholeskyOuterProduct())
        sqrt_wishart_tfd = tfd.TransformedDistribution(distribution=wishart, bijector=cholesky_bijector)
        # Create our custom square root Wishart distribution with the same parameters
        sqrt_gamma_gaussian = SqrtGammaGaussian(df=df, log_diag_scale=log_diag_precision_prior)
        if add_sparse_gamma:
            sparse_sqrt_gamma_gaussian = SparseSqrtGammaGaussian(df=df, log_diag_scale=log_diag_precision_prior)
        # Create a random Cholesky matrix to test the probability density functions
        _, __, x_covariance, x_weights, x_basis, log_diag = self._random_normal_params(cov_rep.PrecisionConvCholFilters)
        x = np.linalg.cholesky(np.linalg.inv(x_covariance))
        # Our custom square root Wishart is optimized to work with PrecisionConvCholFilters, it will measure
        # the pdf of the Cholesky of the Precision
        # assumes features_size is a perfect square (img_w x img_w image) -- TODO confirm
        img_w = int(np.sqrt(self.features_size))
        sample_shape = tf.TensorShape((self.batch_size, img_w, img_w, 1))
        x_cov_obj = cov_rep.PrecisionConvCholFilters(weights_precision=tf.constant(x_weights),
                                                 filters_precision=tf.constant(x_basis),
                                                 sample_shape=sample_shape)
        x_cov_obj.log_diag_chol_precision = log_diag
        if add_sparse_gamma:
            return x, x_cov_obj, sqrt_wishart_tfd, sqrt_gamma_gaussian, sparse_sqrt_gamma_gaussian
        else:
            return x, x_cov_obj, sqrt_wishart_tfd, sqrt_gamma_gaussian
    def test_log_prob(self):
        # Test that square root Gamma Gaussian is the same as a Cholesky Wishart
        log_prob1 = self.sqrt_w_tfd.log_prob(self.x)
        # SqrtGammaGaussian expects the log of the diagonal in place of the diagonal itself.
        x_with_log_diag = tf.matrix_set_diag(self.x, self.x_cov_obj.log_diag_chol_precision)
        log_prob2 = self.sqrt_gamma_gaussian.log_prob(x_with_log_diag)
        x_with_log_diag = tf.matrix_set_diag(self.x_cov_obj.chol_precision, self.x_cov_obj.log_diag_chol_precision)
        log_prob4 = self.sqrt_gamma_gaussian.log_prob(x_with_log_diag)
        self._asset_allclose_tf_feed(log_prob1, log_prob2)
        self._asset_allclose_tf_feed(log_prob1, log_prob4)
    def test_samples(self):
        # Test that square root Gamma Gaussian is the same as a Cholesky Wishart
        sample1 = self.sqrt_w_tfd.sample(seed=0)
        sample2 = self.sqrt_gamma_gaussian.sample(seed=0)
        # Our samples carry log-diagonals; exponentiate them to match the TFP sample.
        sample2 = tf.matrix_set_diag(sample2, tf.exp(tf.matrix_diag_part(sample2)))
        self._asset_allclose_tf_feed(sample1, sample2)
class TestSparseSqrtGammaGaussian(TestSqrtGammaGaussian):
    """Checks for SparseSqrtGammaGaussian against the dense reference distributions."""
    def setUp(self):
        # Deliberately bypass TestSqrtGammaGaussian.setUp: this fixture needs
        # the sparse variant produced by the shared pair builder.
        LossesTestBase.setUp(self)
        (self.x, self.x_cov_obj, self.sqrt_w_tfd,
         self.sqrt_gamma_gaussian_dense,
         self.sqrt_gamma_gaussian) = self._create_single_sqrt_wishart_pair(add_sparse_gamma=True)
    def test_log_prob(self):
        # The sparse square-root Gamma-Gaussian must agree with the
        # Cholesky-Wishart reference on dense inputs.
        reference = self.sqrt_w_tfd.log_prob(self.x)
        log_prob_x = self.sqrt_gamma_gaussian.log_prob(self.x)
        log_prob_chol = self.sqrt_gamma_gaussian.log_prob(self.x_cov_obj.chol_precision)
        self._asset_allclose_tf_feed(reference, log_prob_x)
        self._asset_allclose_tf_feed(reference, log_prob_chol)
    def test_log_prob_sparse(self):
        # Dense evaluation with the off-diagonal terms masked out afterwards
        # must match the sparse implementation applied to the covariance object.
        x_log_diag = tf.matrix_set_diag(self.x, self.x_cov_obj.log_diag_chol_precision)
        gamma_part = self.sqrt_gamma_gaussian_dense._log_prob_sqrt_gamma(x_log_diag)
        normal_part = self.sqrt_gamma_gaussian_dense.normal_dist.log_prob(self.x)
        mask = self.x_cov_obj.np_off_diag_mask()  # zero out off-diagonal terms
        normal_part = tf.reduce_sum(normal_part * mask, axis=[1, 2])
        dense_log_prob = gamma_part + normal_part
        sparse_log_prob = self.sqrt_gamma_gaussian.log_prob(self.x_cov_obj)
        self._asset_allclose_tf_feed(dense_log_prob, sparse_log_prob)
    @unittest.skip
    def test_samples(self):
        # Sampling is not implemented/compared for the sparse variant.
        pass
if __name__ == '__main__':
    # Run the test classes in this module when executed as a script.
    unittest.main()
| [
"numpy.random.normal",
"numpy.diag_indices_from",
"mvg_distributions.sqrt_gamma_gaussian.SqrtGammaGaussian",
"numpy.sqrt",
"tensorflow.matrix_diag_part",
"tensorflow.reduce_sum",
"mvg_distributions.sqrt_gamma_gaussian.SparseSqrtGammaGaussian",
"numpy.log",
"numpy.zeros",
"numpy.linalg.inv",
"ten... | [((5965, 5980), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5978, 5980), False, 'import unittest\n'), ((958, 1068), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.batch_size, self.features_size, self.features_size)', 'dtype': 'self.dtype.as_numpy_dtype'}), '(shape=(self.batch_size, self.features_size, self.features_size),\n dtype=self.dtype.as_numpy_dtype)\n', (966, 1068), True, 'import numpy as np\n'), ((1275, 1303), 'numpy.log', 'np.log', (['diag_precision_prior'], {}), '(diag_precision_prior)\n', (1281, 1303), True, 'import numpy as np\n'), ((1421, 1518), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'self.features_size', 'high': '(self.features_size * 10)', 'size': 'self.batch_size'}), '(low=self.features_size, high=self.features_size * 10,\n size=self.batch_size)\n', (1438, 1518), True, 'import numpy as np\n'), ((1983, 2048), 'mvg_distributions.sqrt_gamma_gaussian.SqrtGammaGaussian', 'SqrtGammaGaussian', ([], {'df': 'df', 'log_diag_scale': 'log_diag_precision_prior'}), '(df=df, log_diag_scale=log_diag_precision_prior)\n', (2000, 2048), False, 'from mvg_distributions.sqrt_gamma_gaussian import SqrtGammaGaussian, SparseSqrtGammaGaussian\n'), ((2690, 2740), 'tensorflow.TensorShape', 'tf.TensorShape', (['(self.batch_size, img_w, img_w, 1)'], {}), '((self.batch_size, img_w, img_w, 1))\n', (2704, 2740), True, 'import tensorflow as tf\n'), ((3467, 3533), 'tensorflow.matrix_set_diag', 'tf.matrix_set_diag', (['self.x', 'self.x_cov_obj.log_diag_chol_precision'], {}), '(self.x, self.x_cov_obj.log_diag_chol_precision)\n', (3485, 3533), True, 'import tensorflow as tf\n'), ((3632, 3726), 'tensorflow.matrix_set_diag', 'tf.matrix_set_diag', (['self.x_cov_obj.chol_precision', 'self.x_cov_obj.log_diag_chol_precision'], {}), '(self.x_cov_obj.chol_precision, self.x_cov_obj.\n log_diag_chol_precision)\n', (3650, 3726), True, 'import tensorflow as tf\n'), ((4359, 4385), 'mvg_distributions.test.test_losses_base.LossesTestBase.setUp', 
'LossesTestBase.setUp', (['self'], {}), '(self)\n', (4379, 4385), False, 'from mvg_distributions.test.test_losses_base import LossesTestBase\n'), ((5257, 5323), 'tensorflow.matrix_set_diag', 'tf.matrix_set_diag', (['self.x', 'self.x_cov_obj.log_diag_chol_precision'], {}), '(self.x, self.x_cov_obj.log_diag_chol_precision)\n', (5275, 5323), True, 'import tensorflow as tf\n'), ((5623, 5683), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(log_prob1_normal * off_diag_mask)'], {'axis': '[1, 2]'}), '(log_prob1_normal * off_diag_mask, axis=[1, 2])\n', (5636, 5683), True, 'import tensorflow as tf\n'), ((784, 844), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(self.batch_size, self.features_size)'}), '(size=(self.batch_size, self.features_size))\n', (800, 844), True, 'import numpy as np\n'), ((2119, 2190), 'mvg_distributions.sqrt_gamma_gaussian.SparseSqrtGammaGaussian', 'SparseSqrtGammaGaussian', ([], {'df': 'df', 'log_diag_scale': 'log_diag_precision_prior'}), '(df=df, log_diag_scale=log_diag_precision_prior)\n', (2142, 2190), False, 'from mvg_distributions.sqrt_gamma_gaussian import SqrtGammaGaussian, SparseSqrtGammaGaussian\n'), ((2428, 2455), 'numpy.linalg.inv', 'np.linalg.inv', (['x_covariance'], {}), '(x_covariance)\n', (2441, 2455), True, 'import numpy as np\n'), ((2638, 2665), 'numpy.sqrt', 'np.sqrt', (['self.features_size'], {}), '(self.features_size)\n', (2645, 2665), True, 'import numpy as np\n'), ((1172, 1212), 'numpy.diag_indices_from', 'np.diag_indices_from', (['precision_prior[i]'], {}), '(precision_prior[i])\n', (1192, 1212), True, 'import numpy as np\n'), ((2812, 2834), 'tensorflow.constant', 'tf.constant', (['x_weights'], {}), '(x_weights)\n', (2823, 2834), True, 'import tensorflow as tf\n'), ((2907, 2927), 'tensorflow.constant', 'tf.constant', (['x_basis'], {}), '(x_basis)\n', (2918, 2927), True, 'import tensorflow as tf\n'), ((4183, 4211), 'tensorflow.matrix_diag_part', 'tf.matrix_diag_part', (['sample2'], {}), '(sample2)\n', (4202, 4211), 
True, 'import tensorflow as tf\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.