code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# External
import math
import numpy
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from matplotlib.offsetbox import AnchoredText
# Local
from .utils import gaussian_fit, freq_content
# Global matplotlib styling, applied at import time: seaborn theme and a
# uniform 15 pt font for labels, legends and tick labels.
plt.style.use('seaborn')
plt.rc('font', size=15)
plt.rc('axes', labelsize=15)
plt.rc('legend', fontsize=15)
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
def full_fiber(data):
    """Plot the absolute DAS measurement across the full fiber.

    The magnitude image is shown on a log color scale (time running down
    the y axis), two channel regions of interest are marked with vertical
    cyan/yellow lines, and the figure is saved to ``abs_data.png`` and
    displayed.
    """
    figure, axis = plt.subplots(figsize=(9, 6))
    axis.grid(False)
    # Absolute strain-rate magnitude, transposed so channels run along x.
    magnitude = abs(numpy.array(data).T)
    plt.imshow(
        magnitude,
        extent=[0, data.shape[0], data.shape[1] / 500, 0],
        cmap='inferno',
        aspect='auto',
        norm=LogNorm(),
    )
    # Mark two channel ranges of interest.
    for channel, color in [(4650, 'cyan'), (4850, 'cyan'), (5500, 'yellow'), (6000, 'yellow')]:
        axis.axvline(channel, color=color, lw=3)
    axis.xaxis.set_ticks_position('top')
    axis.xaxis.set_label_position('top')
    plt.xlabel('Channels', labelpad=10)
    plt.ylabel('Time [second]')
    cbar = plt.colorbar(pad=0.02, orientation="horizontal")
    cbar.set_label('DAS units (proportional to strain rate)')
    plt.tight_layout()
    plt.savefig('abs_data.png')
    plt.show()
    plt.close()
def regions(data1, data2):
    """Plot two raw-data regions side by side and save to ``raw_data.pdf``.

    The left panel shows coherent surface wave patterns, the right panel
    non-coherent signals; both use identical styling (the original code
    duplicated the whole styling block for each panel).

    Parameters
    ----------
    data1 : array_like
        Coherent surface wave patterns.
    data2 : array_like
        Non-coherent signals.
    """
    # Initialize figure
    fig, ax = plt.subplots(1, 2, figsize=(18, 5.5))
    # The two panels differ only in the data plotted; loop instead of
    # maintaining two copy-pasted styling blocks.
    for axis, data in zip(ax, (data1, data2)):
        im = axis.imshow(data, extent=[0, data.shape[1], 200, 0], cmap='seismic',
                         aspect='auto', vmin=-1000, vmax=1000, interpolation='bicubic')
        axis.xaxis.set_ticks_position('top')
        axis.xaxis.set_label_position('top')
        axis.set_xlabel('Samples', labelpad=10)
        axis.set_ylabel('Channels')
        # Display colorbar underneath the panel
        divider = make_axes_locatable(axis)
        cax = divider.append_axes('bottom', size='5%', pad=0.05)
        plt.colorbar(im, pad=0.02, cax=cax, orientation='horizontal').set_label('Raw measurement amplitude')
    # Save and show figure
    plt.tight_layout()
    plt.savefig('raw_data.pdf')
    plt.show()
    plt.close()
def plot_dist(data, bins=400, xlim=(-1000, 1000)):
    """Histogram the raw amplitudes and overlay gaussian fits.

    Top panel: single-gaussian fit; bottom panel: double-gaussian fit with
    the two components also drawn individually. The figure is saved to
    ``distribution.pdf``.

    Parameters
    ----------
    data : numpy.ndarray
        Raw measurement array; flattened before histogramming.
    bins : int
        Number of histogram bins.
    xlim : sequence of two floats
        Histogram range, also used as the x-axis limits. (Bug fix: the
        axis limits used to be hard-coded to (-1000, 1000), silently
        ignoring this argument.)
    """
    # NOTE(review): unlike the other plotting helpers in this module this
    # one calls neither plt.show() nor plt.close() -- confirm if intentional.
    fig, ax = plt.subplots(2, 1, figsize=(9, 8), sharey=True, sharex=True)
    for i, order in enumerate([1, 2]):
        hist = ax[i].hist(data.reshape(numpy.prod(data.shape)), bins=bins, range=xlim,
                          color='white', histtype='stepfilled', edgecolor='black', lw=0.5)
        # Bin centers from the returned bin edges ('k' avoids shadowing 'i').
        x = numpy.array([0.5 * (hist[1][k] + hist[1][k + 1]) for k in range(len(hist[1]) - 1)])
        y = hist[0]
        # Fit a single (order=1) or double (order=2) gaussian.
        x, y, chisq, aic, popt = gaussian_fit(x, y, order)
        if order == 1:
            ax[i].plot(x, y[0], lw=2,label='Single-gaussian fit\n$\chi^2_\mathrm{red}=$%.1e / $\mathrm{AIC}=%i$\n$\mu=%.2f, \sigma=%.3f$'%(chisq,aic,popt[1],abs(popt[2])))
        if order == 2:
            ax[i].plot(x, y[0], lw=2,label='Double-gaussian fit\n$\chi^2_\mathrm{red}=$%.1e / $\mathrm{AIC}=%i$'%(chisq,aic))
            # Overlay the two individual gaussian components.
            ax[i].plot(x, y[1], lw=2, label=r'$\mu=%.2f, \sigma=%.3f$' % (popt[1], abs(popt[2])))
            ax[i].plot(x, y[2], lw=2, label=r'$\mu=%.2f, \sigma=%.3f$' % (popt[4], abs(popt[5])))
        ax[i].ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
        # Bug fix: respect the requested range instead of hard-coding it.
        ax[i].set_xlim(*xlim)
        ax[i].legend(loc='upper left')
        ax[i].set_ylabel('Density')
    plt.xlabel('Raw measurement amplitude')
    plt.tight_layout()
    plt.savefig('distribution.pdf')
def plot_freq_content(data, img_size=200, sample_rate=500):
    """Plot image, amplitude distribution and frequency content per signal.

    For each image in ``data`` (up to 4, one per figure column), a 4-row
    figure shows: the raw image, its amplitude histogram, the per-channel
    FFT magnitude, and the average spectral amplitude. The figure is saved
    to ``signal_types.pdf`` and displayed.

    Parameters
    ----------
    data : iterable of numpy.ndarray
        Input images.  # assumes at most 4 images (the grid is 4x4) -- TODO confirm
    img_size : int
        Pixel extent used for the image axes.
    sample_rate : int
        Sampling rate in Hz; spectra span 0 .. sample_rate // 2.
    """
    plt.rc('font', size=12)
    plt.rc('axes', labelsize=12)
    plt.rc('legend', fontsize=12)
    plt.rc('xtick', labelsize=12)
    plt.rc('ytick', labelsize=12)
    fig, ax = plt.subplots(4, 4, figsize=(12, 12))
    for n, img in enumerate(data):
        ffts, freqs, avg_fft = freq_content(img, img_size, sample_rate)
        img_max = abs(img).max()
        # Show larger image
        ax[0][n].imshow(img, cmap='seismic', extent=[0, img_size, img_size, 0],
                        vmin=-img_max, vmax=img_max, interpolation='bicubic')
        ax[0][n].set_xlabel('Sample')
        if n == 0:
            ax[0][n].set_ylabel('Channel')
        # Plotting data distribution
        ax[1][n].hist(img.reshape(numpy.prod(img.shape)), bins=50)
        # Raw string: \sigma is a LaTeX command, not a Python escape.
        at = AnchoredText(r'$\sigma=%i$' % numpy.std(img), prop=dict(size=12), loc='upper left')
        ax[1][n].add_artist(at)
        ax[1][n].set_xlabel('Strain Measurement')
        if n == 0:
            ax[1][n].set_ylabel('Density')
        # 2D plot of the FFT magnitude for each channel.
        ax[2][n].imshow(ffts, extent=[0, sample_rate // 2, img.shape[0], 0], aspect='auto',
                        norm=LogNorm(vmin=ffts.min(), vmax=ffts.max()), cmap='jet')
        ax[2][n].set_xlabel('Frequency (Hz)')
        if n == 0:
            ax[2][n].set_ylabel('Channels')
        # Plot average amplitude for each frequency.
        # (Bug fix: the original line above ended with a stray '\' that
        # spliced this comment into the statement; any code placed on the
        # next line would have been silently swallowed.)
        ax[3][n].plot(freqs, avg_fft)
        ax[3][n].set_xlabel('Frequency (Hz)')
        ax[3][n].set_xlim(0, sample_rate // 2)
        ax[3][n].axvline(40, ls='--', color='black', lw=1.3)
        ax[3][n].set_ylabel('Average Spectral Amplitude')
    plt.tight_layout(h_pad=0, w_pad=0)
    plt.savefig('signal_types.pdf')
    plt.show()
def latent_plot(models, loader):
    """Scatter the first two latent variables of six models (one per epoch).

    Each of the six models is switched to eval mode and run over every
    batch of ``loader``; the resulting latent coordinates are scattered,
    colored by the batch targets. The 3x2 figure is saved to
    ``clustering.pdf`` and displayed.
    """
    fig, ax = plt.subplots(3, 2, figsize=(10, 12), sharex=True, sharey=True)
    for n in range(6):
        # Fill the 3x2 grid row by row: n -> (row, col).
        row, col = divmod(n, 2)
        panel = ax[row][col]
        model_epoch = models[n]
        model_epoch.eval()
        for data, target in loader:
            data = data.float()
            # Flatten each sample before feeding the model.
            flat = data.view(-1, numpy.prod(data.shape[-2:]))
            z, recon_batch, mu, logvar = model_epoch(flat)
            latent = z.data.cpu().numpy()
            panel.scatter(latent[:, 0], latent[:, 1], s=10, c=target, cmap='cool', alpha=0.5)
        panel.set_title('Epoch %i' % (n + 1), fontsize=15)
        if row == 2:
            panel.set_xlabel('Latent variable 1')
        if col == 0:
            panel.set_ylabel('Latent variable 2')
    plt.tight_layout()
    plt.savefig('clustering.pdf')
    plt.show()
"numpy.prod",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"mpl_toolkits.axes_grid1.axes_divider.make_axes_locatable",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.tight_layout",
... | [((277, 301), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (290, 301), True, 'import matplotlib.pyplot as plt\n'), ((302, 325), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(15)'}), "('font', size=15)\n", (308, 325), True, 'import matplotlib.pyplot as plt\n'), ((326, 354), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': '(15)'}), "('axes', labelsize=15)\n", (332, 354), True, 'import matplotlib.pyplot as plt\n'), ((355, 384), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': '(15)'}), "('legend', fontsize=15)\n", (361, 384), True, 'import matplotlib.pyplot as plt\n'), ((385, 414), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '(15)'}), "('xtick', labelsize=15)\n", (391, 414), True, 'import matplotlib.pyplot as plt\n'), ((415, 444), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': '(15)'}), "('ytick', labelsize=15)\n", (421, 444), True, 'import matplotlib.pyplot as plt\n'), ((505, 533), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9, 6)'}), '(figsize=(9, 6))\n', (517, 533), True, 'import matplotlib.pyplot as plt\n'), ((930, 965), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Channels"""'], {'labelpad': '(10)'}), "('Channels', labelpad=10)\n", (940, 965), True, 'import matplotlib.pyplot as plt\n'), ((967, 994), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Time [second]"""'], {}), "('Time [second]')\n", (977, 994), True, 'import matplotlib.pyplot as plt\n'), ((1100, 1118), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1116, 1118), True, 'import matplotlib.pyplot as plt\n'), ((1121, 1148), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""abs_data.png"""'], {}), "('abs_data.png')\n", (1132, 1148), True, 'import matplotlib.pyplot as plt\n'), ((1151, 1161), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1159, 1161), True, 'import matplotlib.pyplot as plt\n'), ((1164, 1175), 
'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1173, 1175), True, 'import matplotlib.pyplot as plt\n'), ((1236, 1273), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(18, 5.5)'}), '(1, 2, figsize=(18, 5.5))\n', (1248, 1273), True, 'import matplotlib.pyplot as plt\n'), ((1630, 1656), 'mpl_toolkits.axes_grid1.axes_divider.make_axes_locatable', 'make_axes_locatable', (['ax[0]'], {}), '(ax[0])\n', (1649, 1656), False, 'from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable\n'), ((2168, 2194), 'mpl_toolkits.axes_grid1.axes_divider.make_axes_locatable', 'make_axes_locatable', (['ax[1]'], {}), '(ax[1])\n', (2187, 2194), False, 'from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable\n'), ((2384, 2402), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2400, 2402), True, 'import matplotlib.pyplot as plt\n'), ((2405, 2432), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""raw_data.pdf"""'], {}), "('raw_data.pdf')\n", (2416, 2432), True, 'import matplotlib.pyplot as plt\n'), ((2435, 2445), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2443, 2445), True, 'import matplotlib.pyplot as plt\n'), ((2448, 2459), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2457, 2459), True, 'import matplotlib.pyplot as plt\n'), ((2520, 2580), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(9, 8)', 'sharey': '(True)', 'sharex': '(True)'}), '(2, 1, figsize=(9, 8), sharey=True, sharex=True)\n', (2532, 2580), True, 'import matplotlib.pyplot as plt\n'), ((3736, 3775), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Raw measurement amplitude"""'], {}), "('Raw measurement amplitude')\n", (3746, 3775), True, 'import matplotlib.pyplot as plt\n'), ((3778, 3796), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3794, 3796), True, 'import matplotlib.pyplot as plt\n'), ((3799, 3830), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['"""distribution.pdf"""'], {}), "('distribution.pdf')\n", (3810, 3830), True, 'import matplotlib.pyplot as plt\n'), ((3892, 3915), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(12)'}), "('font', size=12)\n", (3898, 3915), True, 'import matplotlib.pyplot as plt\n'), ((3918, 3946), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': '(12)'}), "('axes', labelsize=12)\n", (3924, 3946), True, 'import matplotlib.pyplot as plt\n'), ((3949, 3978), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': '(12)'}), "('legend', fontsize=12)\n", (3955, 3978), True, 'import matplotlib.pyplot as plt\n'), ((3981, 4010), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '(12)'}), "('xtick', labelsize=12)\n", (3987, 4010), True, 'import matplotlib.pyplot as plt\n'), ((4013, 4042), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': '(12)'}), "('ytick', labelsize=12)\n", (4019, 4042), True, 'import matplotlib.pyplot as plt\n'), ((4054, 4090), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(12, 12)'}), '(4, 4, figsize=(12, 12))\n', (4066, 4090), True, 'import matplotlib.pyplot as plt\n'), ((5278, 5312), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'h_pad': '(0)', 'w_pad': '(0)'}), '(h_pad=0, w_pad=0)\n', (5294, 5312), True, 'import matplotlib.pyplot as plt\n'), ((5314, 5345), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""signal_types.pdf"""'], {}), "('signal_types.pdf')\n", (5325, 5345), True, 'import matplotlib.pyplot as plt\n'), ((5348, 5358), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5356, 5358), True, 'import matplotlib.pyplot as plt\n'), ((5404, 5466), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(2)'], {'figsize': '(10, 12)', 'sharex': '(True)', 'sharey': '(True)'}), '(3, 2, figsize=(10, 12), sharex=True, sharey=True)\n', (5416, 5466), True, 'import matplotlib.pyplot as plt\n'), ((6025, 6043), 'matplotlib.pyplot.tight_layout', 
'plt.tight_layout', ([], {}), '()\n', (6041, 6043), True, 'import matplotlib.pyplot as plt\n'), ((6046, 6075), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""clustering.pdf"""'], {}), "('clustering.pdf')\n", (6057, 6075), True, 'import matplotlib.pyplot as plt\n'), ((6078, 6088), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6086, 6088), True, 'import matplotlib.pyplot as plt\n'), ((691, 700), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {}), '()\n', (698, 700), False, 'from matplotlib.colors import LogNorm\n'), ((997, 1045), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'pad': '(0.02)', 'orientation': '"""horizontal"""'}), "(pad=0.02, orientation='horizontal')\n", (1009, 1045), True, 'import matplotlib.pyplot as plt\n'), ((1718, 1779), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'pad': '(0.02)', 'cax': 'cax', 'orientation': '"""horizontal"""'}), "(im, pad=0.02, cax=cax, orientation='horizontal')\n", (1730, 1779), True, 'import matplotlib.pyplot as plt\n'), ((2256, 2317), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'pad': '(0.02)', 'cax': 'cax', 'orientation': '"""horizontal"""'}), "(im, pad=0.02, cax=cax, orientation='horizontal')\n", (2268, 2317), True, 'import matplotlib.pyplot as plt\n'), ((591, 608), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (602, 608), False, 'import numpy\n'), ((2646, 2668), 'numpy.prod', 'numpy.prod', (['data.shape'], {}), '(data.shape)\n', (2656, 2668), False, 'import numpy\n'), ((4502, 4523), 'numpy.prod', 'numpy.prod', (['img.shape'], {}), '(img.shape)\n', (4512, 4523), False, 'import numpy\n'), ((4570, 4584), 'numpy.std', 'numpy.std', (['img'], {}), '(img)\n', (4579, 4584), False, 'import numpy\n'), ((5721, 5748), 'numpy.prod', 'numpy.prod', (['data.shape[-2:]'], {}), '(data.shape[-2:])\n', (5731, 5748), False, 'import numpy\n')] |
"""
Script that trains multitask models on hiv dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import deepchem as dc
from hiv_datasets import load_hiv
# Only for debug!
np.random.seed(123)
# Load hiv dataset
n_features = 512
hiv_tasks, hiv_datasets, transformers = load_hiv()
train_dataset, valid_dataset, test_dataset = hiv_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
transformer = dc.trans.IRVTransformer(10, len(hiv_tasks), train_dataset)
train_dataset = transformer.transform(train_dataset)
valid_dataset = transformer.transform(valid_dataset)
model = dc.models.TensorflowMultiTaskIRVClassifier(
len(hiv_tasks), K=10, batch_size=50, learning_rate=0.001)
# Fit trained model
model.fit(train_dataset)
model.save()
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
| [
"hiv_datasets.load_hiv",
"numpy.random.seed",
"deepchem.metrics.Metric"
] | [((277, 296), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (291, 296), True, 'import numpy as np\n'), ((378, 388), 'hiv_datasets.load_hiv', 'load_hiv', ([], {}), '()\n', (386, 388), False, 'from hiv_datasets import load_hiv\n'), ((474, 526), 'deepchem.metrics.Metric', 'dc.metrics.Metric', (['dc.metrics.roc_auc_score', 'np.mean'], {}), '(dc.metrics.roc_auc_score, np.mean)\n', (491, 526), True, 'import deepchem as dc\n')] |
"""Slightly customized versions of numpy / scipy linalg methods.
The standard numpy and scipy linalg routines both cope badly with
0-dimensional matrices or vectors. This module wraps several standard
routines to check for these special cases.
"""
# Copyright 2011, 2012, 2013, 2014, 2015 <NAME>
# This file is part of armspeech.
# See `License` for details of license and warranty.
import numpy as np
import numpy.linalg as la
import scipy.linalg as sla
from codedep import codeDeps
import armspeech.numpy_settings
@codeDeps()
def inv(*args, **kwargs):
    """Matrix inverse that tolerates the degenerate (0, 0) case.

    Returns a 0x0 identity for an empty matrix (which the standard
    routine copes badly with); otherwise defers to numpy.linalg.inv
    unchanged.
    """
    matrix = kwargs['a'] if len(args) == 0 else args[0]
    if np.shape(matrix) != (0, 0):
        return la.inv(*args, **kwargs)
    return np.eye(0)
@codeDeps()
def pinv(*args, **kwargs):
    """Pseudo-inverse that tolerates the degenerate (0, 0) case.

    Returns a 0x0 identity for an empty matrix; otherwise defers to
    numpy.linalg.pinv unchanged.
    """
    matrix = kwargs['a'] if not args else args[0]
    if np.shape(matrix) != (0, 0):
        return la.pinv(*args, **kwargs)
    return np.eye(0)
@codeDeps()
def solve(*args, **kwargs):
    """Solve a x = b, tolerating 0-dimensional systems.

    When ``a`` is (0, 0) and ``b`` has zero leading dimension, an all-zero
    array shaped like ``b`` is returned; otherwise numpy.linalg.solve is
    used unchanged.
    """
    lhs = kwargs['a'] if len(args) < 1 else args[0]
    rhs = kwargs['b'] if len(args) < 2 else args[1]
    degenerate = np.shape(lhs) == (0, 0) and np.shape(rhs)[0] == 0
    if not degenerate:
        return la.solve(*args, **kwargs)
    return np.zeros(np.shape(rhs))
@codeDeps()
def cholesky(*args, **kwargs):
    """Cholesky factorization that tolerates the degenerate (0, 0) case.

    Returns a 0x0 identity for an empty matrix; otherwise defers to
    scipy.linalg.cholesky unchanged.
    """
    matrix = kwargs['a'] if len(args) == 0 else args[0]
    if np.shape(matrix) != (0, 0):
        return sla.cholesky(*args, **kwargs)
    return np.eye(0)
@codeDeps()
def cho_solve(*args, **kwargs):
    """Solve a x = b given the Cholesky factorization ``(c, lower)`` of a.

    Returns an all-zero solution for the degenerate case of a (0, 0)
    factor with an empty right-hand side; otherwise defers to
    scipy.linalg.cho_solve unchanged.

    Bug fix: the first argument used to be read only positionally
    (``args[0]``), so the scipy-legal keyword form
    ``cho_solve(c_and_lower=..., b=...)`` raised IndexError. It is now
    accepted by its scipy keyword name, consistent with the other
    wrappers in this module.
    """
    # Accept the (c, lower) pair positionally or via scipy's keyword name.
    c, lower = args[0] if len(args) > 0 else kwargs['c_and_lower']
    b = args[1] if len(args) > 1 else kwargs['b']
    if np.shape(c) == (0, 0) and np.shape(b) == (0,):
        return np.zeros(np.shape(b))
    else:
        return sla.cho_solve(*args, **kwargs)
# (not strictly speaking in linalg but whatever)
@codeDeps()
def tensordot(*args, **kwargs):
    """Tensor dot product that tolerates zero-sized contracted dimensions.

    Only the scalar-integer form of ``axes`` gets special treatment: when
    the last ``axes`` dimensions of ``a`` and the first ``axes`` dimensions
    of ``b`` are all zero-sized, an all-zero result of the correct shape
    is returned directly. Every other call defers to numpy.tensordot
    (which already copes fine with axes == 0).
    """
    a = kwargs['a'] if len(args) < 1 else args[0]
    b = kwargs['b'] if len(args) < 2 else args[1]
    axes = kwargs['axes'] if len(args) < 3 else args[2]
    # (FIXME : specific to axes being an integer. Make more general.)
    if (np.shape(axes) == () and axes > 0
            and sum(np.shape(a)[-axes:]) == 0
            and sum(np.shape(b)[:axes]) == 0):
        result_shape = np.shape(a)[:-axes] + np.shape(b)[axes:]
        return np.zeros(result_shape)
    return np.tensordot(*args, **kwargs)
| [
"numpy.eye",
"numpy.linalg.solve",
"scipy.linalg.cho_solve",
"numpy.linalg.pinv",
"codedep.codeDeps",
"numpy.tensordot",
"scipy.linalg.cholesky",
"numpy.linalg.inv",
"numpy.shape"
] | [((524, 534), 'codedep.codeDeps', 'codeDeps', ([], {}), '()\n', (532, 534), False, 'from codedep import codeDeps\n'), ((717, 727), 'codedep.codeDeps', 'codeDeps', ([], {}), '()\n', (725, 727), False, 'from codedep import codeDeps\n'), ((912, 922), 'codedep.codeDeps', 'codeDeps', ([], {}), '()\n', (920, 922), False, 'from codedep import codeDeps\n'), ((1195, 1205), 'codedep.codeDeps', 'codeDeps', ([], {}), '()\n', (1203, 1205), False, 'from codedep import codeDeps\n'), ((1399, 1409), 'codedep.codeDeps', 'codeDeps', ([], {}), '()\n', (1407, 1409), False, 'from codedep import codeDeps\n'), ((1713, 1723), 'codedep.codeDeps', 'codeDeps', ([], {}), '()\n', (1721, 1723), False, 'from codedep import codeDeps\n'), ((618, 629), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (626, 629), True, 'import numpy as np\n'), ((656, 665), 'numpy.eye', 'np.eye', (['(0)'], {}), '(0)\n', (662, 665), True, 'import numpy as np\n'), ((691, 714), 'numpy.linalg.inv', 'la.inv', (['*args'], {}), '(*args, **kwargs)\n', (697, 714), True, 'import numpy.linalg as la\n'), ((812, 823), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (820, 823), True, 'import numpy as np\n'), ((850, 859), 'numpy.eye', 'np.eye', (['(0)'], {}), '(0)\n', (856, 859), True, 'import numpy as np\n'), ((885, 909), 'numpy.linalg.pinv', 'la.pinv', (['*args'], {}), '(*args, **kwargs)\n', (892, 909), True, 'import numpy.linalg as la\n'), ((1167, 1192), 'numpy.linalg.solve', 'la.solve', (['*args'], {}), '(*args, **kwargs)\n', (1175, 1192), True, 'import numpy.linalg as la\n'), ((1294, 1305), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (1302, 1305), True, 'import numpy as np\n'), ((1332, 1341), 'numpy.eye', 'np.eye', (['(0)'], {}), '(0)\n', (1338, 1341), True, 'import numpy as np\n'), ((1367, 1396), 'scipy.linalg.cholesky', 'sla.cholesky', (['*args'], {}), '(*args, **kwargs)\n', (1379, 1396), True, 'import scipy.linalg as sla\n'), ((1631, 1661), 'scipy.linalg.cho_solve', 'sla.cho_solve', (['*args'], {}), '(*args, 
**kwargs)\n', (1644, 1661), True, 'import scipy.linalg as sla\n'), ((2237, 2266), 'numpy.tensordot', 'np.tensordot', (['*args'], {}), '(*args, **kwargs)\n', (2249, 2266), True, 'import numpy as np\n'), ((1058, 1069), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (1066, 1069), True, 'import numpy as np\n'), ((1129, 1140), 'numpy.shape', 'np.shape', (['b'], {}), '(b)\n', (1137, 1140), True, 'import numpy as np\n'), ((1522, 1533), 'numpy.shape', 'np.shape', (['c'], {}), '(c)\n', (1530, 1533), True, 'import numpy as np\n'), ((1548, 1559), 'numpy.shape', 'np.shape', (['b'], {}), '(b)\n', (1556, 1559), True, 'import numpy as np\n'), ((1593, 1604), 'numpy.shape', 'np.shape', (['b'], {}), '(b)\n', (1601, 1604), True, 'import numpy as np\n'), ((2044, 2058), 'numpy.shape', 'np.shape', (['axes'], {}), '(axes)\n', (2052, 2058), True, 'import numpy as np\n'), ((1084, 1095), 'numpy.shape', 'np.shape', (['b'], {}), '(b)\n', (1092, 1095), True, 'import numpy as np\n'), ((2086, 2097), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (2094, 2097), True, 'import numpy as np\n'), ((2120, 2131), 'numpy.shape', 'np.shape', (['b'], {}), '(b)\n', (2128, 2131), True, 'import numpy as np\n'), ((2170, 2181), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (2178, 2181), True, 'import numpy as np\n'), ((2192, 2203), 'numpy.shape', 'np.shape', (['b'], {}), '(b)\n', (2200, 2203), True, 'import numpy as np\n')] |
import unittest
import FrictionQPotFEM
import GMatElastoPlasticQPot.Cartesian2d as GMat
import GooseFEM
import numpy as np
class test_Generic2d(unittest.TestCase):
    """
    Tests for FrictionQPotFEM.Generic2d.System on a small (3x3 Quad4) mesh:
    event-driven loading (forward/backward, homogeneous/random/per-element
    yield strains), flow/time stepping, and damping forces.
    """

    def test_eventDrivenSimpleShear(self):
        """
        Simple test of event driven simple shear in a homogeneous system:
        Load forward and backward (for the latter: test current implementation limitation).
        """
        mesh = GooseFEM.Mesh.Quad4.Regular(3, 3)
        nelem = mesh.nelem()
        coor = mesh.coor()
        dofs = mesh.dofs()
        # Tie left and right open edges together (periodicity in x).
        dofs[mesh.nodesLeftOpenEdge(), ...] = dofs[mesh.nodesRightOpenEdge(), ...]
        system = FrictionQPotFEM.Generic2d.System(
            coor,
            mesh.conn(),
            dofs,
            dofs[np.concatenate((mesh.nodesBottomEdge(), mesh.nodesTopEdge())), :].ravel(),
            [0, 1, 2, 6, 7, 8],
            [3, 4, 5],
        )
        nelas = system.elastic().size
        nplas = system.plastic().size
        # Homogeneous yield strains: 1, 2, 3, 4, 5 for every plastic element.
        epsy = np.cumsum(np.ones((nplas, 5)), axis=1)
        system.setMassMatrix(np.ones(nelem))
        system.setDampingMatrix(np.ones(nelem))
        system.setElastic(np.ones(nelas), np.ones(nelas))
        system.setPlastic(np.ones(nplas), np.ones(nplas), epsy)
        system.setDt(1.0)
        # Affine simple-shear perturbation: u_x proportional to height.
        delta_u = np.zeros_like(coor)
        for i in range(delta_u.shape[0]):
            delta_u[i, 0] = 0.1 * (coor[i, 1] - coor[0, 1])
        # Second loop re-uses the normalized perturbation from the first.
        for loop in range(2):
            if loop == 0:
                system.eventDriven_setDeltaU(delta_u)
                delta_u = system.eventDriven_deltaU()
            else:
                system.eventDriven_setDeltaU(delta_u)
            system.setU(np.zeros_like(coor))
            # Each entry: [direction, kick, index, f, throw]; the expected
            # equivalent strain after the step is epsy[0, index] + f * 0.5 * 0.1.
            settings = [
                [+1, 0, 0, -1, 0], # : .| | | |
                [+1, 0, 0, -1, 0], # : .| | | |
                [+1, 1, 0, +1, 0], # : |. | | |
                [+1, 0, 1, -1, 0], # : | .| | |
                [+1, 1, 1, +1, 0], # : | |. | |
                [+1, 0, 2, -1, 0], # : | | .| |
                [+1, 1, 2, +1, 0], # : | | |. |
                [-1, 0, 2, +1, 0], # : | | |. |
                [-1, 1, 2, -1, 0], # : | | .| |
                [-1, 0, 1, +1, 0], # : | |. | |
                [-1, 1, 1, -1, 0], # : | .| | |
                [-1, 0, 0, +1, 0], # : |. | | |
                [-1, 1, 0, -1, 0], # : .| | | |
                [-1, 0, 0, -1, 1], # : .| | | | (symmetry, throw)
                [-1, 1, 0, +1, 1], # : |. | | | (symmetry, not tested)
                [-1, 0, 1, -1, 1], # : | .| | | (symmetry, not tested)
            ]
            for direction, kick, index, f, throw in settings:
                eps_expect = epsy[0, index] + f * 0.5 * 0.1
                if throw:
                    with self.assertRaises(IndexError):
                        system.eventDrivenStep(0.1, kick, direction)
                    break
                system.eventDrivenStep(0.1, kick, direction)
                self.assertTrue(np.allclose(GMat.Epsd(system.plastic_Eps()), eps_expect))
                self.assertTrue(system.residual() < 1e-5)

    def test_eventDrivenSimpleShear_random(self):
        """
        Like :py:func:`test_eventDrivenSimpleShear` but with random yield strains.
        """
        mesh = GooseFEM.Mesh.Quad4.Regular(3, 3)
        nelem = mesh.nelem()
        coor = mesh.coor()
        dofs = mesh.dofs()
        dofs[mesh.nodesLeftOpenEdge(), ...] = dofs[mesh.nodesRightOpenEdge(), ...]
        system = FrictionQPotFEM.Generic2d.System(
            coor,
            mesh.conn(),
            dofs,
            dofs[np.concatenate((mesh.nodesBottomEdge(), mesh.nodesTopEdge())), :].ravel(),
            [0, 1, 2, 6, 7, 8],
            [3, 4, 5],
        )
        nelas = system.elastic().size
        nplas = system.plastic().size
        epsy = 1e-2 * np.cumsum(np.random.random((nplas, 100)), axis=1)
        # Step size well below the smallest yield-strain gap.
        deps = 0.1 * np.min(np.diff(epsy, axis=1))
        system.setMassMatrix(np.ones(nelem))
        system.setDampingMatrix(np.ones(nelem))
        system.setElastic(np.ones(nelas), np.ones(nelas))
        system.setPlastic(np.ones(nplas), np.ones(nplas), epsy)
        system.setDt(1.0)
        delta_u = np.zeros_like(coor)
        for i in range(delta_u.shape[0]):
            delta_u[i, 0] = 0.1 * (coor[i, 1] - coor[0, 1])
        system.eventDriven_setDeltaU(delta_u)
        # Alternate elastic steps (kick=False) and kicks (kick=True);
        # the iterative and non-iterative implementations must agree.
        kicks = np.zeros(50, dtype=bool)
        kicks[1::2] = True
        for inc, kick in enumerate(kicks):
            idx_n = system.plastic_CurrentIndex()
            u_n = system.u()
            system.eventDrivenStep(deps, kick, iterative=True)
            idx = system.plastic_CurrentIndex()
            if kick:
                self.assertTrue(not np.all(idx == idx_n))
            else:
                self.assertTrue(np.all(idx == idx_n))
            system.setU(u_n)
            system.eventDrivenStep(deps, kick)
            idx = system.plastic_CurrentIndex()
            if kick:
                self.assertTrue(not np.all(idx == idx_n))
            else:
                self.assertTrue(np.all(idx == idx_n))
        # Same comparison loading backward (direction = -1).
        for kick in kicks:
            idx_n = system.plastic_CurrentIndex()
            u_n = system.u()
            system.setU(u_n)
            system.eventDrivenStep(deps, kick, -1, iterative=True)
            idx = system.plastic_CurrentIndex()
            if kick:
                self.assertTrue(not np.all(idx == idx_n))
            else:
                self.assertTrue(np.all(idx == idx_n))
            # Backward stepping past the first yield strain must throw.
            if np.any(idx_n == 0):
                with self.assertRaises(IndexError):
                    system.eventDrivenStep(deps, kick, -1)
                break
            system.setU(u_n)
            system.eventDrivenStep(deps, kick, -1)
            idx = system.plastic_CurrentIndex()
            if kick:
                self.assertTrue(not np.all(idx == idx_n))
            else:
                self.assertTrue(np.all(idx == idx_n))

    def test_eventDrivenSimpleShear_element(self):
        """
        Like :py:func:`test_eventDrivenSimpleShear` but with slightly different yield strains
        per element.
        """
        mesh = GooseFEM.Mesh.Quad4.Regular(3, 3)
        nelem = mesh.nelem()
        coor = mesh.coor()
        dofs = mesh.dofs()
        dofs[mesh.nodesLeftOpenEdge(), ...] = dofs[mesh.nodesRightOpenEdge(), ...]
        system = FrictionQPotFEM.Generic2d.System(
            coor,
            mesh.conn(),
            dofs,
            dofs[np.concatenate((mesh.nodesBottomEdge(), mesh.nodesTopEdge())), :].ravel(),
            [0, 1, 2, 6, 7, 8],
            [3, 4, 5],
        )
        nelas = system.elastic().size
        nplas = system.plastic().size
        epsy = np.cumsum(np.ones((nplas, 5)), axis=1)
        system.setMassMatrix(np.ones(nelem))
        system.setDampingMatrix(np.ones(nelem))
        system.setElastic(np.ones(nelas), np.ones(nelas))
        system.setPlastic(np.ones(nplas), np.ones(nplas), epsy)
        system.setDt(1.0)
        # Shift the yield strains of one integration point per element by
        # +0.1, recording the shifted values for the expectations below.
        epsy_element = np.zeros(epsy.shape)
        mat = system.material_plastic()
        for e in range(mat.shape()[0]):
            c = mat.refCusp([e, 1])
            y = c.epsy() + 0.1
            epsy_element[e, :] = y[1:]
            c.reset_epsy(y, init_elastic=False)
        delta_u = np.zeros_like(coor)
        for i in range(delta_u.shape[0]):
            delta_u[i, 0] = 0.1 * (coor[i, 1] - coor[0, 1])
        for loop in range(2):
            if loop == 0:
                system.eventDriven_setDeltaU(delta_u)
                delta_u = system.eventDriven_deltaU()
            else:
                system.eventDriven_setDeltaU(delta_u)
            system.setU(np.zeros_like(coor))
            settings = [
                [+1, 0, 0, -1, 0], # : .| | | |
                [+1, 0, 0, -1, 0], # : .| | | |
                [+1, 1, 0, +1, 0], # : |. | | |
                [+1, 0, 1, -1, 0], # : | .| | |
                [+1, 1, 1, +1, 0], # : | |. | |
                [+1, 0, 2, -1, 0], # : | | .| |
                [+1, 1, 2, +1, 0], # : | | |. |
            ]
            for direction, kick, index, f, throw in settings:
                # Kicks land on the (shifted) per-element yield strains.
                if not kick:
                    eps_expect = epsy[0, index] + f * 0.5 * 0.05
                else:
                    eps_expect = epsy_element[0, index] + f * 0.5 * 0.05
                if throw:
                    with self.assertRaises(IndexError):
                        system.eventDrivenStep(0.05, kick, direction, yield_element=True)
                    break
                system.eventDrivenStep(0.05, kick, direction, yield_element=True)
                self.assertTrue(np.allclose(GMat.Epsd(system.plastic_Eps()), eps_expect))
                self.assertTrue(system.residual() < 1e-5)

    def test_flowSteps(self):
        """
        Basic test of:
        - Generic2d.System.flowSteps
        - Generic2d.System.timeSteps
        """
        mesh = GooseFEM.Mesh.Quad4.Regular(3, 3)
        nelem = mesh.nelem()
        system = FrictionQPotFEM.Generic2d.System(
            coor=mesh.coor(),
            conn=mesh.conn(),
            dofs=mesh.dofs(),
            iip=np.arange(mesh.nnode() * mesh.ndim()),
            elem_elastic=[0, 1, 2, 6, 7, 8],
            elem_plastic=[3, 4, 5],
        )
        system.setMassMatrix(np.ones(nelem))
        system.setDampingMatrix(np.ones(nelem))
        system.setElastic(np.ones(6), np.ones(6))
        system.setPlastic(np.ones(3), np.ones(3), [[100.0], [100.0], [100.0]])
        system.setDt(1.0)
        x = mesh.coor()
        # Affine simple-shear velocity field.
        v = np.zeros_like(x)
        for i in range(v.shape[0]):
            v[i, 0] = 0.1 * (x[i, 1] - x[0, 1])
        system.flowSteps(10, v)
        # displacement is added affinely in an elastic system:
        # there is not residual force -> the system stays uniform
        self.assertTrue(np.allclose(system.u(), 10 * v))
        self.assertTrue(np.allclose(system.t(), 10))
        # timeSteps advances time but adds no displacement here.
        system.timeSteps(10)
        self.assertTrue(np.allclose(system.u(), 10 * v))
        self.assertTrue(np.allclose(system.t(), 20))

    def test_damping_alpha_no_eta(self):
        """Mass-proportional damping only: fdamp equals alpha * v on all DOFs."""
        mesh = GooseFEM.Mesh.Quad4.Regular(3, 3)
        nelem = mesh.nelem()
        system = FrictionQPotFEM.Generic2d.System(
            coor=mesh.coor(),
            conn=mesh.conn(),
            dofs=mesh.dofsPeriodic(),
            iip=[],
            elem_elastic=[0, 1, 2, 6, 7, 8],
            elem_plastic=[3, 4, 5],
        )
        alpha = 1.2
        system.setMassMatrix(np.ones(nelem))
        system.setDampingMatrix(alpha * np.ones(nelem))
        system.setElastic(np.ones(6), np.ones(6))
        system.setPlastic(np.ones(3), np.ones(3), [[100.0], [100.0], [100.0]])
        system.setDt(1.0)
        system.setV(np.ones_like(mesh.coor()))
        assert np.allclose(system.vector().AsDofs(system.fdamp()), alpha)

    def test_damping_no_alpha_eta(self):
        """Strain-rate (eta) damping only, checked against a hand-built force."""
        mesh = GooseFEM.Mesh.Quad4.Regular(3, 3)
        coor = mesh.coor()
        conn = mesh.conn()
        nelem = mesh.nelem()
        system = FrictionQPotFEM.Generic2d.System(
            coor=coor,
            conn=conn,
            dofs=mesh.dofsPeriodic(),
            iip=[],
            elem_elastic=[0, 1, 2, 6, 7, 8],
            elem_plastic=[3, 4, 5],
        )
        eta = 3.4
        system.setMassMatrix(np.ones(nelem))
        system.setEta(eta)
        system.setElastic(np.ones(6), np.ones(6))
        system.setPlastic(np.ones(3), np.ones(3), [[100.0], [100.0], [100.0]])
        system.setDt(1.0)
        # Move the top row of elements; the expected eta-force acts on the
        # row boundaries with opposite signs.
        f = np.zeros_like(coor)
        v = np.zeros_like(coor)
        v[conn[-3:, :], 0] = 2
        f[conn[:3, -2:], 0] = -eta
        f[conn[-3:, :2], 0] = eta
        system.setV(v)
        assert np.allclose(system.fdamp(), f)

    def test_damping_alpha_eta(self):
        """Combined mass-proportional (alpha) and strain-rate (eta) damping."""
        mesh = GooseFEM.Mesh.Quad4.Regular(3, 3)
        coor = mesh.coor()
        conn = mesh.conn()
        nelem = mesh.nelem()
        system = FrictionQPotFEM.Generic2d.System(
            coor=coor,
            conn=conn,
            dofs=mesh.dofsPeriodic(),
            iip=[],
            elem_elastic=[0, 1, 2, 6, 7, 8],
            elem_plastic=[3, 4, 5],
        )
        alpha = 1.2
        eta = 3.4
        system.setMassMatrix(np.ones(nelem))
        system.setEta(eta)
        system.setDampingMatrix(alpha * np.ones(nelem))
        system.setElastic(np.ones(6), np.ones(6))
        system.setPlastic(np.ones(3), np.ones(3), [[100.0], [100.0], [100.0]])
        system.setDt(1.0)
        # Expected force: alpha contribution on the moving nodes plus the
        # eta contribution on the row boundaries (as in the eta-only test).
        f = np.zeros_like(coor)
        v = np.zeros_like(coor)
        v[conn[-3:, :], 0] = 2
        f[conn[-3:, :], 0] = 2 * alpha
        f[conn[:3, -2:], 0] += -eta
        f[conn[-3:, :2], 0] += eta
        system.setV(v)
        assert np.allclose(system.fdamp(), f)
if __name__ == "__main__":
unittest.main()
| [
"numpy.ones",
"numpy.random.random",
"numpy.diff",
"numpy.any",
"numpy.zeros",
"GooseFEM.Mesh.Quad4.Regular",
"unittest.main",
"numpy.all",
"numpy.zeros_like"
] | [((13114, 13129), 'unittest.main', 'unittest.main', ([], {}), '()\n', (13127, 13129), False, 'import unittest\n'), ((443, 476), 'GooseFEM.Mesh.Quad4.Regular', 'GooseFEM.Mesh.Quad4.Regular', (['(3)', '(3)'], {}), '(3, 3)\n', (470, 476), False, 'import GooseFEM\n'), ((1306, 1325), 'numpy.zeros_like', 'np.zeros_like', (['coor'], {}), '(coor)\n', (1319, 1325), True, 'import numpy as np\n'), ((3466, 3499), 'GooseFEM.Mesh.Quad4.Regular', 'GooseFEM.Mesh.Quad4.Regular', (['(3)', '(3)'], {}), '(3, 3)\n', (3493, 3499), False, 'import GooseFEM\n'), ((4398, 4417), 'numpy.zeros_like', 'np.zeros_like', (['coor'], {}), '(coor)\n', (4411, 4417), True, 'import numpy as np\n'), ((4585, 4609), 'numpy.zeros', 'np.zeros', (['(50)'], {'dtype': 'bool'}), '(50, dtype=bool)\n', (4593, 4609), True, 'import numpy as np\n'), ((6358, 6391), 'GooseFEM.Mesh.Quad4.Regular', 'GooseFEM.Mesh.Quad4.Regular', (['(3)', '(3)'], {}), '(3, 3)\n', (6385, 6391), False, 'import GooseFEM\n'), ((7226, 7246), 'numpy.zeros', 'np.zeros', (['epsy.shape'], {}), '(epsy.shape)\n', (7234, 7246), True, 'import numpy as np\n'), ((7500, 7519), 'numpy.zeros_like', 'np.zeros_like', (['coor'], {}), '(coor)\n', (7513, 7519), True, 'import numpy as np\n'), ((9226, 9259), 'GooseFEM.Mesh.Quad4.Regular', 'GooseFEM.Mesh.Quad4.Regular', (['(3)', '(3)'], {}), '(3, 3)\n', (9253, 9259), False, 'import GooseFEM\n'), ((9862, 9878), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (9875, 9878), True, 'import numpy as np\n'), ((10436, 10469), 'GooseFEM.Mesh.Quad4.Regular', 'GooseFEM.Mesh.Quad4.Regular', (['(3)', '(3)'], {}), '(3, 3)\n', (10463, 10469), False, 'import GooseFEM\n'), ((11216, 11249), 'GooseFEM.Mesh.Quad4.Regular', 'GooseFEM.Mesh.Quad4.Regular', (['(3)', '(3)'], {}), '(3, 3)\n', (11243, 11249), False, 'import GooseFEM\n'), ((11838, 11857), 'numpy.zeros_like', 'np.zeros_like', (['coor'], {}), '(coor)\n', (11851, 11857), True, 'import numpy as np\n'), ((11870, 11889), 'numpy.zeros_like', 'np.zeros_like', 
(['coor'], {}), '(coor)\n', (11883, 11889), True, 'import numpy as np\n'), ((12117, 12150), 'GooseFEM.Mesh.Quad4.Regular', 'GooseFEM.Mesh.Quad4.Regular', (['(3)', '(3)'], {}), '(3, 3)\n', (12144, 12150), False, 'import GooseFEM\n'), ((12815, 12834), 'numpy.zeros_like', 'np.zeros_like', (['coor'], {}), '(coor)\n', (12828, 12834), True, 'import numpy as np\n'), ((12847, 12866), 'numpy.zeros_like', 'np.zeros_like', (['coor'], {}), '(coor)\n', (12860, 12866), True, 'import numpy as np\n'), ((1016, 1035), 'numpy.ones', 'np.ones', (['(nplas, 5)'], {}), '((nplas, 5))\n', (1023, 1035), True, 'import numpy as np\n'), ((1075, 1089), 'numpy.ones', 'np.ones', (['nelem'], {}), '(nelem)\n', (1082, 1089), True, 'import numpy as np\n'), ((1123, 1137), 'numpy.ones', 'np.ones', (['nelem'], {}), '(nelem)\n', (1130, 1137), True, 'import numpy as np\n'), ((1165, 1179), 'numpy.ones', 'np.ones', (['nelas'], {}), '(nelas)\n', (1172, 1179), True, 'import numpy as np\n'), ((1181, 1195), 'numpy.ones', 'np.ones', (['nelas'], {}), '(nelas)\n', (1188, 1195), True, 'import numpy as np\n'), ((1223, 1237), 'numpy.ones', 'np.ones', (['nplas'], {}), '(nplas)\n', (1230, 1237), True, 'import numpy as np\n'), ((1239, 1253), 'numpy.ones', 'np.ones', (['nplas'], {}), '(nplas)\n', (1246, 1253), True, 'import numpy as np\n'), ((4167, 4181), 'numpy.ones', 'np.ones', (['nelem'], {}), '(nelem)\n', (4174, 4181), True, 'import numpy as np\n'), ((4215, 4229), 'numpy.ones', 'np.ones', (['nelem'], {}), '(nelem)\n', (4222, 4229), True, 'import numpy as np\n'), ((4257, 4271), 'numpy.ones', 'np.ones', (['nelas'], {}), '(nelas)\n', (4264, 4271), True, 'import numpy as np\n'), ((4273, 4287), 'numpy.ones', 'np.ones', (['nelas'], {}), '(nelas)\n', (4280, 4287), True, 'import numpy as np\n'), ((4315, 4329), 'numpy.ones', 'np.ones', (['nplas'], {}), '(nplas)\n', (4322, 4329), True, 'import numpy as np\n'), ((4331, 4345), 'numpy.ones', 'np.ones', (['nplas'], {}), '(nplas)\n', (4338, 4345), True, 'import numpy as np\n'), 
((5718, 5736), 'numpy.any', 'np.any', (['(idx_n == 0)'], {}), '(idx_n == 0)\n', (5724, 5736), True, 'import numpy as np\n'), ((6931, 6950), 'numpy.ones', 'np.ones', (['(nplas, 5)'], {}), '((nplas, 5))\n', (6938, 6950), True, 'import numpy as np\n'), ((6990, 7004), 'numpy.ones', 'np.ones', (['nelem'], {}), '(nelem)\n', (6997, 7004), True, 'import numpy as np\n'), ((7038, 7052), 'numpy.ones', 'np.ones', (['nelem'], {}), '(nelem)\n', (7045, 7052), True, 'import numpy as np\n'), ((7080, 7094), 'numpy.ones', 'np.ones', (['nelas'], {}), '(nelas)\n', (7087, 7094), True, 'import numpy as np\n'), ((7096, 7110), 'numpy.ones', 'np.ones', (['nelas'], {}), '(nelas)\n', (7103, 7110), True, 'import numpy as np\n'), ((7138, 7152), 'numpy.ones', 'np.ones', (['nplas'], {}), '(nplas)\n', (7145, 7152), True, 'import numpy as np\n'), ((7154, 7168), 'numpy.ones', 'np.ones', (['nplas'], {}), '(nplas)\n', (7161, 7168), True, 'import numpy as np\n'), ((9606, 9620), 'numpy.ones', 'np.ones', (['nelem'], {}), '(nelem)\n', (9613, 9620), True, 'import numpy as np\n'), ((9654, 9668), 'numpy.ones', 'np.ones', (['nelem'], {}), '(nelem)\n', (9661, 9668), True, 'import numpy as np\n'), ((9696, 9706), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (9703, 9706), True, 'import numpy as np\n'), ((9708, 9718), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (9715, 9718), True, 'import numpy as np\n'), ((9746, 9756), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (9753, 9756), True, 'import numpy as np\n'), ((9758, 9768), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (9765, 9768), True, 'import numpy as np\n'), ((10809, 10823), 'numpy.ones', 'np.ones', (['nelem'], {}), '(nelem)\n', (10816, 10823), True, 'import numpy as np\n'), ((10907, 10917), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (10914, 10917), True, 'import numpy as np\n'), ((10919, 10929), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (10926, 10929), True, 'import numpy as np\n'), ((10957, 10967), 'numpy.ones', 'np.ones', 
(['(3)'], {}), '(3)\n', (10964, 10967), True, 'import numpy as np\n'), ((10969, 10979), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (10976, 10979), True, 'import numpy as np\n'), ((11627, 11641), 'numpy.ones', 'np.ones', (['nelem'], {}), '(nelem)\n', (11634, 11641), True, 'import numpy as np\n'), ((11696, 11706), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (11703, 11706), True, 'import numpy as np\n'), ((11708, 11718), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (11715, 11718), True, 'import numpy as np\n'), ((11746, 11756), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (11753, 11756), True, 'import numpy as np\n'), ((11758, 11768), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (11765, 11768), True, 'import numpy as np\n'), ((12548, 12562), 'numpy.ones', 'np.ones', (['nelem'], {}), '(nelem)\n', (12555, 12562), True, 'import numpy as np\n'), ((12673, 12683), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (12680, 12683), True, 'import numpy as np\n'), ((12685, 12695), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (12692, 12695), True, 'import numpy as np\n'), ((12723, 12733), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (12730, 12733), True, 'import numpy as np\n'), ((12735, 12745), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (12742, 12745), True, 'import numpy as np\n'), ((4046, 4076), 'numpy.random.random', 'np.random.random', (['(nplas, 100)'], {}), '((nplas, 100))\n', (4062, 4076), True, 'import numpy as np\n'), ((4114, 4135), 'numpy.diff', 'np.diff', (['epsy'], {'axis': '(1)'}), '(epsy, axis=1)\n', (4121, 4135), True, 'import numpy as np\n'), ((10865, 10879), 'numpy.ones', 'np.ones', (['nelem'], {}), '(nelem)\n', (10872, 10879), True, 'import numpy as np\n'), ((12631, 12645), 'numpy.ones', 'np.ones', (['nelem'], {}), '(nelem)\n', (12638, 12645), True, 'import numpy as np\n'), ((1695, 1714), 'numpy.zeros_like', 'np.zeros_like', (['coor'], {}), '(coor)\n', (1708, 1714), True, 'import numpy as np\n'), ((5001, 5021), 'numpy.all', 
'np.all', (['(idx == idx_n)'], {}), '(idx == idx_n)\n', (5007, 5021), True, 'import numpy as np\n'), ((5277, 5297), 'numpy.all', 'np.all', (['(idx == idx_n)'], {}), '(idx == idx_n)\n', (5283, 5297), True, 'import numpy as np\n'), ((5680, 5700), 'numpy.all', 'np.all', (['(idx == idx_n)'], {}), '(idx == idx_n)\n', (5686, 5700), True, 'import numpy as np\n'), ((6129, 6149), 'numpy.all', 'np.all', (['(idx == idx_n)'], {}), '(idx == idx_n)\n', (6135, 6149), True, 'import numpy as np\n'), ((7889, 7908), 'numpy.zeros_like', 'np.zeros_like', (['coor'], {}), '(coor)\n', (7902, 7908), True, 'import numpy as np\n'), ((4929, 4949), 'numpy.all', 'np.all', (['(idx == idx_n)'], {}), '(idx == idx_n)\n', (4935, 4949), True, 'import numpy as np\n'), ((5205, 5225), 'numpy.all', 'np.all', (['(idx == idx_n)'], {}), '(idx == idx_n)\n', (5211, 5225), True, 'import numpy as np\n'), ((5608, 5628), 'numpy.all', 'np.all', (['(idx == idx_n)'], {}), '(idx == idx_n)\n', (5614, 5628), True, 'import numpy as np\n'), ((6057, 6077), 'numpy.all', 'np.all', (['(idx == idx_n)'], {}), '(idx == idx_n)\n', (6063, 6077), True, 'import numpy as np\n')] |
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
import csv
import numpy as np
# Read the data: summaries + labels for train/test and the raw documents.
training_set = pd.read_json('../raw_data/train_set.json')
test_set = pd.read_json('../raw_data/test_set.json')
document_set = pd.read_json('../raw_data/documents.json')

summs = training_set['summary'].to_numpy()
# Dump every training summary to a text file, separated by blank lines.
# The `with` block closes the file; the old trailing `f.close` (no
# parentheses, so a no-op attribute access) was removed.
with open('recap.txt', "w") as f:
    for a in summs:
        f.write(a)
        f.write("\n\n\n")
quit()  # NOTE: debug early-exit kept as-is -- the statistics below never run

labels_train = training_set['label'].to_numpy()
# Fixed: previously read training_set['label'] again, so the "testing set"
# frequencies printed below were actually the training-set frequencies.
labels_test = test_set['label'].to_numpy()
print('> Label repartition in the dataset')
# Labels are 0/1, so the sum is the count of positive examples.
fq_1_train = np.sum(labels_train) / len(labels_train)
fq_0_train = 1 - fq_1_train
print(' > For the training set:', fq_1_train, fq_0_train)
fq_1_test = np.sum(labels_test) / len(labels_test)
fq_0_test = 1 - fq_1_test
print(' > For the testing set:', fq_1_test, fq_0_test)
#print(training_set['summary'].iloc[0])
#print(training_set['summary'].iloc[2])
# Print every negative example for manual inspection.
for _, row in training_set.iterrows():
    if row['label'] == 0:
        print("doc", row['document'])
        print("sum", row['summary'])
        print("\n---------------------------")
| [
"numpy.sum",
"pandas.read_json"
] | [((194, 236), 'pandas.read_json', 'pd.read_json', (['"""../raw_data/train_set.json"""'], {}), "('../raw_data/train_set.json')\n", (206, 236), True, 'import pandas as pd\n'), ((248, 289), 'pandas.read_json', 'pd.read_json', (['"""../raw_data/test_set.json"""'], {}), "('../raw_data/test_set.json')\n", (260, 289), True, 'import pandas as pd\n'), ((305, 347), 'pandas.read_json', 'pd.read_json', (['"""../raw_data/documents.json"""'], {}), "('../raw_data/documents.json')\n", (317, 347), True, 'import pandas as pd\n'), ((666, 686), 'numpy.sum', 'np.sum', (['labels_train'], {}), '(labels_train)\n', (672, 686), True, 'import numpy as np\n'), ((811, 830), 'numpy.sum', 'np.sum', (['labels_test'], {}), '(labels_test)\n', (817, 830), True, 'import numpy as np\n')] |
import json
import numpy as np
if __name__ == "__main__":
    # Accuracy results produced for each defense flavor by the black-box attack.
    models = {'vanilla': 0, 'classification': 0, 'proxi_dist': 0, 'combined': 0}
    models_list = ['vanilla', 'classification', 'proxi_dist', 'combined']  # for consistency in older versions
    for flavor in models_list:
        with open(f'./accuracy_{flavor}_black.txt', 'r') as f:
            models[flavor] = json.load(f)
    # Models initialized with their base (no-attack) accuracy.
    acc = {'a': [0.993900], 'b': [0.992500], 'c': [0.993800], 'd': [0.981200], 'e': [0.980700]}
    acc_list = list(acc.keys())
    for model in acc_list:
        # Column order: no attack, no defense, then one column per flavor.
        acc[model].append(models['vanilla'][f'{model}-fgsm-attack'])
        for flavor in models_list:
            acc[model].append(models[flavor][f'{model}-fgsm'])
        # Bold the best accuracy among the attacked columns (offset by 1
        # because column 0 is the no-attack baseline).
        argmax = np.argmax(acc[model][1:]) + 1
        acc[model][argmax] = f'\\textbf{{{acc[model][argmax]}}}'
    # Emit the LaTeX table.  Backslashes are now properly escaped
    # ('\\centering', '\\epsilon'): the previous '\c'/'\e' forms were invalid
    # escape sequences that Python currently keeps literally (same output)
    # but warns about and will eventually reject.
    with open('./blackbox_table.tex', 'w') as f:
        c = ['c'] * (len(models_list) + 3)
        f.write("\\begin{table}[H]\n\\centering\n\\begin{tabular}{")
        f.write('|'.join(c))
        f.write("}\nSubstitute & No Attack & No Defense & Vanilla & Classification & Proximity and Distance & Combined \\\\ \\hline\n")
        for model in acc_list:
            acc[model].insert(0, model.upper())
            f.write(' & '.join(str(x) for x in acc[model]))
            f.write('\\\\\n')
        f.write('\\end{tabular}\n')
        f.write('\\caption{Classification accuracy of different models based on the FGSM Black-Box attack on various substitute models with $\\epsilon=0.3$.}\n')
        f.write('\\label{table:blackbox-result}\n')
        f.write('\\end{table}\n')
"json.load",
"numpy.argmax"
] | [((373, 385), 'json.load', 'json.load', (['f'], {}), '(f)\n', (382, 385), False, 'import json\n'), ((786, 811), 'numpy.argmax', 'np.argmax', (['acc[model][1:]'], {}), '(acc[model][1:])\n', (795, 811), True, 'import numpy as np\n')] |
import torch
import os
import random
from torch.utils.data import Dataset
from PIL import Image
import numpy as np
import sys
import json
from glob import glob
from PIL import ImageDraw
from misc.mask_utils import scatterMask
from misc.utils import denorm
import glob
from scipy.io import loadmat
from tqdm import tqdm
# Make the current working directory importable so that sibling project
# packages (e.g. `misc`, `data_loader`) resolve when this file is run
# directly as a script rather than as part of the package.
module_path = os.path.abspath(os.getcwd())
if module_path not in sys.path:
    sys.path.append(module_path)
# ==================================================================#
# == CelebA
# ==================================================================#
# Mapping from the integer values stored in the LV-MHP-v2 parsing
# annotations to human-readable part / clothing / accessory names.
# Value 0 is the background class; 59 classes in total.
MASK_LABELS = {
    0: 'Background',
    1: 'Cap/hat',
    2: 'Helmet',
    3: 'Face',
    4: 'Hair',
    5: 'Left-arm',
    6: 'Right-arm',
    7: 'Left-hand',
    8: 'Right-hand',
    9: 'Protector',
    10: 'Bikini/bra',
    11: 'Jacket/windbreaker/hoodie',
    12: 'Tee-shirt',
    13: 'Polo-shirt',
    14: 'Sweater',
    15: 'Singlet',
    16: 'Torso-skin',
    17: 'Pants',
    18: 'Shorts/swim-shorts',
    19: 'Skirt',
    20: 'Stockings',
    21: 'Socks',
    22: 'Left-boot',
    23: 'Right-boot',
    24: 'Left-shoe',
    25: 'Right-shoe',
    26: 'Left-highheel',
    27: 'Right-highheel',
    28: 'Left-sandal',
    29: 'Right-sandal',
    30: 'Left-leg',
    31: 'Right-leg',
    32: 'Left-foot',
    33: 'Right-foot',
    34: 'Coat',
    35: 'Dress',
    36: 'Robe',
    37: 'Jumpsuit',
    38: 'Other-full-body-clothes',
    39: 'Headwear',
    40: 'Backpack',
    41: 'Ball',
    42: 'Bats',
    43: 'Belt',
    44: 'Bottle',
    45: 'Carrybag',
    46: 'Cases',
    47: 'Sunglasses',
    48: 'Eyewear',
    49: 'Glove',
    50: 'Scarf',
    51: 'Umbrella',
    52: 'Wallet/purse',
    53: 'Watch',
    54: 'Wristband',
    55: 'Tie',
    56: 'Other-accessary',
    57: 'Other-upper-body-clothes',
    58: 'Other-lower-body-clothes',
}
# Inverse lookup: attribute name -> integer mask value.
MASK_ATTRS = {value: key for key, value in MASK_LABELS.items()}
# Pose
# 0: Right-ankle
# 1: Right-knee
# 2: Right-hip
# 3: Left-hip
# 4: Left-knee
# 5: Left-ankle
# 6: Pelvis
# 7: Thorax
# 8: Upper-neck
# 9: Head-top
# 10: Right-wrist
# 11: Right-elbow
# 12: Right-shoulder
# 13: Left-shoulder
# 14: Left-elbow
# 15: Left-wrist
# 16: Face-bbox-top-left-corner-point
# 17: Face-bbox-bottom-right-corner-point
# 18: Instance-bbox-top-left-corner-point
# 19: Instance-bbox-bottom-right-corner-point
class LV_MHP_v2(Dataset):
    """Torch Dataset for the LV-MHP-v2 multi-human parsing dataset.

    Reads an id list (``list/{train,val}.txt``), per-person parsing
    annotation files and per-image pose ``.mat`` files, and builds binary
    attribute labels from the presence/absence of mask classes (see
    MASK_LABELS).  When ``show_attr`` is given, only the requested
    attributes are kept and samples are filtered to those showing them.

    NOTE(review): several code paths below look unfinished (undefined
    names, live ``ipdb.set_trace()`` calls); they are flagged in place
    rather than changed.
    """

    def __init__(self,
                 image_size,
                 transform,
                 mode,
                 shuffling=False,
                 all_attr=0,
                 verbose=False,
                 sampled=100,
                 show_attr='',
                 CREATE_DATASET=False,
                 **kwargs):
        # image_size: target side length; transform: object exposing
        # resize_rgb / resize_mask / common / norm; mode: anything other
        # than 'train' is mapped to 'val'.
        self.image_size = image_size
        self.shuffling = shuffling
        mode = 'train' if mode == 'train' else 'val'
        self.mode = mode
        self.name = self.__class__.__name__
        self.all_attr = all_attr
        self.verbose = verbose
        # Comma-separated attribute names; note '' splits to [''] (truthy).
        self.show_attr = show_attr.split(',')
        self.sampled = sampled  # How much data to train (percentage)
        self.data_dir = 'data/{}'.format(self.name)
        ids = os.path.join(self.data_dir, 'list', self.mode + '.txt')
        # NOTE(review): file handle from open() is never closed here.
        self.ids = [f.strip() for f in open(ids).readlines()]
        self.colormap = loadmat(
            os.path.join(self.data_dir,
                         'LV-MHP-v2_colormap.mat'))['MHP_colormap']
        self.colorize = Colorize(self.colormap)
        self.data_dir = os.path.join(self.data_dir, self.mode)
        self.attr2idx = {}
        self.idx2attr = {}
        self.mask_label = MASK_LABELS
        self.mask_attr = MASK_ATTRS
        self.attr2filenames = {}
        self.NOTattr2filenames = {}
        self.transform_resize_img = transform.resize_rgb
        self.transform_resize_mask = transform.resize_mask
        self.transform_common = transform.common
        self.transform = transform
        if 'config' in kwargs.keys():
            self.config = kwargs['config']
        else:
            from types import SimpleNamespace
            self.config = SimpleNamespace()
        if self.verbose:
            print('Start preprocessing %s: %s!' % (self.name, mode))
        # Fixed seed so any shuffling below is reproducible.
        random.seed(1)
        if CREATE_DATASET:
            self.create_dataset()
        else:
            self.preprocess()
            # NOTE(review): `subsample` is not defined in this file chunk;
            # presumably provided elsewhere (mixin/subclass) -- confirm.
            self.filenames, self.labels = self.subsample(self.filenames,
                                                         self.labels)
        if self.verbose:
            _str = str(self.num_data)
            print('Finished preprocessing %s: %s (%s)!' %
                  (self.name, mode, _str))
        # self.write_lines()

    def write_lines(self):
        """Dump the selected filenames, one per line, next to the data."""
        with open('{}/LV_MHP_v2_list_{}.txt'.format(self.data_dir, self.mode),
                  'w') as f:
            for line in self.filenames:
                f.writelines(line + '\n')

    def histogram(self):
        """Write a per-attribute positive-count histogram to a text file."""
        from misc.utils import PRINT
        # Column-wise sum over the binary label matrix = positives per attr.
        values = np.sum(self.labels, axis=0)
        dict_ = {}
        # import ipdb; ipdb.set_trace()
        for key, value in zip(self.selected_attrs, values):
            dict_[key] = value
        total = 0
        with open('datasets/{}_histogram_attributes.txt'.format(self.name),
                  'w') as f:
            # Sort by count (then name), most frequent first.
            for key, value in sorted(dict_.items(),
                                     key=lambda kv: (kv[1], kv[0]),
                                     reverse=True):
                total += value
                PRINT(f, '{} {}'.format(key, value))
            PRINT(f, 'TOTAL {}'.format(total))

    def preprocess(self):
        """Scan all ids and build filenames/labels/segs/pose lists.

        One sample is produced per (image, person) pair that has a pose
        annotation; when ``show_attr`` is set, persons showing none of the
        requested attributes are skipped.
        """
        if self.show_attr != '':
            self.selected_attrs = self.show_attr
            self.config.ATTR = self.show_attr
        else:
            # Default binary attributes (positive + explicit negative).
            self.selected_attrs = [
                'NOT_Cap/hat',
                'Cap/hat',
                'NOT_Jacket/windbreaker/hoodie',
                'Jacket/windbreaker/hoodie',
            ]
        for i, attr in enumerate(self.selected_attrs):
            self.attr2idx[attr] = i
            self.idx2attr[i] = attr
            self.attr2filenames[attr] = []
            self.NOTattr2filenames[attr] = []
        # lines = self.subsample(lines)
        # if self.shuffling:
        #     random.shuffle(self.lines)
        # random.shuffle(self.lines)
        self.filenames = []
        self.labels = []
        self.segs = []
        self.pose = []
        no_pose = 0
        for i, line in enumerate(tqdm(self.ids, leave=False)):
            filename = os.path.join(self.data_dir, 'images', line + '.jpg')
            pose = os.path.join(self.data_dir, 'pose_annos', line + '.mat')
            # One parsing-annotation file per person in the image.
            segs = sorted(
                glob.glob(
                    os.path.join(self.data_dir, 'parsing_annos', line + '_*')))
            # import ipdb; ipdb.set_trace()
            no_show_attr = True
            # import ipdb; ipdb.set_trace()
            for seg in segs:
                person_id = int(
                    os.path.splitext(os.path.basename(seg))[0].split('_')
                    [-1]) - 1  # starts from 0
                # segmap = self.get_mask_from_file(seg, no_show=True)
                values_sem = self.get_mask_from_file(seg, label=True)
                # Unique mask values present = classes visible on this person.
                values_sem = values_sem.unique()
                label = []
                for attr in self.selected_attrs:
                    selected_value = self.get_value(values_sem, attr)
                    if selected_value >= 1:
                        label.append(selected_value)
                        # NOTE(review): line[0] is the first character of the
                        # id string -- looks like it should be `line`; confirm.
                        self.attr2filenames[attr].append(line[0])
                        no_show_attr = False
                    else:
                        label.append(0)
                        self.NOTattr2filenames[attr].append(line[0])
                if self.show_attr and no_show_attr:
                    continue
                try:
                    pose_id = loadmat(pose)['person_%d' % person_id]
                except BaseException:
                    # import ipdb; ipdb.set_trace()
                    no_pose += 1
                    continue
                # import ipdb; ipdb.set_trace()
                self.filenames.append(filename)
                self.labels.append(label)
                self.segs.append(seg)
                self.pose.append(pose_id)
        print("No pose found:", no_pose)
        if not self.show_attr:
            self.histogram()
        self.num_data = len(self.filenames)

    def create_dataset(self):
        """Build a cropped per-person dataset on disk (work in progress).

        NOTE(review): this path looks unfinished -- it contains a live
        ``ipdb.set_trace()``, appends an undefined ``label`` (only
        ``labels`` is computed), and ``self.filenames``/``self.labels``/
        ``self.segs``/``self.pose`` are never initialized on this branch.
        """
        no_pose = 0
        new_images = os.path.join(self.data_dir, 'new_images')
        os.makedirs(new_images, exist_ok=True)
        new_segs = os.path.join(self.data_dir, 'new_segs')
        os.makedirs(new_segs, exist_ok=True)
        new_pose = os.path.join(self.data_dir, 'new_pose')
        os.makedirs(new_pose, exist_ok=True)
        new_labels = os.path.join(self.data_dir, 'new_labels')
        os.makedirs(new_labels, exist_ok=True)
        self.selected_attrs = self.mask_attr.keys()
        for i, line in enumerate(tqdm(self.ids, leave=False)):
            filename = os.path.join(self.data_dir, 'images', line + '.jpg')
            pose = os.path.join(self.data_dir, 'pose_annos', line + '.mat')
            segs = sorted(
                glob.glob(
                    os.path.join(self.data_dir, 'parsing_annos', line + '_*')))
            # import ipdb; ipdb.set_trace()
            no_show_attr = True
            # import ipdb; ipdb.set_trace()
            for seg in segs:
                person_id = int(
                    os.path.splitext(os.path.basename(seg))[0].split('_')
                    [-1]) - 1  # starts from 0
                # segmap = self.get_mask_from_file(seg, no_show=True)
                segmap = self.get_mask_from_file(seg, label=True)[0]
                try:
                    pose_id = loadmat(pose)['person_%d' % person_id]
                except BaseException:
                    # import ipdb; ipdb.set_trace()
                    no_pose += 1
                    continue
                # NOTE(review): debugger breakpoint left in the loop.
                import ipdb
                ipdb.set_trace()
                labels = [self.mask_label[i] for i in segmap.unique()]
                # Tight bounding box around the non-background mask pixels.
                nonzero = torch.nonzero(segmap)
                bbox = nonzero.min(0)[0].tolist()
                bbox.extend(nonzero.max(0)[0].tolist())  # x1, y1, x2, y2
                self.filenames.append(filename)
                self.labels.append(label)
                self.segs.append(seg)
                self.pose.append(pose_id)
        print("No pose found:", no_pose)
        if not self.show_attr:
            self.histogram()
        self.num_data = len(self.filenames)

    def get_value(self, values, attr):
        """Return 1 if `attr` (or its negation for 'NOT_*') is present in
        the collection of mask values `values`, else 0."""
        NOT = False
        if 'NOT_' in attr:
            NOT = True
            attr = attr.replace('NOT_', '')
        # Position in MASK_LABELS == the integer mask value of the class.
        index = list(self.mask_label.values()).index(attr)
        value = int(index in values)
        if NOT:
            value = 1 - value
        if value == -1:
            import ipdb
            ipdb.set_trace()
        assert value != -1
        return value

    def get_data(self):
        """Return the (filenames, labels) pair built by preprocess()."""
        return self.filenames, self.labels

    def get_mask_from_file(self, maskname, no_show=False, label=False):
        """Load a parsing-annotation image and return it as mask tensor(s).

        With ``label=True`` the raw single-channel label map (values in
        0..255) is returned; otherwise it is scattered into one channel
        per class, or reduced to the selected attributes when showing.
        """
        mask = Image.open(maskname).convert('RGB')
        mask = self.transform_resize_mask(mask)
        mask = self.transform_common(mask)[0] * 255.  # 0, 255
        if self.show_attr and not no_show:
            labels_real = self.get_partial_mask(mask).unsqueeze(0)
        elif label:
            labels_real = mask
        else:
            labels_real = scatterMask(mask, num_channels=len(self.mask_label))
        # labels_real: C x size x size
        return labels_real  # 59 attrs

    def get_partial_mask(self, mask):
        """Zero out every mask value except those of the selected attrs."""
        new_mask = torch.zeros_like(mask)
        for attr in self.selected_attrs:
            label = self.mask_attr[attr]
            new_mask[mask == label] = label
        return new_mask

    def __getitem__(self, index):
        """Return (image, label, seg, keyp) for one sample.

        NOTE(review): this method looks unfinished -- ``self.attributes``
        and ``self.bbox`` are never set anywhere in this class, ``keyp``
        is used before being assigned, and ``self.segs`` entries are file
        paths here but are iterated with ``.items()`` below.  Flagged in
        place; confirm against the intended data layout before use.
        """
        filename = self.filenames[index]
        seg = self.segs[index]
        label = self.labels[index]
        pose = self.pose[index]
        image = Image.open(filename)
        # import ipdb; ipdb.set_trace()
        if self.show_attr:
            image = image.convert('RGBA')
            img2 = image.copy()
            zero_seg = np.zeros((image.size[::-1])).astype(np.uint8)
            org_seg = self.get_mask_from_file(seg, no_show=True)
            # NOTE(review): self.attributes is undefined (AttributeError).
            for idx, attr in enumerate(self.attributes):
                _label = label[idx]
                if _label == 1:
                    zero_seg += org_seg[self.mask_attr[attr]]
            import ipdb
            ipdb.set_trace()
            zero_seg = self.colorize(zero_seg) / 255.
        else:
            image = image.convert('RGB')
        # import ipdb; ipdb.set_trace()
        # One channel per attribute plus one for the background.
        seg = np.zeros((1 + len(self.attr2idx.keys()),
                        *image.size[::-1])).astype(np.uint8)
        # NOTE(review): self.segs[index] is a str path here -- .items()
        # would raise; this expects {label: polygon} dicts.  Confirm.
        for label, segs in self.segs[index].items():
            img_temp = Image.new('L', image.size, 0)
            draw = ImageDraw.Draw(img_temp)
            draw.polygon(segs, outline=1, fill=1)
            img_temp = np.array(img_temp)
            seg[label + 1][img_temp == 1] = 1
        seg[0][seg.sum(0) == 0] = 1  # background
        # seg = seg[None,:,:,:].repeat(3,0).transpose(1,0,2,3)
        # to match the transform variable
        # import ipdb; ipdb.set_trace()
        # NOTE(review): self.bbox is never populated in this class.
        bbox = self.bbox[index]  # x1,y1,x2,y2
        # Grow the person crop by a 7.5% margin on each side, clamped
        # to the image bounds.
        margin = (0.075, 0.075)
        width_ = image.size[0] * margin[0]
        height_ = image.size[1] * margin[1]
        bbox[0] = max(0, bbox[0] - width_)
        bbox[1] = max(0, bbox[1] - height_)
        bbox[2] = min(image.size[0], bbox[2] + width_)
        bbox[3] = min(image.size[1], bbox[3] + height_)
        image = image.crop(bbox)
        # import ipdb; ipdb.set_trace()
        # NOTE(review): `keyp` is used before assignment (NameError).
        keyp = keyp.crop(bbox)
        keyp = self.transform_resize_img(keyp)
        keyp = self.transform_common(keyp)[0].unsqueeze(0)
        seg = [Image.fromarray(i).crop(bbox).convert('RGB') for i in seg]
        seg = [self.transform_resize_mask(i) for i in seg]
        seg = [self.transform_common(i)[0] for i in seg]
        seg = torch.stack(seg, dim=0) * 255
        # seg = scatterMask(seg, num_channels=1+len(self.attr2idx.keys()))
        image = self.transform_resize_img(image)
        image = self.transform_common(image)
        image = self.transform.norm(image)
        # import ipdb; ipdb.set_trace()
        if self.show_attr:
            # Blend the image with the keypoint overlay for visualization.
            alpha = 0.4
            image = (alpha * image) + (1 - alpha) * keyp
        label = torch.FloatTensor(self.labels[index])
        if self.config.TRAIN_MASK:  # or self.config.ONLY_GEN:
            # Swap image and segmentation when training on masks.
            _seg = image
            image = seg
            seg = _seg
        return image, label, seg, keyp

    def __len__(self):
        """Number of (image, person) samples."""
        return self.num_data

    def shuffle(self, seed):
        """Shuffle filenames and labels in lockstep (same seed both times)."""
        random.seed(seed)
        random.shuffle(self.filenames)
        random.seed(seed)
        random.shuffle(self.labels)
def show_me(args):
    """Iterate the LV_MHP_v2 dataset and display each batch as an image grid.

    ``args.attr`` selects the attributes to visualize and
    ``args.CREATE_DATASET`` is forwarded to the dataset constructor.
    """
    from data_loader import get_transformations
    from torch.utils.data import DataLoader
    from torchvision.utils import make_grid
    from misc.utils import denorm
    import numpy as np
    import matplotlib.pyplot as plt

    selected_attrs = args.attr  # .split(',')
    split = 'train'
    transform = get_transformations(mode='test', image_size=256)
    dataset = LV_MHP_v2(256,
                        transform,
                        split,
                        show_attr=selected_attrs,
                        CREATE_DATASET=args.CREATE_DATASET,
                        verbose=True)
    loader = DataLoader(dataset=dataset,
                        batch_size=64,
                        shuffle=False,
                        num_workers=4)
    for batch_images, batch_labels, *_ in loader:
        # Undo normalization, tile the batch into one grid, then plot it.
        grid = make_grid(denorm(batch_images)).numpy()
        plt.figure(figsize=(20, 20))
        plt.imshow(np.transpose(grid, (1, 2, 0)), interpolation='nearest')
        plt.show(block=True)
class Colorize(object):
    """Convert a single-channel label map into an RGB byte image.

    The colormap is a (num_labels, 3) array; pixel value ``i`` in the
    input is painted with ``cmap[i]``.
    """

    def __init__(self, cmap):
        # self.cmap = labelcolormap(n)
        # Keep the colormap as a torch tensor so rows index directly.
        self.cmap = torch.from_numpy(cmap)

    def __call__(self, gray_image):
        """gray_image: (1, H, W) label tensor -> (3, H, W) uint8 tensor."""
        shape = gray_image.size()
        rgb = torch.ByteTensor(3, shape[1], shape[2]).fill_(0)
        label_map = gray_image[0]
        for idx, color in enumerate(self.cmap):
            # Boolean mask of all pixels carrying this label.
            selected = (label_map == idx).cpu()
            for channel in range(3):
                rgb[channel][selected] = color[channel]
        return rgb
if __name__ == '__main__':
    # CLI entry point, e.g.: ipython datasets/DeepFashion2.py -- --attr=vest
    import argparse
    cli = argparse.ArgumentParser()
    cli.add_argument('--attr', type=str, default='all')
    cli.add_argument('--CREATE_DATASET', action='store_true', default=False)
    # train_inception()
    show_me(cli.parse_args())
| [
"PIL.Image.new",
"scipy.io.loadmat",
"torch.from_numpy",
"numpy.array",
"PIL.ImageDraw.Draw",
"torchvision.utils.make_grid",
"torch.ByteTensor",
"sys.path.append",
"argparse.ArgumentParser",
"torch.zeros_like",
"misc.utils.denorm",
"random.shuffle",
"types.SimpleNamespace",
"ipdb.set_trace... | [((349, 360), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (358, 360), False, 'import os\n'), ((399, 427), 'sys.path.append', 'sys.path.append', (['module_path'], {}), '(module_path)\n', (414, 427), False, 'import sys\n'), ((15340, 15388), 'data_loader.get_transformations', 'get_transformations', ([], {'mode': '"""test"""', 'image_size': '(256)'}), "(mode='test', image_size=256)\n", (15359, 15388), False, 'from data_loader import get_transformations\n'), ((15622, 15691), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'data', 'batch_size': '(64)', 'shuffle': '(False)', 'num_workers': '(4)'}), '(dataset=data, batch_size=64, shuffle=False, num_workers=4)\n', (15632, 15691), False, 'from torch.utils.data import DataLoader\n'), ((16727, 16752), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (16750, 16752), False, 'import argparse\n'), ((3129, 3184), 'os.path.join', 'os.path.join', (['self.data_dir', '"""list"""', "(self.mode + '.txt')"], {}), "(self.data_dir, 'list', self.mode + '.txt')\n", (3141, 3184), False, 'import os\n'), ((3460, 3498), 'os.path.join', 'os.path.join', (['self.data_dir', 'self.mode'], {}), '(self.data_dir, self.mode)\n', (3472, 3498), False, 'import os\n'), ((4185, 4199), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (4196, 4199), False, 'import random\n'), ((4931, 4958), 'numpy.sum', 'np.sum', (['self.labels'], {'axis': '(0)'}), '(self.labels, axis=0)\n', (4937, 4958), True, 'import numpy as np\n'), ((8480, 8521), 'os.path.join', 'os.path.join', (['self.data_dir', '"""new_images"""'], {}), "(self.data_dir, 'new_images')\n", (8492, 8521), False, 'import os\n'), ((8530, 8568), 'os.makedirs', 'os.makedirs', (['new_images'], {'exist_ok': '(True)'}), '(new_images, exist_ok=True)\n', (8541, 8568), False, 'import os\n'), ((8588, 8627), 'os.path.join', 'os.path.join', (['self.data_dir', '"""new_segs"""'], {}), "(self.data_dir, 'new_segs')\n", (8600, 8627), False, 'import os\n'), ((8636, 
8672), 'os.makedirs', 'os.makedirs', (['new_segs'], {'exist_ok': '(True)'}), '(new_segs, exist_ok=True)\n', (8647, 8672), False, 'import os\n'), ((8692, 8731), 'os.path.join', 'os.path.join', (['self.data_dir', '"""new_pose"""'], {}), "(self.data_dir, 'new_pose')\n", (8704, 8731), False, 'import os\n'), ((8740, 8776), 'os.makedirs', 'os.makedirs', (['new_pose'], {'exist_ok': '(True)'}), '(new_pose, exist_ok=True)\n', (8751, 8776), False, 'import os\n'), ((8798, 8839), 'os.path.join', 'os.path.join', (['self.data_dir', '"""new_labels"""'], {}), "(self.data_dir, 'new_labels')\n", (8810, 8839), False, 'import os\n'), ((8848, 8886), 'os.makedirs', 'os.makedirs', (['new_labels'], {'exist_ok': '(True)'}), '(new_labels, exist_ok=True)\n', (8859, 8886), False, 'import os\n'), ((11708, 11730), 'torch.zeros_like', 'torch.zeros_like', (['mask'], {}), '(mask)\n', (11724, 11730), False, 'import torch\n'), ((12072, 12092), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (12082, 12092), False, 'from PIL import Image\n'), ((14594, 14631), 'torch.FloatTensor', 'torch.FloatTensor', (['self.labels[index]'], {}), '(self.labels[index])\n', (14611, 14631), False, 'import torch\n'), ((14899, 14916), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (14910, 14916), False, 'import random\n'), ((14925, 14955), 'random.shuffle', 'random.shuffle', (['self.filenames'], {}), '(self.filenames)\n', (14939, 14955), False, 'import random\n'), ((14964, 14981), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (14975, 14981), False, 'import random\n'), ((14990, 15017), 'random.shuffle', 'random.shuffle', (['self.labels'], {}), '(self.labels)\n', (15004, 15017), False, 'import random\n'), ((15850, 15862), 'misc.utils.denorm', 'denorm', (['data'], {}), '(data)\n', (15856, 15862), False, 'from misc.utils import denorm\n'), ((15910, 15938), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (15920, 15938), True, 'import 
matplotlib.pyplot as plt\n'), ((16022, 16042), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (16030, 16042), True, 'import matplotlib.pyplot as plt\n'), ((16158, 16180), 'torch.from_numpy', 'torch.from_numpy', (['cmap'], {}), '(cmap)\n', (16174, 16180), False, 'import torch\n'), ((4064, 4081), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '()\n', (4079, 4081), False, 'from types import SimpleNamespace\n'), ((6423, 6450), 'tqdm.tqdm', 'tqdm', (['self.ids'], {'leave': '(False)'}), '(self.ids, leave=False)\n', (6427, 6450), False, 'from tqdm import tqdm\n'), ((6476, 6528), 'os.path.join', 'os.path.join', (['self.data_dir', '"""images"""', "(line + '.jpg')"], {}), "(self.data_dir, 'images', line + '.jpg')\n", (6488, 6528), False, 'import os\n'), ((6548, 6604), 'os.path.join', 'os.path.join', (['self.data_dir', '"""pose_annos"""', "(line + '.mat')"], {}), "(self.data_dir, 'pose_annos', line + '.mat')\n", (6560, 6604), False, 'import os\n'), ((8972, 8999), 'tqdm.tqdm', 'tqdm', (['self.ids'], {'leave': '(False)'}), '(self.ids, leave=False)\n', (8976, 8999), False, 'from tqdm import tqdm\n'), ((9025, 9077), 'os.path.join', 'os.path.join', (['self.data_dir', '"""images"""', "(line + '.jpg')"], {}), "(self.data_dir, 'images', line + '.jpg')\n", (9037, 9077), False, 'import os\n'), ((9097, 9153), 'os.path.join', 'os.path.join', (['self.data_dir', '"""pose_annos"""', "(line + '.mat')"], {}), "(self.data_dir, 'pose_annos', line + '.mat')\n", (9109, 9153), False, 'import os\n'), ((10950, 10966), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (10964, 10966), False, 'import ipdb\n'), ((12591, 12607), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (12605, 12607), False, 'import ipdb\n'), ((14188, 14211), 'torch.stack', 'torch.stack', (['seg'], {'dim': '(0)'}), '(seg, dim=0)\n', (14199, 14211), False, 'import torch\n'), ((15958, 15987), 'numpy.transpose', 'np.transpose', (['data', '(1, 2, 0)'], {}), '(data, (1, 2, 0))\n', 
(15970, 15987), True, 'import numpy as np\n'), ((3292, 3345), 'os.path.join', 'os.path.join', (['self.data_dir', '"""LV-MHP-v2_colormap.mat"""'], {}), "(self.data_dir, 'LV-MHP-v2_colormap.mat')\n", (3304, 3345), False, 'import os\n'), ((10017, 10033), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (10031, 10033), False, 'import ipdb\n'), ((10131, 10152), 'torch.nonzero', 'torch.nonzero', (['segmap'], {}), '(segmap)\n', (10144, 10152), False, 'import torch\n'), ((11171, 11191), 'PIL.Image.open', 'Image.open', (['maskname'], {}), '(maskname)\n', (11181, 11191), False, 'from PIL import Image\n'), ((12970, 12999), 'PIL.Image.new', 'Image.new', (['"""L"""', 'image.size', '(0)'], {}), "('L', image.size, 0)\n", (12979, 12999), False, 'from PIL import Image\n'), ((13023, 13047), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img_temp'], {}), '(img_temp)\n', (13037, 13047), False, 'from PIL import ImageDraw\n'), ((13129, 13147), 'numpy.array', 'np.array', (['img_temp'], {}), '(img_temp)\n', (13137, 13147), True, 'import numpy as np\n'), ((15878, 15893), 'torchvision.utils.make_grid', 'make_grid', (['data'], {}), '(data)\n', (15887, 15893), False, 'from torchvision.utils import make_grid\n'), ((16273, 16310), 'torch.ByteTensor', 'torch.ByteTensor', (['(3)', 'size[1]', 'size[2]'], {}), '(3, size[1], size[2])\n', (16289, 16310), False, 'import torch\n'), ((6679, 6736), 'os.path.join', 'os.path.join', (['self.data_dir', '"""parsing_annos"""', "(line + '_*')"], {}), "(self.data_dir, 'parsing_annos', line + '_*')\n", (6691, 6736), False, 'import os\n'), ((9228, 9285), 'os.path.join', 'os.path.join', (['self.data_dir', '"""parsing_annos"""', "(line + '_*')"], {}), "(self.data_dir, 'parsing_annos', line + '_*')\n", (9240, 9285), False, 'import os\n'), ((12257, 12283), 'numpy.zeros', 'np.zeros', (['image.size[::-1]'], {}), '(image.size[::-1])\n', (12265, 12283), True, 'import numpy as np\n'), ((7854, 7867), 'scipy.io.loadmat', 'loadmat', (['pose'], {}), '(pose)\n', (7861, 7867), 
False, 'from scipy.io import loadmat\n'), ((9782, 9795), 'scipy.io.loadmat', 'loadmat', (['pose'], {}), '(pose)\n', (9789, 9795), False, 'from scipy.io import loadmat\n'), ((13999, 14017), 'PIL.Image.fromarray', 'Image.fromarray', (['i'], {}), '(i)\n', (14014, 14017), False, 'from PIL import Image\n'), ((6959, 6980), 'os.path.basename', 'os.path.basename', (['seg'], {}), '(seg)\n', (6975, 6980), False, 'import os\n'), ((9508, 9529), 'os.path.basename', 'os.path.basename', (['seg'], {}), '(seg)\n', (9524, 9529), False, 'import os\n')] |
import numpy as np
from scipy import optimize
class Parameter:
    """A named, mutable scalar used as a free fit parameter.

    Calling the instance returns its current value, which lets model
    functions read parameters as ``p()``; the optimizer updates them
    in place via :meth:`set`.
    """
    def __init__(self, value, name=''):
        # Current numeric value and an optional label used when the
        # fit results are reported by name.
        self.value = value
        self.name = name
    def set(self, value):
        """Replace the stored value (called by the optimizer each step)."""
        self.value = value
    def __call__(self):
        """Return the current value."""
        return self.value
    def __repr__(self):
        # Added for debuggability; does not affect existing callers.
        return f"Parameter(value={self.value!r}, name={self.name!r})"
def gauss(gA, gx0, gs):
    """Build a single-Gaussian model for fitting.

    Returns a ``(parameters, fitfunc)`` pair: the parameters are the
    amplitude ``A``, centre ``x0`` and width ``s`` (initialised from the
    arguments), and ``fitfunc(x)`` evaluates
    ``A * exp(-(x - x0)**2 / (2*s)**2)`` using the parameters' current
    values, so changing a parameter changes later evaluations.
    """
    amp = Parameter(gA, 'A')
    centre = Parameter(gx0, 'x0')
    width = Parameter(gs, 's')

    def fitfunc(x):
        # Read the live parameter values at call time.
        return amp() * np.exp(-(x - centre()) ** 2 / (2 * width()) ** 2)

    return [amp, centre, width], fitfunc
def result_dict(p1, cov, info, mesg, success, x, y, p0, fitfunc):
    """Package the output of ``scipy.optimize.leastsq`` into a dict.

    Parameters
    ----------
    p1, cov, info, mesg, success :
        The five values returned by ``optimize.leastsq(..., full_output=True)``.
    x, y :
        The data that was fitted.
    p0 :
        List of parameter objects; each must expose a ``name`` attribute.
    fitfunc :
        The model function that was fitted (stored for later evaluation).

    Returns a dict with the best-fit parameters (as a list and keyed by
    name), chi-square statistics, and per-parameter errors scaled by
    ``sqrt(chisq/dof)``.  When the fit failed, ``chisq`` and ``dof``
    keep the placeholder value 1 and the error fields stay empty.
    """
    chisq = 1
    dof = 1
    error_dict = {}
    error_list = []
    params_dict = {}
    if success:
        chisq = sum(info['fvec'] * info['fvec'])
        dof = len(y) - len(p0)
        # Reduced-chi-square scale factor, computed once instead of
        # twice per parameter as before.
        scale = np.sqrt(chisq / dof)
        for i, pmin in enumerate(p1):
            err = np.sqrt(cov[i, i]) * scale
            error_dict[p0[i].name] = err
            error_list.append(err)
            params_dict[p0[i].name] = pmin
    result = {
        'success': success,
        'params': p1,
        'params_dict': params_dict,
        'chisq': chisq,
        'dof': dof,
        'residuals_rms': np.sqrt(chisq / dof),
        'reduced_chisq': chisq / dof,
        'error': error_list,
        'error_dict': error_dict,
        'cov': cov,
        'p0': p0,
        'fitfunc': fitfunc,
        'x': x,
        'y': y,
    }
    return result
def do_fit(x, y, p0, fitfunc):
    """Least-squares fit of ``fitfunc`` to (x, y)... thanks teamdiamond.

    Parameters
    ----------
    x, y :
        Data arrays; if ``x`` is None an index array ``0..len(y)-1`` is used.
    p0 :
        List of Parameter objects; they are updated in place while the
        optimizer iterates.
    fitfunc :
        Model function of ``x`` that reads the parameters in ``p0``.

    Returns the result dict built by :func:`result_dict`.
    """
    # Residual function for leastsq: push the trial values into the
    # parameter objects, then compare the model against the data.
    def f(params):
        for p, value in zip(p0, params):
            p.set(value)
        return y - fitfunc(x)
    if x is None:
        x = np.arange(y.shape[0])  # bugfix: was a bare, undefined `arange`
    p = [param() for param in p0]
    # do the fit and process
    p1, cov, info, mesg, success = optimize.leastsq(f, p, full_output=True, maxfev=len(x)*100)
    # Bugfix: `cov == None` on a numpy array is elementwise and raises in
    # boolean context for >=2 parameters; identity test is the correct form.
    if not success or cov is None:  # FIXME: find a better solution!!!
        success = False
        print('ERROR: Fit did not converge !')
        print('reason: '+str(mesg))
    result = result_dict(p1, cov, info, mesg, success, x, y, p0,
                         fitfunc)
    # package the result neatly
    return result
def fit_line(ga):
    """Build a one-parameter linear model ``y = a * x`` through the origin.

    Returns ``([a], fitfunc)`` where ``a`` is a Parameter initialised to
    ``ga`` (named ``'a'``) and ``fitfunc(x)`` evaluates ``a() * x`` with
    the parameter's current value.
    """
    slope = Parameter(ga, 'a')

    def fitfunc(x):
        return slope() * x

    return [slope], fitfunc
if __name__ == '__main__':
    # Import-only module: nothing to run as a script.
    pass
"numpy.sqrt"
] | [((1375, 1395), 'numpy.sqrt', 'np.sqrt', (['(chisq / dof)'], {}), '(chisq / dof)\n', (1382, 1395), True, 'import numpy as np\n'), ((1002, 1020), 'numpy.sqrt', 'np.sqrt', (['cov[i, i]'], {}), '(cov[i, i])\n', (1009, 1020), True, 'import numpy as np\n'), ((1020, 1040), 'numpy.sqrt', 'np.sqrt', (['(chisq / dof)'], {}), '(chisq / dof)\n', (1027, 1040), True, 'import numpy as np\n'), ((1120, 1138), 'numpy.sqrt', 'np.sqrt', (['cov[i, i]'], {}), '(cov[i, i])\n', (1127, 1138), True, 'import numpy as np\n'), ((1138, 1158), 'numpy.sqrt', 'np.sqrt', (['(chisq / dof)'], {}), '(chisq / dof)\n', (1145, 1158), True, 'import numpy as np\n')] |
import numpy as np
from glob import glob
import xarray as xr
from argparse import ArgumentParser
import warnings
# NOTE(review): silences ALL warnings globally — confirm this is intended.
warnings.filterwarnings("ignore")
# compute climatology for one region
p = ArgumentParser()
p.add_argument('-region', choices=('NPSG','EqPac','SO'), action = "store", dest = "region", help ='region where particles released')
args = p.parse_args()
region = args.region
# Year of the model output to process.
yr = '2004'
# Absolute paths to the NEMO-MEDUSA model output (physics, biogeochemistry,
# and the horizontal grid definition).
dirread_NEMO = '/data/oceanparcels/input_data/NEMO-MEDUSA/ORCA0083-N006/means/'
dirread_bgc_NEMO = '/data/oceanparcels/input_data/NEMO-MEDUSA_BGC/ORCA0083-N006/means/'
dirread_mesh = '/data/oceanparcels/input_data/NEMO-MEDUSA/ORCA0083-N006/domain/'
# Sorted file lists per output type ('d05' presumably = 5-day means — verify).
# NOTE(review): the U/V/W velocity file lists are never used below.
ufiles_NEMO = sorted(glob(dirread_NEMO+'ORCA0083-N06_'+yr+'*d05U.nc'))
vfiles_NEMO = sorted(glob(dirread_NEMO+'ORCA0083-N06_'+yr+'*d05V.nc'))
wfiles_NEMO = sorted(glob(dirread_NEMO+'ORCA0083-N06_'+yr+'*d05W.nc'))
pfiles_NEMO = sorted(glob(dirread_bgc_NEMO+'ORCA0083-N06_'+yr+'*d05P.nc'))
ppfiles_NEMO = sorted(glob(dirread_bgc_NEMO+'ORCA0083-N06_'+yr+'*d05D.nc'))
tsfiles_NEMO = sorted(glob(dirread_NEMO+'ORCA0083-N06_'+yr+'*d05T.nc'))
mesh_mask_NEMO = dirread_mesh+'coordinates.nc'
# Open the first file of each series; the remaining files are appended
# along the time axis later in the script.
ds_pp_NEMO = xr.open_dataset(ppfiles_NEMO[0])
ds_p_NEMO = xr.open_dataset(pfiles_NEMO[0])
ds_ts_NEMO = xr.open_dataset(tsfiles_NEMO[0])
mesh_mask = xr.open_dataset(mesh_mask_NEMO, decode_times=False)
def getclosest_ij(lats, lons, latpt, lonpt):
    """Return the (row, col) index of the grid point nearest to (latpt, lonpt).

    Distance is measured as the squared difference in lat/lon space over
    the 2-D coordinate arrays ``lats`` and ``lons``.
    """
    # Squared distance from every grid node to the target point.
    sq_dist = (lats - latpt) ** 2 + (lons - lonpt) ** 2
    # argmin gives a flat index; unravel it back to 2-D grid indices.
    return np.unravel_index(sq_dist.argmin(), lats.shape)
# 10 x 10 grids of particle-release positions for each study region:
# NPSG (North Pacific Subtropical Gyre), EqPac (equatorial Pacific) and
# SO (Southern Ocean).
n_res = 10
lat_release_NPSG = np.tile(np.linspace(23,32,n_res),[n_res,1])
lon_release_NPSG = np.tile(np.linspace(-143,-134,n_res),[n_res,1])
# NOTE(review): the meshgrid products below are never used later.
lons_NPSG, lats_NPSG = np.meshgrid(lon_release_NPSG, lat_release_NPSG)
lat_release_EqPac = np.tile(np.linspace(-4.5,4.5,n_res),[n_res,1])
lon_release_EqPac = np.tile(np.linspace(-148,-139,n_res),[n_res,1])
lons_EqPac, lats_EqPac = np.meshgrid(lon_release_EqPac, lat_release_EqPac)
lat_release_SO = np.tile(np.linspace(-62,-53,n_res),[n_res,1])
lon_release_SO = np.tile(np.linspace(-115,-106,n_res),[n_res,1])
lons_SO, lats_SO = np.meshgrid(lon_release_SO, lat_release_SO)
# Look-up tables so the release grid can be selected by region name.
lons = {'NPSG': lon_release_NPSG,
        'EqPac': lon_release_EqPac,
        'SO': lon_release_SO}
lats = {'NPSG': lat_release_NPSG,
        'EqPac': lat_release_EqPac,
        'SO': lat_release_SO}
# Model-grid indices of the region corners: (0,0) is the first release
# point, (0,-1) the last point of the first row (assumes the model grid
# is oriented so this spans the box — TODO confirm).
iy_min, ix_min = getclosest_ij(mesh_mask['nav_lat'], mesh_mask['nav_lon'], lats[region][0,0], lons[region][0,0])
iy_max, ix_max = getclosest_ij(mesh_mask['nav_lat'], mesh_mask['nav_lon'], lats[region][0,-1], lons[region][0,-1])
# Slice each model variable down to the regional box (first file only;
# the remaining time steps are concatenated on below).
D_region = ds_p_NEMO['PHD'].isel(y=slice(iy_min,iy_max),x=slice(ix_min,ix_max))
PP_region = ds_pp_NEMO['TPP3'].isel(y=slice(iy_min,iy_max),x=slice(ix_min,ix_max))
T_region = ds_ts_NEMO['potemp'].isel(y=slice(iy_min,iy_max),x=slice(ix_min,ix_max))
SAL_region = ds_ts_NEMO['salin'].isel(y=slice(iy_min,iy_max),x=slice(ix_min,ix_max))
TAU_region = ds_ts_NEMO['taum'].isel(y=slice(iy_min,iy_max),x=slice(ix_min,ix_max))
MLD_region = ds_ts_NEMO['mldr10_1'].isel(y=slice(iy_min,iy_max),x=slice(ix_min,ix_max))
W10_region = ds_ts_NEMO['sowindsp'].isel(y=slice(iy_min,iy_max),x=slice(ix_min,ix_max))
EZD_region = ds_pp_NEMO['MED_XZE'].isel(y=slice(iy_min,iy_max),x=slice(ix_min,ix_max))
# Extend every regional variable along the time axis by appending the
# remaining files.  The loops are grouped per file series so each NetCDF
# file is opened exactly once (the original code re-opened every T-grid
# file five times and every 'D' file twice, once per variable).
region_slice = dict(y=slice(iy_min, iy_max), x=slice(ix_min, ix_max))
for i, filename in enumerate(ppfiles_NEMO[1:]):
    # 'D' biogeochemistry files: primary production and euphotic-zone depth.
    ds_0 = xr.open_dataset(filename)
    print(i)
    PP_region = xr.concat([PP_region, ds_0['TPP3'].isel(**region_slice)], 'time_counter')
    EZD_region = xr.concat([EZD_region, ds_0['MED_XZE'].isel(**region_slice)], 'time_counter')
for i, filename in enumerate(pfiles_NEMO[1:]):
    # 'P' biogeochemistry files: PHD (presumably diatom biomass — verify).
    ds_0 = xr.open_dataset(filename)
    print(i)
    D_region = xr.concat([D_region, ds_0['PHD'].isel(**region_slice)], 'time_counter')
for i, filename in enumerate(tsfiles_NEMO[1:]):
    # T-grid files: mixed-layer depth, wind stress, wind speed,
    # potential temperature and salinity.
    ds_0 = xr.open_dataset(filename)
    print(i)
    MLD_region = xr.concat([MLD_region, ds_0['mldr10_1'].isel(**region_slice)], 'time_counter')
    TAU_region = xr.concat([TAU_region, ds_0['taum'].isel(**region_slice)], 'time_counter')
    W10_region = xr.concat([W10_region, ds_0['sowindsp'].isel(**region_slice)], 'time_counter')
    T_region = xr.concat([T_region, ds_0['potemp'].isel(**region_slice)], 'time_counter')
    SAL_region = xr.concat([SAL_region, ds_0['salin'].isel(**region_slice)], 'time_counter')
# Time- and horizontal-mean vertical profiles of primary production and
# of the 'PHD' tracer; NaNs (land/masked cells) are replaced by zeros.
PP_profile = PP_region.mean('time_counter').mean('y').mean('x')
PP_profile = np.nan_to_num(PP_profile)
D_profile = D_region.mean('time_counter').mean('y').mean('x')
D_profile = np.nan_to_num(D_profile)
# In-situ density from salinity/temperature/depth via a fixed-coefficient
# polynomial equation of state.  The reduced variables (SAu, CTu, Zu,
# deltaS) and the R### coefficients appear to be a TEOS-10 polynomial fit
# (Roquet et al. style) — NOTE(review): confirm the exact source.
Z = - ds_ts_NEMO['deptht']
SA = SAL_region
CT = T_region
SAu = 40 * 35.16504 / 35
CTu = 40
Zu = 1e4
deltaS = 32
R000 = 8.0189615746e+02
R100 = 8.6672408165e+02
R200 = -1.7864682637e+03
R300 = 2.0375295546e+03
R400 = -1.2849161071e+03
R500 = 4.3227585684e+02
R600 = -6.0579916612e+01
R010 = 2.6010145068e+01
R110 = -6.5281885265e+01
R210 = 8.1770425108e+01
R310 = -5.6888046321e+01
R410 = 1.7681814114e+01
R510 = -1.9193502195e+00
R020 = -3.7074170417e+01
R120 = 6.1548258127e+01
R220 = -6.0362551501e+01
R320 = 2.9130021253e+01
R420 = -5.4723692739e+00
R030 = 2.1661789529e+01
R130 = -3.3449108469e+01
R230 = 1.9717078466e+01
R330 = -3.1742946532e+00
R040 = -8.3627885467e+00
R140 = 1.1311538584e+01
R240 = -5.3563304045e+00
R050 = 5.4048723791e-01
R150 = 4.8169980163e-01
R060 = -1.9083568888e-01
R001 = 1.9681925209e+01
R101 = -4.2549998214e+01
R201 = 5.0774768218e+01
R301 = -3.0938076334e+01
R401 = 6.6051753097e+00
R011 = -1.3336301113e+01
R111 = -4.4870114575e+00
R211 = 5.0042598061e+00
R311 = -6.5399043664e-01
R021 = 6.7080479603e+00
R121 = 3.5063081279e+00
R221 = -1.8795372996e+00
R031 = -2.4649669534e+00
R131 = -5.5077101279e-01
R041 = 5.5927935970e-01
R002 = 2.0660924175e+00
R102 = -4.9527603989e+00
R202 = 2.5019633244e+00
R012 = 2.0564311499e+00
R112 = -2.1311365518e-01
R022 = -1.2419983026e+00
R003 = -2.3342758797e-02
R103 = -1.8507636718e-02
R013 = 3.7969820455e-01
# Non-dimensionalised salinity, temperature and depth inputs.
ss = np.sqrt((SA + deltaS) / SAu)
tt = CT / CTu
zz = -Z / Zu
# Horner-style evaluation of the polynomial, ordered by powers of depth.
rz3 = R013 * tt + R103 * ss + R003
rz2 = (R022 * tt + R112 * ss + R012) * tt + (R202 * ss + R102) * ss + R002
rz1 = (((R041 * tt + R131 * ss + R031) * tt + (R221 * ss + R121) * ss + R021) * tt + ((R311 * ss + R211) * ss + R111) * ss + R011) * tt + (((R401 * ss + R301) * ss + R201) * ss + R101) * ss + R001
rz0 = (((((R060 * tt + R150 * ss + R050) * tt + (R240 * ss + R140) * ss + R040) * tt + ((R330 * ss + R230) * ss + R130) * ss + R030) * tt + (((R420 * ss + R320) * ss + R220) * ss + R120) * ss + R020) * tt + ((((R510 * ss + R410) * ss + R310) * ss + R210) * ss + R110) * ss + R010) * tt + (((((R600 * ss + R500) * ss + R400) * ss + R300) * ss + R200) * ss + R100) * ss + R000
# Collapse the depth polynomial: rho = ((rz3*z + rz2)*z + rz1)*z + rz0.
RHO_region = ((rz3 * zz + rz2) * zz + rz1) * zz + rz0
# Constants for the turbulence/diffusivity parameterisation below.
g = 7.32e10/(86400.**2.)  # numerically ~9.81; gravity via a per-day**2 conversion
rho_a = 1.22              # air density [kg m-3] (typical value — verify units)
wave_age = 35
phi = 0.9
vk = 0.4                  # von Karman constant
# Friction velocities from wind stress: air-side (over rho_a) and
# water-side (over the surface density computed above).
u_s_a = np.sqrt(TAU_region/rho_a)
u_s_w = np.sqrt(np.divide(TAU_region,RHO_region.isel(deptht=0)))
alpha = (vk * u_s_w) / phi
beta = np.divide((wave_age * u_s_a), W10_region)
# Wave-age-dependent roughness length z0 — NOTE(review): confirm which
# parameterisation the 3.5153e-5 * beta**-0.42 form comes from.
z0 = 3.5153e-5 * np.power(beta, -0.42) * np.square(W10_region) / g
# KPP-style vertical diffusivity profile on the model depth axis.
KPP_region = alpha * (ds_ts_NEMO['deptht'] + z0) * np.square(1-np.divide(ds_ts_NEMO['deptht'],MLD_region))
# Broadcast the 1-D depth axis to the full (time, y, x, depth) shape so
# it can be compared against the mixed-layer depth per cell.
Z_region = np.tile(ds_ts_NEMO['deptht'],(KPP_region.shape[2],1))
Z_region = np.tile(Z_region, (KPP_region.shape[1],1,1))
Z_region = np.tile(Z_region, (KPP_region.shape[0],1,1,1))
mld_region = np.tile(np.expand_dims(MLD_region,axis=3), (1,1,1,len(ds_ts_NEMO['deptht'])))
kpp_region = KPP_region.values
# Zero out diffusivity below the mixed layer.
kpp_region[Z_region>mld_region] = 0
# NOTE(review): RHO_profile is computed but never used or saved below.
RHO_profile = RHO_region.mean('time_counter').mean('y').mean('x')
RHO_profile = np.nan_to_num(RHO_profile)
# Average diffusivity over time and the horizontal box -> depth profile.
kpp_profile = np.mean(kpp_region, axis=(0,1,2))
# Save the climatology (profiles) and the MLD/EZD time series.
climatology = np.array([D_profile, PP_profile, kpp_profile])
np.save('/data/oceanparcels/output_data/data_Delphine/'+region+'_climatology', climatology)
MLD_time = MLD_region.mean('y').mean('x')
np.save('/data/oceanparcels/output_data/data_Delphine/'+region+'_MLD', MLD_time)
EZD_time = EZD_region.mean('y').mean('x')
np.save('/data/oceanparcels/output_data/data_Delphine/'+region+'_EZD', EZD_time)
"numpy.tile",
"numpy.mean",
"numpy.sqrt",
"argparse.ArgumentParser",
"numpy.divide",
"numpy.power",
"numpy.square",
"xarray.concat",
"numpy.array",
"numpy.linspace",
"glob.glob",
"numpy.unravel_index",
"numpy.save",
"numpy.expand_dims",
"numpy.meshgrid",
"xarray.open_dataset",
"warni... | [((113, 146), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (136, 146), False, 'import warnings\n'), ((190, 206), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (204, 206), False, 'from argparse import ArgumentParser\n'), ((1143, 1175), 'xarray.open_dataset', 'xr.open_dataset', (['ppfiles_NEMO[0]'], {}), '(ppfiles_NEMO[0])\n', (1158, 1175), True, 'import xarray as xr\n'), ((1188, 1219), 'xarray.open_dataset', 'xr.open_dataset', (['pfiles_NEMO[0]'], {}), '(pfiles_NEMO[0])\n', (1203, 1219), True, 'import xarray as xr\n'), ((1233, 1265), 'xarray.open_dataset', 'xr.open_dataset', (['tsfiles_NEMO[0]'], {}), '(tsfiles_NEMO[0])\n', (1248, 1265), True, 'import xarray as xr\n'), ((1279, 1330), 'xarray.open_dataset', 'xr.open_dataset', (['mesh_mask_NEMO'], {'decode_times': '(False)'}), '(mesh_mask_NEMO, decode_times=False)\n', (1294, 1330), True, 'import xarray as xr\n'), ((1963, 2010), 'numpy.meshgrid', 'np.meshgrid', (['lon_release_NPSG', 'lat_release_NPSG'], {}), '(lon_release_NPSG, lat_release_NPSG)\n', (1974, 2010), True, 'import numpy as np\n'), ((2172, 2221), 'numpy.meshgrid', 'np.meshgrid', (['lon_release_EqPac', 'lat_release_EqPac'], {}), '(lon_release_EqPac, lat_release_EqPac)\n', (2183, 2221), True, 'import numpy as np\n'), ((2371, 2414), 'numpy.meshgrid', 'np.meshgrid', (['lon_release_SO', 'lat_release_SO'], {}), '(lon_release_SO, lat_release_SO)\n', (2382, 2414), True, 'import numpy as np\n'), ((5511, 5536), 'numpy.nan_to_num', 'np.nan_to_num', (['PP_profile'], {}), '(PP_profile)\n', (5524, 5536), True, 'import numpy as np\n'), ((5612, 5636), 'numpy.nan_to_num', 'np.nan_to_num', (['D_profile'], {}), '(D_profile)\n', (5625, 5636), True, 'import numpy as np\n'), ((7031, 7059), 'numpy.sqrt', 'np.sqrt', (['((SA + deltaS) / SAu)'], {}), '((SA + deltaS) / SAu)\n', (7038, 7059), True, 'import numpy as np\n'), ((7905, 7932), 'numpy.sqrt', 'np.sqrt', (['(TAU_region / rho_a)'], {}), '(TAU_region / 
rho_a)\n', (7912, 7932), True, 'import numpy as np\n'), ((8033, 8072), 'numpy.divide', 'np.divide', (['(wave_age * u_s_a)', 'W10_region'], {}), '(wave_age * u_s_a, W10_region)\n', (8042, 8072), True, 'import numpy as np\n'), ((8262, 8317), 'numpy.tile', 'np.tile', (["ds_ts_NEMO['deptht']", '(KPP_region.shape[2], 1)'], {}), "(ds_ts_NEMO['deptht'], (KPP_region.shape[2], 1))\n", (8269, 8317), True, 'import numpy as np\n'), ((8327, 8373), 'numpy.tile', 'np.tile', (['Z_region', '(KPP_region.shape[1], 1, 1)'], {}), '(Z_region, (KPP_region.shape[1], 1, 1))\n', (8334, 8373), True, 'import numpy as np\n'), ((8383, 8432), 'numpy.tile', 'np.tile', (['Z_region', '(KPP_region.shape[0], 1, 1, 1)'], {}), '(Z_region, (KPP_region.shape[0], 1, 1, 1))\n', (8390, 8432), True, 'import numpy as np\n'), ((8671, 8697), 'numpy.nan_to_num', 'np.nan_to_num', (['RHO_profile'], {}), '(RHO_profile)\n', (8684, 8697), True, 'import numpy as np\n'), ((8713, 8748), 'numpy.mean', 'np.mean', (['kpp_region'], {'axis': '(0, 1, 2)'}), '(kpp_region, axis=(0, 1, 2))\n', (8720, 8748), True, 'import numpy as np\n'), ((8762, 8808), 'numpy.array', 'np.array', (['[D_profile, PP_profile, kpp_profile]'], {}), '([D_profile, PP_profile, kpp_profile])\n', (8770, 8808), True, 'import numpy as np\n'), ((8809, 8908), 'numpy.save', 'np.save', (["('/data/oceanparcels/output_data/data_Delphine/' + region + '_climatology')", 'climatology'], {}), "('/data/oceanparcels/output_data/data_Delphine/' + region +\n '_climatology', climatology)\n", (8816, 8908), True, 'import numpy as np\n'), ((8944, 9032), 'numpy.save', 'np.save', (["('/data/oceanparcels/output_data/data_Delphine/' + region + '_MLD')", 'MLD_time'], {}), "('/data/oceanparcels/output_data/data_Delphine/' + region + '_MLD',\n MLD_time)\n", (8951, 9032), True, 'import numpy as np\n'), ((9068, 9156), 'numpy.save', 'np.save', (["('/data/oceanparcels/output_data/data_Delphine/' + region + '_EZD')", 'EZD_time'], {}), "('/data/oceanparcels/output_data/data_Delphine/' + 
region + '_EZD',\n EZD_time)\n", (9075, 9156), True, 'import numpy as np\n'), ((667, 721), 'glob.glob', 'glob', (["(dirread_NEMO + 'ORCA0083-N06_' + yr + '*d05U.nc')"], {}), "(dirread_NEMO + 'ORCA0083-N06_' + yr + '*d05U.nc')\n", (671, 721), False, 'from glob import glob\n'), ((738, 792), 'glob.glob', 'glob', (["(dirread_NEMO + 'ORCA0083-N06_' + yr + '*d05V.nc')"], {}), "(dirread_NEMO + 'ORCA0083-N06_' + yr + '*d05V.nc')\n", (742, 792), False, 'from glob import glob\n'), ((809, 863), 'glob.glob', 'glob', (["(dirread_NEMO + 'ORCA0083-N06_' + yr + '*d05W.nc')"], {}), "(dirread_NEMO + 'ORCA0083-N06_' + yr + '*d05W.nc')\n", (813, 863), False, 'from glob import glob\n'), ((880, 938), 'glob.glob', 'glob', (["(dirread_bgc_NEMO + 'ORCA0083-N06_' + yr + '*d05P.nc')"], {}), "(dirread_bgc_NEMO + 'ORCA0083-N06_' + yr + '*d05P.nc')\n", (884, 938), False, 'from glob import glob\n'), ((956, 1014), 'glob.glob', 'glob', (["(dirread_bgc_NEMO + 'ORCA0083-N06_' + yr + '*d05D.nc')"], {}), "(dirread_bgc_NEMO + 'ORCA0083-N06_' + yr + '*d05D.nc')\n", (960, 1014), False, 'from glob import glob\n'), ((1032, 1086), 'glob.glob', 'glob', (["(dirread_NEMO + 'ORCA0083-N06_' + yr + '*d05T.nc')"], {}), "(dirread_NEMO + 'ORCA0083-N06_' + yr + '*d05T.nc')\n", (1036, 1086), False, 'from glob import glob\n'), ((1684, 1732), 'numpy.unravel_index', 'np.unravel_index', (['minindex_flattened', 'lats.shape'], {}), '(minindex_flattened, lats.shape)\n', (1700, 1732), True, 'import numpy as np\n'), ((1837, 1863), 'numpy.linspace', 'np.linspace', (['(23)', '(32)', 'n_res'], {}), '(23, 32, n_res)\n', (1848, 1863), True, 'import numpy as np\n'), ((1900, 1930), 'numpy.linspace', 'np.linspace', (['(-143)', '(-134)', 'n_res'], {}), '(-143, -134, n_res)\n', (1911, 1930), True, 'import numpy as np\n'), ((2040, 2069), 'numpy.linspace', 'np.linspace', (['(-4.5)', '(4.5)', 'n_res'], {}), '(-4.5, 4.5, n_res)\n', (2051, 2069), True, 'import numpy as np\n'), ((2107, 2137), 'numpy.linspace', 'np.linspace', (['(-148)', 
'(-139)', 'n_res'], {}), '(-148, -139, n_res)\n', (2118, 2137), True, 'import numpy as np\n'), ((2249, 2277), 'numpy.linspace', 'np.linspace', (['(-62)', '(-53)', 'n_res'], {}), '(-62, -53, n_res)\n', (2260, 2277), True, 'import numpy as np\n'), ((2312, 2342), 'numpy.linspace', 'np.linspace', (['(-115)', '(-106)', 'n_res'], {}), '(-115, -106, n_res)\n', (2323, 2342), True, 'import numpy as np\n'), ((3585, 3610), 'xarray.open_dataset', 'xr.open_dataset', (['filename'], {}), '(filename)\n', (3600, 3610), True, 'import xarray as xr\n'), ((3716, 3760), 'xarray.concat', 'xr.concat', (['[PP_region, PP_0]', '"""time_counter"""'], {}), "([PP_region, PP_0], 'time_counter')\n", (3725, 3760), True, 'import xarray as xr\n'), ((3819, 3844), 'xarray.open_dataset', 'xr.open_dataset', (['filename'], {}), '(filename)\n', (3834, 3844), True, 'import xarray as xr\n'), ((3947, 3989), 'xarray.concat', 'xr.concat', (['[D_region, D_0]', '"""time_counter"""'], {}), "([D_region, D_0], 'time_counter')\n", (3956, 3989), True, 'import xarray as xr\n'), ((4049, 4074), 'xarray.open_dataset', 'xr.open_dataset', (['filename'], {}), '(filename)\n', (4064, 4074), True, 'import xarray as xr\n'), ((4186, 4232), 'xarray.concat', 'xr.concat', (['[MLD_region, MLD_0]', '"""time_counter"""'], {}), "([MLD_region, MLD_0], 'time_counter')\n", (4195, 4232), True, 'import xarray as xr\n'), ((4292, 4317), 'xarray.open_dataset', 'xr.open_dataset', (['filename'], {}), '(filename)\n', (4307, 4317), True, 'import xarray as xr\n'), ((4425, 4471), 'xarray.concat', 'xr.concat', (['[TAU_region, TAU_0]', '"""time_counter"""'], {}), "([TAU_region, TAU_0], 'time_counter')\n", (4434, 4471), True, 'import xarray as xr\n'), ((4531, 4556), 'xarray.open_dataset', 'xr.open_dataset', (['filename'], {}), '(filename)\n', (4546, 4556), True, 'import xarray as xr\n'), ((4668, 4714), 'xarray.concat', 'xr.concat', (['[W10_region, W10_0]', '"""time_counter"""'], {}), "([W10_region, W10_0], 'time_counter')\n", (4677, 4714), True, 
'import xarray as xr\n'), ((4774, 4799), 'xarray.open_dataset', 'xr.open_dataset', (['filename'], {}), '(filename)\n', (4789, 4799), True, 'import xarray as xr\n'), ((4905, 4947), 'xarray.concat', 'xr.concat', (['[T_region, T_0]', '"""time_counter"""'], {}), "([T_region, T_0], 'time_counter')\n", (4914, 4947), True, 'import xarray as xr\n'), ((5007, 5032), 'xarray.open_dataset', 'xr.open_dataset', (['filename'], {}), '(filename)\n', (5022, 5032), True, 'import xarray as xr\n'), ((5141, 5187), 'xarray.concat', 'xr.concat', (['[SAL_region, SAL_0]', '"""time_counter"""'], {}), "([SAL_region, SAL_0], 'time_counter')\n", (5150, 5187), True, 'import xarray as xr\n'), ((5251, 5276), 'xarray.open_dataset', 'xr.open_dataset', (['filename'], {}), '(filename)\n', (5266, 5276), True, 'import xarray as xr\n'), ((5387, 5433), 'xarray.concat', 'xr.concat', (['[EZD_region, EZD_0]', '"""time_counter"""'], {}), "([EZD_region, EZD_0], 'time_counter')\n", (5396, 5433), True, 'import xarray as xr\n'), ((8452, 8486), 'numpy.expand_dims', 'np.expand_dims', (['MLD_region'], {'axis': '(3)'}), '(MLD_region, axis=3)\n', (8466, 8486), True, 'import numpy as np\n'), ((8116, 8137), 'numpy.square', 'np.square', (['W10_region'], {}), '(W10_region)\n', (8125, 8137), True, 'import numpy as np\n'), ((8092, 8113), 'numpy.power', 'np.power', (['beta', '(-0.42)'], {}), '(beta, -0.42)\n', (8100, 8113), True, 'import numpy as np\n'), ((8206, 8249), 'numpy.divide', 'np.divide', (["ds_ts_NEMO['deptht']", 'MLD_region'], {}), "(ds_ts_NEMO['deptht'], MLD_region)\n", (8215, 8249), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2022 The Deeplab2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for preprocess_utils."""
import numpy as np
import tensorflow as tf
from deeplab2.data.preprocessing import preprocess_utils
class PreprocessUtilsTest(tf.test.TestCase):
  """Unit tests for the flip, crop, scale and resize helpers in preprocess_utils."""
  def testNoFlipWhenProbIsZero(self):
    """flip_dim with prob=0 returns the input unchanged for every axis."""
    numpy_image = np.dstack([[[5., 6.],
                              [9., 0.]],
                             [[4., 3.],
                              [3., 5.]]])
    image = tf.convert_to_tensor(numpy_image)
    actual, is_flipped = preprocess_utils.flip_dim([image], prob=0, dim=0)
    self.assertAllEqual(numpy_image, actual)
    self.assertFalse(is_flipped)
    actual, is_flipped = preprocess_utils.flip_dim([image], prob=0, dim=1)
    self.assertAllEqual(numpy_image, actual)
    self.assertFalse(is_flipped)
    actual, is_flipped = preprocess_utils.flip_dim([image], prob=0, dim=2)
    self.assertAllEqual(numpy_image, actual)
    self.assertFalse(is_flipped)
  def testFlipWhenProbIsOne(self):
    """flip_dim with prob=1 flips along exactly the requested axis."""
    numpy_image = np.dstack([[[5., 6.],
                              [9., 0.]],
                             [[4., 3.],
                              [3., 5.]]])
    dim0_flipped = np.dstack([[[9., 0.],
                               [5., 6.]],
                              [[3., 5.],
                               [4., 3.]]])
    dim1_flipped = np.dstack([[[6., 5.],
                               [0., 9.]],
                              [[3., 4.],
                               [5., 3.]]])
    dim2_flipped = np.dstack([[[4., 3.],
                               [3., 5.]],
                              [[5., 6.],
                               [9., 0.]]])
    image = tf.convert_to_tensor(numpy_image)
    actual, is_flipped = preprocess_utils.flip_dim([image], prob=1, dim=0)
    self.assertAllEqual(dim0_flipped, actual)
    self.assertTrue(is_flipped)
    actual, is_flipped = preprocess_utils.flip_dim([image], prob=1, dim=1)
    self.assertAllEqual(dim1_flipped, actual)
    self.assertTrue(is_flipped)
    actual, is_flipped = preprocess_utils.flip_dim([image], prob=1, dim=2)
    self.assertAllEqual(dim2_flipped, actual)
    self.assertTrue(is_flipped)
  def testFlipMultipleImagesConsistentlyWhenProbIsOne(self):
    """All tensors passed in one list are flipped together."""
    numpy_image = np.dstack([[[5., 6.],
                              [9., 0.]],
                             [[4., 3.],
                              [3., 5.]]])
    numpy_label = np.dstack([[[0., 1.],
                              [2., 3.]]])
    image_dim1_flipped = np.dstack([[[6., 5.],
                                     [0., 9.]],
                                    [[3., 4.],
                                     [5., 3.]]])
    label_dim1_flipped = np.dstack([[[1., 0.],
                                     [3., 2.]]])
    image = tf.convert_to_tensor(numpy_image)
    label = tf.convert_to_tensor(numpy_label)
    image, label, is_flipped = preprocess_utils.flip_dim(
        [image, label], prob=1, dim=1)
    self.assertAllEqual(image_dim1_flipped, image)
    self.assertAllEqual(label_dim1_flipped, label)
    self.assertTrue(is_flipped)
  def testReturnRandomFlipsOnMultipleEvals(self):
    """The returned tensor and flag track the flip probability."""
    numpy_image = np.dstack([[[5., 6.],
                              [9., 0.]],
                             [[4., 3.],
                              [3., 5.]]])
    dim1_flipped = np.dstack([[[6., 5.],
                               [0., 9.]],
                              [[3., 4.],
                               [5., 3.]]])
    image = tf.convert_to_tensor(numpy_image)
    original_image, not_flipped = preprocess_utils.flip_dim(
        [image], prob=0, dim=1)
    flip_image, is_flipped = preprocess_utils.flip_dim(
        [image], prob=1.0, dim=1)
    self.assertAllEqual(numpy_image, original_image)
    self.assertFalse(not_flipped)
    self.assertAllEqual(dim1_flipped, flip_image)
    self.assertTrue(is_flipped)
  def testReturnCorrectCropOfSingleImage(self):
    """random_crop returns a patch that exists somewhere in the original."""
    np.random.seed(0)
    height, width = 10, 20
    image = np.random.randint(0, 256, size=(height, width, 3))
    crop_height, crop_width = 2, 4
    [cropped] = preprocess_utils.random_crop([tf.convert_to_tensor(image)],
                                             crop_height,
                                             crop_width)
    # Ensure we can find the cropped image in the original:
    is_found = False
    for x in range(0, width - crop_width + 1):
      for y in range(0, height - crop_height + 1):
        if np.isclose(image[y:y+crop_height, x:x+crop_width, :],
                      cropped).all():
          is_found = True
          break
    self.assertTrue(is_found)
  def testRandomCropMaintainsNumberOfChannels(self):
    """Cropping changes the spatial dims only, never the channel count."""
    np.random.seed(0)
    crop_height, crop_width = 10, 20
    image = np.random.randint(0, 256, size=(100, 200, 3))
    tf.random.set_seed(37)
    [cropped] = preprocess_utils.random_crop(
        [tf.convert_to_tensor(image)], crop_height, crop_width)
    self.assertListEqual(cropped.shape.as_list(), [crop_height, crop_width, 3])
  def testReturnDifferentCropAreasOnTwoEvals(self):
    """Two independent crop calls pick different (random) areas."""
    tf.random.set_seed(0)
    crop_height, crop_width = 2, 3
    image = np.random.randint(0, 256, size=(100, 200, 3))
    [cropped0] = preprocess_utils.random_crop(
        [tf.convert_to_tensor(image)], crop_height, crop_width)
    [cropped1] = preprocess_utils.random_crop(
        [tf.convert_to_tensor(image)], crop_height, crop_width)
    self.assertFalse(np.isclose(cropped0.numpy(), cropped1.numpy()).all())
  def testReturnConsistenCropsOfImagesInTheList(self):
    """All tensors in one list are cropped at the same offset."""
    tf.random.set_seed(0)
    height, width = 10, 20
    crop_height, crop_width = 2, 3
    labels = np.linspace(0, height * width-1, height * width)
    labels = labels.reshape((height, width, 1))
    image = np.tile(labels, (1, 1, 3))
    [cropped_image, cropped_label] = preprocess_utils.random_crop(
        [tf.convert_to_tensor(image), tf.convert_to_tensor(labels)],
        crop_height, crop_width)
    for i in range(3):
      self.assertAllEqual(cropped_image[:, :, i], tf.squeeze(cropped_label))
  def testDieOnRandomCropWhenImagesWithDifferentWidth(self):
    """Mismatched widths in the input list raise InvalidArgumentError."""
    crop_height, crop_width = 2, 3
    image1 = tf.convert_to_tensor(np.random.rand(4, 5, 3))
    image2 = tf.convert_to_tensor(np.random.rand(4, 6, 1))
    with self.assertRaises(tf.errors.InvalidArgumentError):
      _ = preprocess_utils.random_crop([image1, image2], crop_height,
                                       crop_width)
  def testDieOnRandomCropWhenImagesWithDifferentHeight(self):
    """Mismatched heights in the input list raise InvalidArgumentError."""
    crop_height, crop_width = 2, 3
    image1 = tf.convert_to_tensor(np.random.rand(4, 5, 3))
    image2 = tf.convert_to_tensor(np.random.rand(5, 5, 1))
    with self.assertRaises(tf.errors.InvalidArgumentError):
      _ = preprocess_utils.random_crop([image1, image2], crop_height,
                                       crop_width)
  def testDieOnRandomCropWhenCropSizeIsGreaterThanImage(self):
    """Requesting a crop larger than the image raises InvalidArgumentError."""
    crop_height, crop_width = 5, 9
    image1 = tf.convert_to_tensor(np.random.rand(4, 5, 3))
    image2 = tf.convert_to_tensor(np.random.rand(4, 5, 1))
    with self.assertRaises(tf.errors.InvalidArgumentError):
      _ = preprocess_utils.random_crop([image1, image2], crop_height,
                                       crop_width)
  def testRandomScaleFitsInRange(self):
    """get_random_scale stays within [min_scale, max_scale]."""
    scale_value = preprocess_utils.get_random_scale(1., 2., 0.)
    self.assertGreaterEqual(scale_value, 1.)
    self.assertLessEqual(scale_value, 2.)
  def testDeterminedRandomScaleReturnsNumber(self):
    """Equal min and max scales yield exactly that scale."""
    scale = preprocess_utils.get_random_scale(1., 1., 0.)
    self.assertEqual(scale, 1.)
  def testResizeTensorsToRange(self):
    """resize_to_range honours min/max size when no factor is given."""
    test_shapes = [[60, 40],
                   [15, 30],
                   [15, 50]]
    min_size = 50
    max_size = 100
    factor = None
    expected_shape_list = [(75, 50, 3),
                           (50, 100, 3),
                           (30, 100, 3)]
    for i, test_shape in enumerate(test_shapes):
      image = tf.random.normal([test_shape[0], test_shape[1], 3])
      new_tensor_list = preprocess_utils.resize_to_range(
          image=image,
          label=None,
          min_size=min_size,
          max_size=max_size,
          factor=factor,
          align_corners=True)
      self.assertEqual(new_tensor_list[0].shape, expected_shape_list[i])
  def testResizeTensorsToRangeWithFactor(self):
    """With a factor, resized sides are a multiple of factor plus one."""
    test_shapes = [[60, 40],
                   [15, 30],
                   [15, 50]]
    min_size = 50
    max_size = 98
    factor = 8
    expected_image_shape_list = [(81, 57, 3),
                                 (49, 97, 3),
                                 (33, 97, 3)]
    expected_label_shape_list = [(81, 57, 1),
                                 (49, 97, 1),
                                 (33, 97, 1)]
    for i, test_shape in enumerate(test_shapes):
      image = tf.random.normal([test_shape[0], test_shape[1], 3])
      label = tf.random.normal([test_shape[0], test_shape[1], 1])
      new_tensor_list = preprocess_utils.resize_to_range(
          image=image,
          label=label,
          min_size=min_size,
          max_size=max_size,
          factor=factor,
          align_corners=True)
      self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i])
      self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i])
  def testResizeTensorsToRangeWithSimilarMinMaxSizes(self):
    """Resizing also works when min_size and max_size are nearly equal."""
    test_shapes = [[60, 40],
                   [15, 30],
                   [15, 50]]
    # Values set so that one of the side = 97.
    min_size = 96
    max_size = 98
    factor = 8
    expected_image_shape_list = [(97, 65, 3),
                                 (49, 97, 3),
                                 (33, 97, 3)]
    expected_label_shape_list = [(97, 65, 1),
                                 (49, 97, 1),
                                 (33, 97, 1)]
    for i, test_shape in enumerate(test_shapes):
      image = tf.random.normal([test_shape[0], test_shape[1], 3])
      label = tf.random.normal([test_shape[0], test_shape[1], 1])
      new_tensor_list = preprocess_utils.resize_to_range(
          image=image,
          label=label,
          min_size=min_size,
          max_size=max_size,
          factor=factor,
          align_corners=True)
      self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i])
      self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i])
  def testResizeTensorsToRangeWithEqualMaxSize(self):
    """Resizing works when max_size equals the larger input side."""
    test_shapes = [[97, 38],
                   [96, 97]]
    # Make max_size equal to the larger value of test_shapes.
    min_size = 97
    max_size = 97
    factor = 8
    expected_image_shape_list = [(97, 41, 3),
                                 (97, 97, 3)]
    expected_label_shape_list = [(97, 41, 1),
                                 (97, 97, 1)]
    for i, test_shape in enumerate(test_shapes):
      image = tf.random.normal([test_shape[0], test_shape[1], 3])
      label = tf.random.normal([test_shape[0], test_shape[1], 1])
      new_tensor_list = preprocess_utils.resize_to_range(
          image=image,
          label=label,
          min_size=min_size,
          max_size=max_size,
          factor=factor,
          align_corners=True)
      self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i])
      self.assertEqual(new_tensor_list[1].shape, expected_label_shape_list[i])
  def testResizeTensorsToRangeWithPotentialErrorInTFCeil(self):
    """Regression test for a rounding edge case on very large inputs."""
    test_shape = [3936, 5248]
    # Make max_size equal to the larger value of test_shapes.
    min_size = 1441
    max_size = 1441
    factor = 16
    expected_image_shape = (1089, 1441, 3)
    expected_label_shape = (1089, 1441, 1)
    image = tf.random.normal([test_shape[0], test_shape[1], 3])
    label = tf.random.normal([test_shape[0], test_shape[1], 1])
    new_tensor_list = preprocess_utils.resize_to_range(
        image=image,
        label=label,
        min_size=min_size,
        max_size=max_size,
        factor=factor,
        align_corners=True)
    self.assertEqual(new_tensor_list[0].shape, expected_image_shape)
    self.assertEqual(new_tensor_list[1].shape, expected_label_shape)
  def testResizeTensorWithOnlyMaxSize(self):
    """With only max_size, images already within bounds are left as-is."""
    test_shapes = [[97, 38],
                   [96, 18]]
    max_size = (97, 28)
    # Since the second test shape already fits max size, do nothing.
    expected_image_shape_list = [(71, 28, 3),
                                 (96, 18, 3)]
    for i, test_shape in enumerate(test_shapes):
      image = tf.random.normal([test_shape[0], test_shape[1], 3])
      new_tensor_list = preprocess_utils.resize_to_range(
          image=image,
          label=None,
          min_size=None,
          max_size=max_size,
          align_corners=True)
      self.assertEqual(new_tensor_list[0].shape, expected_image_shape_list[i])
if __name__ == '__main__':
  # Run the test suite when executed directly.
  tf.test.main()
| [
"numpy.dstack",
"numpy.tile",
"deeplab2.data.preprocessing.preprocess_utils.get_random_scale",
"tensorflow.random.normal",
"tensorflow.random.set_seed",
"numpy.random.rand",
"deeplab2.data.preprocessing.preprocess_utils.resize_to_range",
"numpy.isclose",
"tensorflow.test.main",
"numpy.random.randi... | [((13336, 13350), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (13348, 13350), True, 'import tensorflow as tf\n'), ((840, 903), 'numpy.dstack', 'np.dstack', (['[[[5.0, 6.0], [9.0, 0.0]], [[4.0, 3.0], [3.0, 5.0]]]'], {}), '([[[5.0, 6.0], [9.0, 0.0]], [[4.0, 3.0], [3.0, 5.0]]])\n', (849, 903), True, 'import numpy as np\n'), ((997, 1030), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['numpy_image'], {}), '(numpy_image)\n', (1017, 1030), True, 'import tensorflow as tf\n'), ((1057, 1106), 'deeplab2.data.preprocessing.preprocess_utils.flip_dim', 'preprocess_utils.flip_dim', (['[image]'], {'prob': '(0)', 'dim': '(0)'}), '([image], prob=0, dim=0)\n', (1082, 1106), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((1210, 1259), 'deeplab2.data.preprocessing.preprocess_utils.flip_dim', 'preprocess_utils.flip_dim', (['[image]'], {'prob': '(0)', 'dim': '(1)'}), '([image], prob=0, dim=1)\n', (1235, 1259), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((1363, 1412), 'deeplab2.data.preprocessing.preprocess_utils.flip_dim', 'preprocess_utils.flip_dim', (['[image]'], {'prob': '(0)', 'dim': '(2)'}), '([image], prob=0, dim=2)\n', (1388, 1412), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((1545, 1608), 'numpy.dstack', 'np.dstack', (['[[[5.0, 6.0], [9.0, 0.0]], [[4.0, 3.0], [3.0, 5.0]]]'], {}), '([[[5.0, 6.0], [9.0, 0.0]], [[4.0, 3.0], [3.0, 5.0]]])\n', (1554, 1608), True, 'import numpy as np\n'), ((1709, 1772), 'numpy.dstack', 'np.dstack', (['[[[9.0, 0.0], [5.0, 6.0]], [[3.0, 5.0], [4.0, 3.0]]]'], {}), '([[[9.0, 0.0], [5.0, 6.0]], [[3.0, 5.0], [4.0, 3.0]]])\n', (1718, 1772), True, 'import numpy as np\n'), ((1876, 1939), 'numpy.dstack', 'np.dstack', (['[[[6.0, 5.0], [0.0, 9.0]], [[3.0, 4.0], [5.0, 3.0]]]'], {}), '([[[6.0, 5.0], [0.0, 9.0]], [[3.0, 4.0], [5.0, 3.0]]])\n', (1885, 1939), True, 'import numpy as np\n'), ((2043, 2106), 'numpy.dstack', 'np.dstack', 
(['[[[4.0, 3.0], [3.0, 5.0]], [[5.0, 6.0], [9.0, 0.0]]]'], {}), '([[[4.0, 3.0], [3.0, 5.0]], [[5.0, 6.0], [9.0, 0.0]]])\n', (2052, 2106), True, 'import numpy as np\n'), ((2203, 2236), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['numpy_image'], {}), '(numpy_image)\n', (2223, 2236), True, 'import tensorflow as tf\n'), ((2263, 2312), 'deeplab2.data.preprocessing.preprocess_utils.flip_dim', 'preprocess_utils.flip_dim', (['[image]'], {'prob': '(1)', 'dim': '(0)'}), '([image], prob=1, dim=0)\n', (2288, 2312), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((2416, 2465), 'deeplab2.data.preprocessing.preprocess_utils.flip_dim', 'preprocess_utils.flip_dim', (['[image]'], {'prob': '(1)', 'dim': '(1)'}), '([image], prob=1, dim=1)\n', (2441, 2465), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((2569, 2618), 'deeplab2.data.preprocessing.preprocess_utils.flip_dim', 'preprocess_utils.flip_dim', (['[image]'], {'prob': '(1)', 'dim': '(2)'}), '([image], prob=1, dim=2)\n', (2594, 2618), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((2777, 2840), 'numpy.dstack', 'np.dstack', (['[[[5.0, 6.0], [9.0, 0.0]], [[4.0, 3.0], [3.0, 5.0]]]'], {}), '([[[5.0, 6.0], [9.0, 0.0]], [[4.0, 3.0], [3.0, 5.0]]])\n', (2786, 2840), True, 'import numpy as np\n'), ((2940, 2977), 'numpy.dstack', 'np.dstack', (['[[[0.0, 1.0], [2.0, 3.0]]]'], {}), '([[[0.0, 1.0], [2.0, 3.0]]])\n', (2949, 2977), True, 'import numpy as np\n'), ((3029, 3092), 'numpy.dstack', 'np.dstack', (['[[[6.0, 5.0], [0.0, 9.0]], [[3.0, 4.0], [5.0, 3.0]]]'], {}), '([[[6.0, 5.0], [0.0, 9.0]], [[3.0, 4.0], [5.0, 3.0]]])\n', (3038, 3092), True, 'import numpy as np\n'), ((3220, 3257), 'numpy.dstack', 'np.dstack', (['[[[1.0, 0.0], [3.0, 2.0]]]'], {}), '([[[1.0, 0.0], [3.0, 2.0]]])\n', (3229, 3257), True, 'import numpy as np\n'), ((3303, 3336), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['numpy_image'], {}), '(numpy_image)\n', (3323, 3336), True, 
'import tensorflow as tf\n'), ((3349, 3382), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['numpy_label'], {}), '(numpy_label)\n', (3369, 3382), True, 'import tensorflow as tf\n'), ((3415, 3471), 'deeplab2.data.preprocessing.preprocess_utils.flip_dim', 'preprocess_utils.flip_dim', (['[image, label]'], {'prob': '(1)', 'dim': '(1)'}), '([image, label], prob=1, dim=1)\n', (3440, 3471), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((3684, 3747), 'numpy.dstack', 'np.dstack', (['[[[5.0, 6.0], [9.0, 0.0]], [[4.0, 3.0], [3.0, 5.0]]]'], {}), '([[[5.0, 6.0], [9.0, 0.0]], [[4.0, 3.0], [3.0, 5.0]]])\n', (3693, 3747), True, 'import numpy as np\n'), ((3848, 3911), 'numpy.dstack', 'np.dstack', (['[[[6.0, 5.0], [0.0, 9.0]], [[3.0, 4.0], [5.0, 3.0]]]'], {}), '([[[6.0, 5.0], [0.0, 9.0]], [[3.0, 4.0], [5.0, 3.0]]])\n', (3857, 3911), True, 'import numpy as np\n'), ((4008, 4041), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['numpy_image'], {}), '(numpy_image)\n', (4028, 4041), True, 'import tensorflow as tf\n'), ((4076, 4125), 'deeplab2.data.preprocessing.preprocess_utils.flip_dim', 'preprocess_utils.flip_dim', (['[image]'], {'prob': '(0)', 'dim': '(1)'}), '([image], prob=0, dim=1)\n', (4101, 4125), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((4164, 4215), 'deeplab2.data.preprocessing.preprocess_utils.flip_dim', 'preprocess_utils.flip_dim', (['[image]'], {'prob': '(1.0)', 'dim': '(1)'}), '([image], prob=1.0, dim=1)\n', (4189, 4215), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((4447, 4464), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4461, 4464), True, 'import numpy as np\n'), ((4505, 4555), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {'size': '(height, width, 3)'}), '(0, 256, size=(height, width, 3))\n', (4522, 4555), True, 'import numpy as np\n'), ((5198, 5215), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5212, 5215), True, 
'import numpy as np\n'), ((5266, 5311), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {'size': '(100, 200, 3)'}), '(0, 256, size=(100, 200, 3))\n', (5283, 5311), True, 'import numpy as np\n'), ((5317, 5339), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(37)'], {}), '(37)\n', (5335, 5339), True, 'import tensorflow as tf\n'), ((5588, 5609), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(0)'], {}), '(0)\n', (5606, 5609), True, 'import tensorflow as tf\n'), ((5658, 5703), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {'size': '(100, 200, 3)'}), '(0, 256, size=(100, 200, 3))\n', (5675, 5703), True, 'import numpy as np\n'), ((6062, 6083), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(0)'], {}), '(0)\n', (6080, 6083), True, 'import tensorflow as tf\n'), ((6160, 6210), 'numpy.linspace', 'np.linspace', (['(0)', '(height * width - 1)', '(height * width)'], {}), '(0, height * width - 1, height * width)\n', (6171, 6210), True, 'import numpy as np\n'), ((6269, 6295), 'numpy.tile', 'np.tile', (['labels', '(1, 1, 3)'], {}), '(labels, (1, 1, 3))\n', (6276, 6295), True, 'import numpy as np\n'), ((7820, 7868), 'deeplab2.data.preprocessing.preprocess_utils.get_random_scale', 'preprocess_utils.get_random_scale', (['(1.0)', '(2.0)', '(0.0)'], {}), '(1.0, 2.0, 0.0)\n', (7853, 7868), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((8018, 8066), 'deeplab2.data.preprocessing.preprocess_utils.get_random_scale', 'preprocess_utils.get_random_scale', (['(1.0)', '(1.0)', '(0.0)'], {}), '(1.0, 1.0, 0.0)\n', (8051, 8066), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((12177, 12228), 'tensorflow.random.normal', 'tf.random.normal', (['[test_shape[0], test_shape[1], 3]'], {}), '([test_shape[0], test_shape[1], 3])\n', (12193, 12228), True, 'import tensorflow as tf\n'), ((12241, 12292), 'tensorflow.random.normal', 'tf.random.normal', (['[test_shape[0], test_shape[1], 1]'], {}), 
'([test_shape[0], test_shape[1], 1])\n', (12257, 12292), True, 'import tensorflow as tf\n'), ((12315, 12451), 'deeplab2.data.preprocessing.preprocess_utils.resize_to_range', 'preprocess_utils.resize_to_range', ([], {'image': 'image', 'label': 'label', 'min_size': 'min_size', 'max_size': 'max_size', 'factor': 'factor', 'align_corners': '(True)'}), '(image=image, label=label, min_size=\n min_size, max_size=max_size, factor=factor, align_corners=True)\n', (12347, 12451), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((6698, 6721), 'numpy.random.rand', 'np.random.rand', (['(4)', '(5)', '(3)'], {}), '(4, 5, 3)\n', (6712, 6721), True, 'import numpy as np\n'), ((6757, 6780), 'numpy.random.rand', 'np.random.rand', (['(4)', '(6)', '(1)'], {}), '(4, 6, 1)\n', (6771, 6780), True, 'import numpy as np\n'), ((6853, 6924), 'deeplab2.data.preprocessing.preprocess_utils.random_crop', 'preprocess_utils.random_crop', (['[image1, image2]', 'crop_height', 'crop_width'], {}), '([image1, image2], crop_height, crop_width)\n', (6881, 6924), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((7096, 7119), 'numpy.random.rand', 'np.random.rand', (['(4)', '(5)', '(3)'], {}), '(4, 5, 3)\n', (7110, 7119), True, 'import numpy as np\n'), ((7155, 7178), 'numpy.random.rand', 'np.random.rand', (['(5)', '(5)', '(1)'], {}), '(5, 5, 1)\n', (7169, 7178), True, 'import numpy as np\n'), ((7251, 7322), 'deeplab2.data.preprocessing.preprocess_utils.random_crop', 'preprocess_utils.random_crop', (['[image1, image2]', 'crop_height', 'crop_width'], {}), '([image1, image2], crop_height, crop_width)\n', (7279, 7322), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((7495, 7518), 'numpy.random.rand', 'np.random.rand', (['(4)', '(5)', '(3)'], {}), '(4, 5, 3)\n', (7509, 7518), True, 'import numpy as np\n'), ((7554, 7577), 'numpy.random.rand', 'np.random.rand', (['(4)', '(5)', '(1)'], {}), '(4, 5, 1)\n', (7568, 7577), True, 'import numpy as np\n'), 
((7650, 7721), 'deeplab2.data.preprocessing.preprocess_utils.random_crop', 'preprocess_utils.random_crop', (['[image1, image2]', 'crop_height', 'crop_width'], {}), '([image1, image2], crop_height, crop_width)\n', (7678, 7721), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((8462, 8513), 'tensorflow.random.normal', 'tf.random.normal', (['[test_shape[0], test_shape[1], 3]'], {}), '([test_shape[0], test_shape[1], 3])\n', (8478, 8513), True, 'import tensorflow as tf\n'), ((8538, 8672), 'deeplab2.data.preprocessing.preprocess_utils.resize_to_range', 'preprocess_utils.resize_to_range', ([], {'image': 'image', 'label': 'None', 'min_size': 'min_size', 'max_size': 'max_size', 'factor': 'factor', 'align_corners': '(True)'}), '(image=image, label=None, min_size=min_size,\n max_size=max_size, factor=factor, align_corners=True)\n', (8570, 8672), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((9329, 9380), 'tensorflow.random.normal', 'tf.random.normal', (['[test_shape[0], test_shape[1], 3]'], {}), '([test_shape[0], test_shape[1], 3])\n', (9345, 9380), True, 'import tensorflow as tf\n'), ((9395, 9446), 'tensorflow.random.normal', 'tf.random.normal', (['[test_shape[0], test_shape[1], 1]'], {}), '([test_shape[0], test_shape[1], 1])\n', (9411, 9446), True, 'import tensorflow as tf\n'), ((9471, 9607), 'deeplab2.data.preprocessing.preprocess_utils.resize_to_range', 'preprocess_utils.resize_to_range', ([], {'image': 'image', 'label': 'label', 'min_size': 'min_size', 'max_size': 'max_size', 'factor': 'factor', 'align_corners': '(True)'}), '(image=image, label=label, min_size=\n min_size, max_size=max_size, factor=factor, align_corners=True)\n', (9503, 9607), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((10407, 10458), 'tensorflow.random.normal', 'tf.random.normal', (['[test_shape[0], test_shape[1], 3]'], {}), '([test_shape[0], test_shape[1], 3])\n', (10423, 10458), True, 'import tensorflow as tf\n'), ((10473, 
10524), 'tensorflow.random.normal', 'tf.random.normal', (['[test_shape[0], test_shape[1], 1]'], {}), '([test_shape[0], test_shape[1], 1])\n', (10489, 10524), True, 'import tensorflow as tf\n'), ((10549, 10685), 'deeplab2.data.preprocessing.preprocess_utils.resize_to_range', 'preprocess_utils.resize_to_range', ([], {'image': 'image', 'label': 'label', 'min_size': 'min_size', 'max_size': 'max_size', 'factor': 'factor', 'align_corners': '(True)'}), '(image=image, label=label, min_size=\n min_size, max_size=max_size, factor=factor, align_corners=True)\n', (10581, 10685), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((11373, 11424), 'tensorflow.random.normal', 'tf.random.normal', (['[test_shape[0], test_shape[1], 3]'], {}), '([test_shape[0], test_shape[1], 3])\n', (11389, 11424), True, 'import tensorflow as tf\n'), ((11439, 11490), 'tensorflow.random.normal', 'tf.random.normal', (['[test_shape[0], test_shape[1], 1]'], {}), '([test_shape[0], test_shape[1], 1])\n', (11455, 11490), True, 'import tensorflow as tf\n'), ((11515, 11651), 'deeplab2.data.preprocessing.preprocess_utils.resize_to_range', 'preprocess_utils.resize_to_range', ([], {'image': 'image', 'label': 'label', 'min_size': 'min_size', 'max_size': 'max_size', 'factor': 'factor', 'align_corners': '(True)'}), '(image=image, label=label, min_size=\n min_size, max_size=max_size, factor=factor, align_corners=True)\n', (11547, 11651), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((12987, 13038), 'tensorflow.random.normal', 'tf.random.normal', (['[test_shape[0], test_shape[1], 3]'], {}), '([test_shape[0], test_shape[1], 3])\n', (13003, 13038), True, 'import tensorflow as tf\n'), ((13063, 13178), 'deeplab2.data.preprocessing.preprocess_utils.resize_to_range', 'preprocess_utils.resize_to_range', ([], {'image': 'image', 'label': 'None', 'min_size': 'None', 'max_size': 'max_size', 'align_corners': '(True)'}), '(image=image, label=None, min_size=None,\n max_size=max_size, 
align_corners=True)\n', (13095, 13178), False, 'from deeplab2.data.preprocessing import preprocess_utils\n'), ((4639, 4666), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {}), '(image)\n', (4659, 4666), True, 'import tensorflow as tf\n'), ((5395, 5422), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {}), '(image)\n', (5415, 5422), True, 'import tensorflow as tf\n'), ((5760, 5787), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {}), '(image)\n', (5780, 5787), True, 'import tensorflow as tf\n'), ((5871, 5898), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {}), '(image)\n', (5891, 5898), True, 'import tensorflow as tf\n'), ((6373, 6400), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {}), '(image)\n', (6393, 6400), True, 'import tensorflow as tf\n'), ((6402, 6430), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['labels'], {}), '(labels)\n', (6422, 6430), True, 'import tensorflow as tf\n'), ((6540, 6565), 'tensorflow.squeeze', 'tf.squeeze', (['cropped_label'], {}), '(cropped_label)\n', (6550, 6565), True, 'import tensorflow as tf\n'), ((4975, 5041), 'numpy.isclose', 'np.isclose', (['image[y:y + crop_height, x:x + crop_width, :]', 'cropped'], {}), '(image[y:y + crop_height, x:x + crop_width, :], cropped)\n', (4985, 5041), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
class EventNode:
    """
    Base Class for behavior log linked list:
    example:
    ------
    from behaviors import BehaviorMat
    code_map = BehaviorMat.code_map
    eventlist = PSENode(None, None, None, None)
    import h5py
    hfile = h5py.File("D1-R35-RV_p155_raw_behavior.mat",'r')
    trial_event_mat = np.array(hfile['out/trial_event_mat'])
    for i in range(len(trial_event_mat)):
        eventcode, etime, trial = trial_event_mat[i, :]
        eventlist.append(PSENode(code_map[eventcode][0] + '|' + code_map[eventcode][1], etime, trial,
                                 eventcode))
    eventlist.as_df()
    ----
    Now you have a eventlist full of nodes
    call: eventlist.as_df() to get the dataframe

    Implementation notes:
    The list is a circular doubly-linked list.  A *sentinel* node
    (constructed with ``event=None``) owns the list, tracks ``size``, and
    carries the list-level API (append/remove/iterate/as_df/...); every
    other node stores one logged event.
    """

    # Optional {value: abbreviation} mapping applied by as_df(use_abbr=True);
    # subclasses override this (see PSENode).
    ABBR = {}

    def __init__(self, event, etime, trial, ecode):
        """Create either a sentinel (``event is None``) or a data node.

        Args:
            event: human-readable event name, or None for the sentinel.
            etime: event time (ms).
            trial: trial number; may be fractional (0.5 is ITI of trial 0).
            ecode: numeric event code from the raw log.
        """
        # Fields serialized by as_df(), in column order.
        self.serializable = ['event', 'etime', 'trial', 'ecode']
        if event is None:
            # Implements a circular LinkedList: an empty list's sentinel
            # points to itself in both directions.
            self.is_sentinel = True
            self.next = self
            self.prev = self
            self.size = 0
        else:
            self.is_sentinel = False
            self.event = event
            self.etime = etime
            self.trial = trial
            self.ecode = ecode
            # self.trial_start = False # Uncomment this if needed

    def __str__(self):
        if self.is_sentinel:
            return 'Sentinel'
        return f"{type(self).__name__}({self.event}, {self.trial}, {self.etime:.1f}ms, {self.ecode})"

    def trial_index(self):
        """Return the 0-based trial index, or None for the sentinel."""
        # 0.5 is ITI but considered in trial 0
        if self.is_sentinel:
            return None
        else:
            return int(np.ceil(self.trial)) - 1

    # Methods Reserved For Sentinel Node
    def __len__(self):
        assert self.is_sentinel, 'must be sentinel node to do this'
        return self.size

    def __iter__(self):
        """Yield the data nodes in list order (sentinel only)."""
        assert self.is_sentinel, 'must be sentinel node to do this'
        curr = self.next
        while not curr.is_sentinel:
            yield curr
            curr = curr.next

    def as_df(self, use_abbr=False):
        """Return the list contents as a pandas DataFrame (sentinel only).

        Args:
            use_abbr: when True, any field value found in ``ABBR`` is
                replaced by its abbreviation.
        """
        assert self.is_sentinel, 'must be sentinel node to do this'
        if use_abbr:
            results = [None] * len(self)
            node_list = self.tolist()
            for i in range(len(self)):
                results[i] = [None] * len(self.serializable)
                for j in range(len(self.serializable)):
                    field = self.serializable[j]
                    attr = getattr(node_list[i], field)
                    results[i][j] = self.ABBR[attr] if attr in self.ABBR else attr
            # BUGFIX: previously the abbreviated ``results`` rows were
            # computed and then discarded (the raw attribute values were
            # returned instead).  Return the abbreviated rows.
            return pd.DataFrame(results, columns=self.serializable)
        else:
            return pd.DataFrame([[getattr(enode, field) for field in self.serializable] for enode in self],
                                columns=self.serializable)

    def nodelist_asdf(self, nodelist):
        # a method that looks at a restricted view of eventlist
        return pd.DataFrame([[getattr(enode, field) for field in self.serializable] for enode in nodelist],
                            columns=self.serializable)

    # ideally add iter method but not necessary
    def tolist(self):
        """Return the data nodes as a plain Python list (sentinel only)."""
        assert self.is_sentinel, 'must be sentinel node to do this'
        return [enode for enode in self]

    def append(self, node):
        """Link ``node`` at the end of the list and return it (sentinel only)."""
        assert self.is_sentinel, 'must be sentinel node to do this'
        old_end = self.prev
        assert old_end.next is self, "what is happening"
        old_end.next = node
        node.prev = old_end
        self.prev = node
        node.next = self
        self.size += 1
        return node

    def prepend(self, node):
        """Link ``node`` at the front of the list and return it (sentinel only)."""
        # Not important
        assert self.is_sentinel, 'must be sentinel node to do this'
        old_first = self.next
        old_first.prev = node
        self.next = node
        node.prev = self
        node.next = old_first
        self.size += 1
        return node

    def remove_node(self, node):
        """Unlink ``node`` from the list and detach its pointers (sentinel only)."""
        assert self.is_sentinel, 'must be sentinel node to do this'
        assert self.size, 'list must be non-empty'
        next_node = node.next
        prev_node = node.prev
        prev_node.next = next_node
        next_node.prev = prev_node
        node.next = None
        node.prev = None
        self.size -= 1

    def swap_nodes(self, node1, node2):
        """Swap two nodes in place (sentinel only).

        NOTE(review): the pointer surgery below assumes ``node1``
        immediately precedes ``node2``; swapping non-adjacent nodes with
        this routine would corrupt the links.  Callers should only pass
        adjacent pairs -- confirm before widening its use.
        """
        assert self.is_sentinel, 'must be sentinel node to do this'
        assert (not (node1.is_sentinel or node2.is_sentinel)), 'both have to be non-sentinels'
        first_prev = node1.prev
        sec_next = node2.next
        first_prev.next = node2
        node2.prev = first_prev
        node2.next = node1
        node1.prev = node2
        node1.next = sec_next
        sec_next.prev = node1

    def get_last(self):
        assert self.is_sentinel, 'must be sentinel node to do this'
        return self.prev

    def get_first(self):
        assert self.is_sentinel, 'must be sentinel node to do this'
        return self.next

    def is_empty(self):
        assert self.is_sentinel, 'must be sentinel node to do this'
        return self.size == 0
class PSENode(EventNode):
    """Probswitch event node.

    Extends the base event node with a ``saliency`` field, which is also
    registered for serialization so that ``as_df`` emits it as a column.
    """

    # Abbreviations applied by as_df(use_abbr=True).
    ABBR = {
        'right': 'RT',
        'left': 'LT',
        'ipsi': 'IP',
        'contra': 'CT',
        'center': 'CE',
    }

    def __init__(self, event, etime, trial, ecode):
        super().__init__(event, etime, trial, ecode)
        # Add the extra field to the per-instance serialization list.
        self.serializable = [*self.serializable, 'saliency']
        self.saliency = None
| [
"numpy.ceil"
] | [((1642, 1661), 'numpy.ceil', 'np.ceil', (['self.trial'], {}), '(self.trial)\n', (1649, 1661), True, 'import numpy as np\n')] |
# Borrowed from https://gitlab.tiker.net/jdsteve2/perflex
import pyopencl as cl
import loopy as lp
import time
import numpy as np
def time_knl(knl, ctx, param_dict, n_warmup=2, n_trials=3):
    """Wall-time a loopy kernel on an OpenCL context.

    Executes the kernel ``n_warmup + n_trials`` times with random arguments
    and returns the mean wall time of the last ``n_trials`` runs; the
    warmup runs absorb one-time compilation and caching costs.

    Args:
        knl: the loopy kernel to time.
        ctx: the pyopencl context to run on.
        param_dict: mapping of kernel parameter names to concrete values.
        n_warmup: number of untimed warmup executions to discard
            (default 2, matching the previous hard-coded behavior).
        n_trials: number of timed executions averaged into the result
            (default 3, matching the previous hard-coded behavior).

    Returns:
        Mean wall-clock seconds per kernel execution over the timed trials.
    """
    def create_rand_args(ctx, knl, param_dict):
        # Build reference args once to learn shapes/dtypes, then generate
        # fresh random device arrays for the timing runs.
        queue = cl.CommandQueue(ctx)
        info = lp.generate_code_v2(knl).implemented_data_info
        args, arg_data = lp.auto_test.make_ref_args(
            knl,
            info,
            queue, param_dict)
        # Free the reference arrays before allocating the timing arrays.
        args.clear()
        del args
        rand_args = lp.auto_test.make_args(knl, info,
                queue, arg_data, param_dict)
        del arg_data[:]
        del arg_data
        return rand_args

    queue = cl.CommandQueue(ctx)
    trial_wtimes = []
    arg_arrays = create_rand_args(ctx, knl, param_dict)
    knl = lp.set_options(knl, no_numpy=True)
    for _ in range(n_warmup + n_trials):
        # Drain any queued work so the timer brackets only this execution.
        queue.finish()
        tstart = time.time()
        evt, out = knl(queue, **arg_arrays)
        queue.finish()
        trial_wtimes.append(time.time() - tstart)
    # Average only the post-warmup timings.
    return np.average(trial_wtimes[n_warmup:])
| [
"loopy.auto_test.make_ref_args",
"loopy.auto_test.make_args",
"numpy.average",
"pyopencl.CommandQueue",
"loopy.generate_code_v2",
"loopy.set_options",
"time.time"
] | [((666, 686), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['ctx'], {}), '(ctx)\n', (681, 686), True, 'import pyopencl as cl\n'), ((775, 809), 'loopy.set_options', 'lp.set_options', (['knl'], {'no_numpy': '(True)'}), '(knl, no_numpy=True)\n', (789, 809), True, 'import loopy as lp\n'), ((1035, 1063), 'numpy.average', 'np.average', (['trial_wtimes[2:]'], {}), '(trial_wtimes[2:])\n', (1045, 1063), True, 'import numpy as np\n'), ((231, 251), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['ctx'], {}), '(ctx)\n', (246, 251), True, 'import pyopencl as cl\n'), ((339, 395), 'loopy.auto_test.make_ref_args', 'lp.auto_test.make_ref_args', (['knl', 'info', 'queue', 'param_dict'], {}), '(knl, info, queue, param_dict)\n', (365, 395), True, 'import loopy as lp\n'), ((503, 565), 'loopy.auto_test.make_args', 'lp.auto_test.make_args', (['knl', 'info', 'queue', 'arg_data', 'param_dict'], {}), '(knl, info, queue, arg_data, param_dict)\n', (525, 565), True, 'import loopy as lp\n'), ((877, 888), 'time.time', 'time.time', ([], {}), '()\n', (886, 888), False, 'import time\n'), ((971, 982), 'time.time', 'time.time', ([], {}), '()\n', (980, 982), False, 'import time\n'), ((267, 291), 'loopy.generate_code_v2', 'lp.generate_code_v2', (['knl'], {}), '(knl)\n', (286, 291), True, 'import loopy as lp\n')] |
# Each segment has another segment of the image showing (not black)
# As if you are slowly lookinng at someone's face from above
# Segment numbers are as follows:
# 1. Forehead (dowm to eyebrows)
# 2. Eyebrows (down to eyes)
# 3. Eyes
# 4. Nose
# 5. Mouth
# 6. Chin
# 7. Full
import logging
import numpy as np
import os
import pandas as pd
import cv2
from tqdm import tqdm
import segments_helpers as helpers
from datetime import date
# Date stamp in yy-mm-dd form.
# NOTE(review): ``today`` is not used in the rest of this script -- confirm
# it is not needed before removing.
today = date.today()
today = today.strftime("%y-%m-%d")

# Global variables: the BGR fill value used to black out regions, and the
# canonical output dimensions the crops are resized to.
BLACK = [0,0,0]
IMAGE_HEIGHT = 218
IMAGE_WIDTH = 178

landmarks_file_name = "/home/nthom/Documents/datasets/UNR_Facial_Attribute_Parsing_Dataset/landmarks.csv"
path_to_images = "/home/nthom/Documents/datasets/CelebA/Img/img_celeba/"
path_to_masks = "./data/segmented_images/"
path_to_corners = "/home/nthom/Documents/CelebA_Partial_Blackout/crop_points.csv"

# Create the output directory if it doesn't exist.  os.makedirs with
# exist_ok avoids the check-then-create race of os.access + os.mkdir and
# also creates missing parent directories.
os.makedirs(path_to_masks, exist_ok=True)

# Get a sorted list of the images in the given directory.
image_name_list = sorted(os.listdir(path_to_images))

# Get the landmarks, indexed by image name.
landmarks_df = pd.read_csv(landmarks_file_name, index_col="image_name")
# BUGFIX: .head is a method; previously the bound-method object was
# formatted into the message instead of the first rows.
landmarks_df_head = landmarks_df.head()
print(f"Landmarks Dataframe Head:\n {landmarks_df_head} \n")

# Get the bounding boxes, indexed by image name.
corners_df = pd.read_csv(path_to_corners, index_col=0)
print(f"Bounding Boxes Dataframe Head: \n {corners_df.head()}")
print(f"Bounding Boxes Dataframe headers: {corners_df.columns}")

# change this list if only certain segments are desired
segment_numbers = [1, 2, 3, 4, 5, 6, 7]
# Create every segment output directory once, up front, instead of
# re-checking existence on every (image, segment) iteration.
for segment_number in segment_numbers:
    os.makedirs(path_to_masks + "segment" + str(segment_number), exist_ok=True)

for image_name in tqdm(image_name_list):
    # Sanity checks: an image must appear in both annotation files.
    landmark_exists = image_name in landmarks_df.index
    corners_exists = image_name in corners_df.index
    if not landmark_exists:
        print(f"Image {image_name} not found in landmarks file!\n")
    if not corners_exists:
        print(f"Image {image_name} not found in corners file!\n")
    if landmark_exists and corners_exists:
        # Segment boundary y-coordinates, ordered top to bottom.
        landmarks = helpers.top_to_bottom_segments(image_name, landmarks_df)
        # Crop rectangle for this image.
        x_min, x_max, y_min, y_max = corners_df.loc[image_name]
        # Segment 7 ("full") uses the crop bottom as its boundary, so
        # nothing inside the crop gets blacked out.
        landmarks = np.append(landmarks, [y_max])
        for segment_number in segment_numbers:
            # Re-read the source image so each segment starts unmodified.
            img = cv2.imread(path_to_images + image_name)
            directory = "segment" + str(segment_number)
            # Black out everything from this segment's boundary down.
            # int() guards against a float row index after np.append.
            landmark = int(landmarks[segment_number - 1])
            logging.info(f"The landmark for segment {segment_number} is {landmark}.")
            img[landmark:, :] = BLACK
            # Crop to the bounding box, then resize to the canonical
            # dimensions declared at the top of the script.
            cropped_img = cv2.resize(img[y_min:y_max, x_min:x_max],
                                     (IMAGE_WIDTH, IMAGE_HEIGHT))
            # cv2.imwrite returns False on failure; report but continue.
            out_name = path_to_masks + directory + "/segment" + str(segment_number) + "_" + image_name
            if not cv2.imwrite(out_name, cropped_img):
                print(f"Image {segment_number}_{image_name} not saved successfully!")
| [
"os.listdir",
"cv2.resize",
"pandas.read_csv",
"tqdm.tqdm",
"os.access",
"logging.info",
"numpy.append",
"segments_helpers.top_to_bottom_segments",
"os.mkdir",
"datetime.date.today",
"cv2.imread"
] | [((445, 457), 'datetime.date.today', 'date.today', ([], {}), '()\n', (455, 457), False, 'from datetime import date\n'), ((1164, 1220), 'pandas.read_csv', 'pd.read_csv', (['landmarks_file_name'], {'index_col': '"""image_name"""'}), "(landmarks_file_name, index_col='image_name')\n", (1175, 1220), True, 'import pandas as pd\n'), ((1359, 1400), 'pandas.read_csv', 'pd.read_csv', (['path_to_corners'], {'index_col': '(0)'}), '(path_to_corners, index_col=0)\n', (1370, 1400), True, 'import pandas as pd\n'), ((1645, 1666), 'tqdm.tqdm', 'tqdm', (['image_name_list'], {}), '(image_name_list)\n', (1649, 1666), False, 'from tqdm import tqdm\n'), ((954, 987), 'os.access', 'os.access', (['path_to_masks', 'os.F_OK'], {}), '(path_to_masks, os.F_OK)\n', (963, 987), False, 'import os\n'), ((993, 1016), 'os.mkdir', 'os.mkdir', (['path_to_masks'], {}), '(path_to_masks)\n', (1001, 1016), False, 'import os\n'), ((1100, 1126), 'os.listdir', 'os.listdir', (['path_to_images'], {}), '(path_to_images)\n', (1110, 1126), False, 'import os\n'), ((2077, 2133), 'segments_helpers.top_to_bottom_segments', 'helpers.top_to_bottom_segments', (['image_name', 'landmarks_df'], {}), '(image_name, landmarks_df)\n', (2107, 2133), True, 'import segments_helpers as helpers\n'), ((2267, 2296), 'numpy.append', 'np.append', (['landmarks', '[y_max]'], {}), '(landmarks, [y_max])\n', (2276, 2296), True, 'import numpy as np\n'), ((2422, 2461), 'cv2.imread', 'cv2.imread', (['(path_to_images + image_name)'], {}), '(path_to_images + image_name)\n', (2432, 2461), False, 'import cv2\n'), ((2867, 2940), 'logging.info', 'logging.info', (['f"""The landmark for segment {segment_number} is {landmark}."""'], {}), "(f'The landmark for segment {segment_number} is {landmark}.')\n", (2879, 2940), False, 'import logging\n'), ((3114, 3149), 'cv2.resize', 'cv2.resize', (['cropped_img', '(178, 218)'], {}), '(cropped_img, (178, 218))\n', (3124, 3149), False, 'import cv2\n'), ((2621, 2666), 'os.access', 'os.access', (['(path_to_masks + 
directory)', 'os.F_OK'], {}), '(path_to_masks + directory, os.F_OK)\n', (2630, 2666), False, 'import os\n'), ((2684, 2719), 'os.mkdir', 'os.mkdir', (['(path_to_masks + directory)'], {}), '(path_to_masks + directory)\n', (2692, 2719), False, 'import os\n')] |
#!/usr/bin/env python
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import scipy.stats
import sys
from tqdm import tqdm
def mean_confidence_interval(data, confidence=0.95):
    """Return (mean, lower, upper) of a ``confidence`` interval for ``data``.

    The half-width uses the standard error of the mean and the Student-t
    distribution with n-1 degrees of freedom, so the interval is suitable
    for small samples with unknown variance.
    """
    sample = 1.0 * np.asarray(data)
    n_obs = len(sample)
    center = np.mean(sample)
    std_err = scipy.stats.sem(sample)
    # Two-sided interval: put (1 - confidence) / 2 in each tail.
    half_width = std_err * scipy.stats.t.ppf((1 + confidence) / 2., n_obs - 1)
    return center, center - half_width, center + half_width
# Result files (.npy) are passed on the command line, one per run.
print(len(sys.argv))
filenames = sys.argv[1:]
print(filenames)


def _load_result(path):
    # Report progress per file, then load the saved numpy array.
    print("Reading the file: {}".format(path))
    return np.load(path)


datas = [_load_result(fname) for fname in tqdm(filenames)]

# One stacked subplot per input file, sharing the time axis.
fig, axs = plt.subplots(len(filenames), sharex=True, sharey=False,
                        gridspec_kw={'hspace': 0.1})

robots = [2, 4, 10, 20]  # swarm sizes, one per subplot
threshold = 0.1          # distance threshold (m) used by the stats below
# One subplot per input file: object velocity on the left axis (blue) and
# distance to goal on a twin right axis (orange), each with dashed band
# traces around the solid mean trace.
# NOTE(review): inferred from the plot labels below, the data columns look
# like: 0 = time, 1 = mean distance, 2/3 = distance band bounds, 4 = mean
# velocity, 5/6 = velocity band bounds -- confirm with the data producer.
for i in range(0, len(filenames)):
    axs[i].set_rasterized(True)
    ax = axs[i]
    color = 'tab:blue'
    data = datas[i]
    # STATS: np.argmax on a boolean array returns the FIRST True index, so
    # these are the times at which the lower / mean / upper distance traces
    # first drop below the threshold.
    lower = data[np.argmax(data[:, 2] < threshold), 0]
    mean_t = data[np.argmax(data[:, 1] < threshold), 0]
    upper = data[np.argmax(data[:, 3] < threshold), 0]
    # Symmetrized spread of the crossing times around the mean crossing.
    m_std = ((upper-mean_t)+(mean_t-lower))/2.0
    print("File: {} - {} +- {}".format(filenames[i], mean_t, m_std))
    ###
    # Velocity band (dashed, faint) and mean velocity (solid).
    ax.plot(data[:, 0], data[:, 5], "--",
            color=color, linewidth=0.2, alpha=0.5)
    ax.plot(data[:, 0], data[:, 6], "--",
            color=color, linewidth=0.2, alpha=0.5)
    ax.plot(data[:, 0], data[:, 4],
            label='Object velocity (cm/s)', color=color)
    # Empty plot adds a legend entry for the distance curve drawn on ax2.
    ax.plot([], [], label='Distance to goal (m)', color='tab:orange')
    ax.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
    # Shared axis labels are only attached to the middle (second) subplot.
    if i == 1:
        ax.set_ylabel('Velocity (cm/s)', fontsize=12, y=-0.08)
    ax.tick_params(axis='y')
    ax.set_ylim([0, 1.7])
    # Twin axis for the distance-to-goal traces.
    ax2 = ax.twinx()
    ax2.set_rasterized(True)
    color = 'tab:orange'
    if i == 1:
        ax2.set_ylabel('Distance (m)', fontsize=12, y=-0.02)
    # Distance band (dashed) and mean distance (solid).
    ax2.plot(data[:, 0], data[:, 2], "--",
             color=color, linewidth=0.8, alpha=1.0)
    ax2.plot(data[:, 0], data[:, 3], "--",
             color=color, linewidth=0.8, alpha=1.0)
    # Invisible marker: a per-subplot legend showing the swarm size |R|.
    ax2.plot([], [], label=r'$|\mathcal{R}| = $' +
             str(robots[i]), color="white", marker=".")
    ax2.legend(handletextpad=-0.1, handlelength=0)
    ax2.plot(data[:, 0], data[:, 1], label='Distance to goal', color=color)
    ax2.tick_params(axis='y')
    ax2.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
    ax2.set_ylim([0, 1.8])
# axs[0].legend(loc='upper center', bbox_to_anchor=(0.5, 1.05),
#               ncol=3, fancybox=True, shadow=True)
# Figure-level legend above the first subplot.
axs[0].legend(loc='upper center', bbox_to_anchor=(0.5, 1.40),
              ncol=2, fancybox=False, shadow=False)
# axs[0].set_title("Scalability of the Swarm", y=1.30,fontsize=16)
axs[-1].set_xlabel('Time (seconds)', fontsize=12)
plt.savefig("scalability.pdf", dpi=200)
plt.show()
| [
"numpy.mean",
"matplotlib.pyplot.savefig",
"matplotlib.ticker.MultipleLocator",
"tqdm.tqdm",
"numpy.argmax",
"numpy.array",
"numpy.load",
"matplotlib.pyplot.show"
] | [((478, 493), 'tqdm.tqdm', 'tqdm', (['filenames'], {}), '(filenames)\n', (482, 493), False, 'from tqdm import tqdm\n'), ((2756, 2795), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""scalability.pdf"""'], {'dpi': '(200)'}), "('scalability.pdf', dpi=200)\n", (2767, 2795), True, 'import matplotlib.pyplot as plt\n'), ((2796, 2806), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2804, 2806), True, 'import matplotlib.pyplot as plt\n'), ((230, 244), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (238, 244), True, 'import numpy as np\n'), ((272, 282), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (279, 282), True, 'import numpy as np\n'), ((563, 580), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (570, 580), True, 'import numpy as np\n'), ((1554, 1581), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(0.5)'], {}), '(0.5)\n', (1576, 1581), True, 'import matplotlib.ticker as ticker\n'), ((2357, 2384), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(0.5)'], {}), '(0.5)\n', (2379, 2384), True, 'import matplotlib.ticker as ticker\n'), ((900, 933), 'numpy.argmax', 'np.argmax', (['(data[:, 2] < threshold)'], {}), '(data[:, 2] < threshold)\n', (909, 933), True, 'import numpy as np\n'), ((956, 989), 'numpy.argmax', 'np.argmax', (['(data[:, 1] < threshold)'], {}), '(data[:, 1] < threshold)\n', (965, 989), True, 'import numpy as np\n'), ((1011, 1044), 'numpy.argmax', 'np.argmax', (['(data[:, 3] < threshold)'], {}), '(data[:, 3] < threshold)\n', (1020, 1044), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
# Compare classification accuracy of a standard model vs. an adversarially
# trained one as the allowed perturbation budget grows.
task = 'mnist'
# Other model families evaluated at some point:
# ['wb', 'wb_kernel', 'kernel', 'nn']
flags = ['nn']
for flag in flags:
    # Each .npy file stores two accuracy curves: [standard, adv-trained].
    standard, at = np.load(task + flag + '.npy')
    # Perturbation budgets 0.00, 0.01, ..., 0.20.
    budgets = [0.01 * i for i in range(21)]
    fig, ax = plt.subplots()
    ax.tick_params(axis='x', labelsize=16)
    ax.tick_params(axis='y', labelsize=16)
    plt.gca().set_ylim([0.1, 1])
    ax.plot(budgets, standard, marker='s', label='StandardNN')
    ax.plot(budgets, at, marker='o', label='ATNN')
    ax.legend(loc='lower left', fontsize=12)
    ax.set_ylabel('Classification Accuracy', fontsize=18)
    ax.set_xlabel('Max $l_2$ Norm of Adv. Perturbation', fontsize=18)
    if flag in ('wb', 'kernel'):
        ax.set_title('mnist', fontsize=20)
    fig.tight_layout()
    plt.savefig(task + '_' + flag + '.pdf')
    plt.show()
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gca",
"numpy.load",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((235, 249), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (242, 249), True, 'import numpy as np\n'), ((301, 315), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (313, 315), True, 'import matplotlib.pyplot as plt\n'), ((413, 422), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (420, 422), True, 'import matplotlib.pyplot as plt\n'), ((905, 944), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(task + '_' + flag + '.pdf')"], {}), "(task + '_' + flag + '.pdf')\n", (916, 944), True, 'import matplotlib.pyplot as plt\n'), ((943, 953), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (951, 953), True, 'import matplotlib.pyplot as plt\n')] |
from __future__ import print_function, division
import sys
sys.path.append('core')
import argparse
import os
import cv2
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
# from raft import RAFT
from core.ours import RAFT
import evaluate
import datasets
import flow_vis
import random
from torch.utils.tensorboard import SummaryWriter
from utils.scheduler import CosineAnnealingWarmupRestarts
try:
    from torch.cuda.amp import GradScaler
# BUG FIX: a bare `except:` also swallows SystemExit/KeyboardInterrupt;
# only an ImportError (PyTorch < 1.6) should trigger the fallback.
except ImportError:
    # dummy GradScaler for PyTorch < 1.6
    class GradScaler:
        """No-op stand-in mirroring torch.cuda.amp.GradScaler's interface."""
        def __init__(self):
            pass
        def scale(self, loss):
            # Without AMP the loss is used unscaled.
            return loss
        def unscale_(self, optimizer):
            pass
        def step(self, optimizer):
            # Fall through to a plain optimizer step.
            optimizer.step()
        def update(self):
            pass
# Exclude extremely large displacements from the loss.
MAX_FLOW = 400
SUM_FREQ = 100
VAL_FREQ = 5000
def sequence_loss(flow_preds, flow_gt, valid, sparse_lambda=1.0, gamma=0.8, max_flow=MAX_FLOW):
    """ Loss function defined over sequence of flow predictions.

    Parameters
    ----------
    flow_preds : tuple(list, list)
        flow_preds[0][i] is the i-th dense flow prediction (B, 2, H, W).
        flow_preds[1][i] is a tuple (ref, sparse_flow, masks, scores) where
        ref holds keypoint coordinates normalized to [0, 1] as (x, y) and
        sparse_flow the normalized flow predicted at those keypoints
        (masks/scores are unused here).
    flow_gt : Tensor (B, 2, H, W)
        Ground-truth flow.
    valid : Tensor (B, H, W)
        1 where the ground truth is valid.
    sparse_lambda : float
        Weight of the sparse (keypoint) loss term; <= 0 skips it entirely.
    gamma : float
        Exponential weighting of intermediate predictions (currently
        disabled: every prediction gets weight 1.0).
    max_flow : float
        Pixels whose ground-truth magnitude exceeds this are excluded.

    Returns
    -------
    (loss, metrics)
        loss is the combined scalar tensor; metrics holds end-point-error
        diagnostics computed on the final dense prediction.
    """
    n_predictions = len(flow_preds[0])
    flow_loss = 0.0
    sparse_loss = 0.0
    # Exclude invalid pixels and extremely large displacements.
    mag = torch.sum(flow_gt ** 2, dim=1).sqrt()
    dense_valid = (valid >= 0.5) & (mag < max_flow)
    bs, _, I_H, I_W = flow_gt.shape
    for i in range(n_predictions):
        # i_weight = gamma ** (n_predictions - i - 1)
        i_weight = 1.0
        i_loss = (flow_preds[0][i] - flow_gt).abs()
        flow_loss += i_weight * (dense_valid[:, None] * i_loss).mean()
        if sparse_lambda > 0.0:
            ref, sparse_flow, _, _ = flow_preds[1][i]
            scale = torch.tensor((I_W - 1, I_H - 1), dtype=torch.float32).view(1, 1, 2).to(sparse_flow.device)
            flatten_gt = flow_gt.flatten(2).permute(0, 2, 1)
            flatten_valid = valid.flatten(1)
            coords = torch.round(ref * scale).long()
            # Row-major flat index of each keypoint into the (H*W)-flattened
            # maps. BUG FIX: this previously multiplied y * x instead of
            # computing y * W + x, so the sparse loss gathered ground truth
            # from the wrong pixels.
            coords = torch.clamp_max(coords[..., 1] * I_W + coords[..., 0], I_H * I_W - 1)
            sparse_gt = torch.gather(flatten_gt, 1, coords.unsqueeze(-1).repeat(1, 1, 2))
            sparse_valid = torch.gather(flatten_valid, 1, coords)
            sparse_valid = (sparse_valid >= 0.5) & (torch.sum(sparse_gt ** 2, dim=-1).sqrt() < max_flow)
            # Compare in pixel units: sparse_flow is normalized, so rescale.
            sparse_i_loss = (sparse_flow * scale - sparse_gt).abs()
            sparse_loss += i_weight * (sparse_valid[..., None] * sparse_i_loss).mean()
    loss = flow_loss + sparse_loss * sparse_lambda
    # End-point error of the final dense prediction over valid pixels only.
    epe = torch.sum((flow_preds[0][-1] - flow_gt)**2, dim=1).sqrt()
    epe = epe.view(-1)[dense_valid.view(-1)]
    metrics = {
        'epe': epe.mean().item(),
        '1px': (epe < 1).float().mean().item(),
        '3px': (epe < 3).float().mean().item(),
        '5px': (epe < 5).float().mean().item(),
        'loss': loss,
        'flow_loss': flow_loss,
        'sparse_loss': sparse_loss
    }
    return loss, metrics
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def fetch_optimizer(args, model):
    """Create the AdamW optimizer and its step-decay LR scheduler.

    Returns the (optimizer, scheduler) pair used by the training loop.
    """
    trainable = model.parameters()
    optimizer = optim.AdamW(trainable, lr=args.lr,
                            weight_decay=args.wdecay, eps=args.epsilon)
    # Decay the learning rate once, after 80% of the training steps.
    decay_step = round(args.num_steps * 0.8)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, decay_step)
    return optimizer, scheduler
class Logger:
    """Console + TensorBoard logger for the RAFT training loop.

    Accumulates per-step metrics, periodically prints and flushes them,
    and renders qualitative flow/keypoint visualizations as image grids.
    The SummaryWriter is created lazily on first use.
    """
    def __init__(self, model, scheduler):
        # model is kept for reference; scheduler supplies the current LR
        # for the printed status line.
        self.model = model
        self.scheduler = scheduler
        self.total_steps = 0
        # Running sums of each metric since the last flush.
        self.running_loss = {}
        self.writer = None
    def _print_training_status(self):
        """Print averaged metrics, push them to TensorBoard, and reset sums."""
        # Average each metric over the SUM_FREQ steps since the last flush.
        metrics_data = [self.running_loss[k]/SUM_FREQ for k in sorted(self.running_loss.keys())]
        training_str = "[{:6d}, {:10.7f}] ".format(self.total_steps+1, self.scheduler.get_last_lr()[0])
        metrics_str = ("{:10.4f}, "*len(metrics_data)).format(*metrics_data)
        # print the training status
        print(training_str + metrics_str)
        if self.writer is None:
            self.writer = SummaryWriter()
        for k in self.running_loss:
            self.writer.add_scalar(k, self.running_loss[k]/SUM_FREQ, self.total_steps)
            self.running_loss[k] = 0.0
    def push(self, metrics):
        """Accumulate one step's metrics; flush every SUM_FREQ steps."""
        self.total_steps += 1
        for key in metrics:
            if key not in self.running_loss:
                self.running_loss[key] = 0.0
            self.running_loss[key] += metrics[key]
        if self.total_steps % SUM_FREQ == SUM_FREQ-1:
            self._print_training_status()
            self.running_loss = {}
    def write_dict(self, results):
        """Write a dict of scalar values (e.g. validation results) to TensorBoard."""
        if self.writer is None:
            self.writer = SummaryWriter()
        for key in results:
            self.writer.add_scalar(key, results[key], self.total_steps)
    def write_image(self, image1, image2, target, pred, phase="T", idx=0):
        """Render one (unbatched) sample: inputs, GT flow, predictions, masks.

        image1/image2 are (C, H, W) tensors; target is the (2, H, W) GT flow;
        pred follows the (dense_preds, sparse_tuples) structure produced by
        the model. NOTE(review): coords/confidence/this_pred/masks from the
        *last* loop iteration are reused for the mask panel below — appears
        intentional (visualize the final prediction) but worth confirming.
        """
        if self.writer is None:
            self.writer = SummaryWriter()
        _, I_H, I_W = image1.shape
        # Scale factor mapping normalized (x, y) keypoints to pixel coords.
        scale = torch.tensor((I_W, I_H), dtype=torch.float32).view(1, 2).to(image1.device)
        image1 = image1.detach().cpu().numpy()
        image1 = np.transpose(image1, (1, 2, 0))
        image2 = image2.detach().cpu().numpy()
        image2 = np.transpose(image2, (1, 2, 0))
        target = target.detach().cpu().numpy()
        target = np.transpose(target, (1, 2, 0))
        target_img = flow_vis.flow_to_color(target, convert_to_bgr=False)
        pred_img = list()
        for p_i in range(len(pred[0])):
            ref, sparse_flow, masks, scores = pred[1][p_i]
            coords = torch.round(ref.squeeze(0) * scale).long()
            coords = coords.cpu().numpy()
            confidence = np.squeeze(scores.squeeze(0).cpu().numpy())
            # Draw every keypoint; blue intensity encodes its confidence.
            ref_img = cv2.cvtColor(np.array(image1, dtype=np.uint8), cv2.COLOR_RGB2BGR)
            for k_i in range(len(coords)):
                coord = coords[k_i]
                ref_img = cv2.circle(ref_img, coord, 10, (round(255 * confidence[k_i]), 0, 0), 10)
            ref_img = cv2.cvtColor(np.array(ref_img, dtype=np.uint8), cv2.COLOR_BGR2RGB)
            pred_img.append(ref_img)
            this_pred = pred[0][p_i].squeeze(0).detach().cpu().numpy()
            this_pred = np.transpose(this_pred, (1, 2, 0))
            this_pred = flow_vis.flow_to_color(this_pred, convert_to_bgr=False)
            pred_img.append(this_pred)
        mask_img = list()
        top_k = len(pred[0])
        # top_k_indices = np.argsort(-confidence)[:top_k]
        masks = masks.squeeze(0).cpu()
        # masks = masks.reshape(self.num_keypoints, 1, H, W)
        # Upsample the per-keypoint masks to the full image resolution.
        masks = F.interpolate(masks, size=(I_H, I_W), mode="bilinear", align_corners=False).numpy()
        masks = masks.squeeze(1)
        # Show the top_k masks with the largest total activation.
        top_k_indices = np.argsort(-np.sum(masks, axis=(1, 2)))[:top_k]
        for m_i in top_k_indices:
            coord = coords[m_i]
            # ref_img = cv2.circle(ref_img, coord, 10, (255, 0, 0), 10)
            ref_img = cv2.cvtColor(np.array(image1, dtype=np.uint8), cv2.COLOR_RGB2BGR)
            ref_img = cv2.circle(ref_img, coord, 10, (round(255 * confidence[m_i]), 0, 0), 10)
            ref_img = cv2.cvtColor(np.array(ref_img, dtype=np.uint8), cv2.COLOR_BGR2RGB)
            mask_img.append(ref_img)
            # Flow visualization gated by this keypoint's soft mask.
            masked_flow = np.expand_dims(masks[m_i], axis=-1) * this_pred
            mask_img.append(masked_flow)
        pred_img = np.concatenate(pred_img, axis=1)
        mask_img = np.concatenate(mask_img, axis=1)
        # Two rows: predictions on top, mask visualizations below.
        image = np.concatenate((np.concatenate((image1, image2, target_img, pred_img), axis=1),
                                np.concatenate((image1, image2, target_img, mask_img), axis=1)), axis=0)
        image = image.astype(np.uint8)
        self.writer.add_image("{}_Image_{:02d}".format(phase, idx + 1), image, self.total_steps, dataformats='HWC')
    def write_images(self, image1, image2, targets, preds, phase="T"):
        """Batched variant of write_image: visualize up to 10 random samples.

        image1/image2 are (B, C, H, W) tensors; targets is (B, 2, H, W).
        """
        if self.writer is None:
            self.writer = SummaryWriter()
        _, _, I_H, I_W = image1.shape
        # Scale factor mapping normalized (x, y) keypoints to pixel coords.
        scale = torch.tensor((I_W, I_H), dtype=torch.float32).view(1, 1, 2).to(image1.device)
        image1 = image1.detach().cpu().numpy()
        image1 = np.transpose(image1, (0, 2, 3, 1))
        image2 = image2.detach().cpu().numpy()
        image2 = np.transpose(image2, (0, 2, 3, 1))
        targets = targets.detach().cpu().numpy()
        targets = np.transpose(targets, (0, 2, 3, 1))
        sampled_indices = random.sample(range(len(targets)), min(10, len(targets)))
        for i_i, n_i in enumerate(sampled_indices):
            this_image1 = image1[n_i]
            this_image2 = image2[n_i]
            target_img = flow_vis.flow_to_color(targets[n_i], convert_to_bgr=False)
            pred_img = list()
            for p_i in range(len(preds[0])):
                ref, sparse_flow, masks, scores = preds[1][p_i]
                coords = torch.round(ref * scale).long()
                coords = coords.cpu().numpy()[n_i]
                confidence = np.squeeze(scores.cpu().numpy()[n_i])
                # Draw every keypoint; blue intensity encodes confidence.
                ref_img = cv2.cvtColor(np.array(this_image1, dtype=np.uint8), cv2.COLOR_RGB2BGR)
                for k_i in range(len(coords)):
                    coord = coords[k_i]
                    # ref_img = cv2.circle(ref_img, coord, 10, (255, 0, 0), 10)
                    ref_img = cv2.circle(ref_img, coord, 10, (round(255 * confidence[k_i]), 0, 0), 10)
                ref_img = cv2.cvtColor(np.array(ref_img, dtype=np.uint8), cv2.COLOR_BGR2RGB)
                pred_img.append(ref_img)
                this_pred = preds[0][p_i].detach().cpu().numpy()[n_i]
                this_pred = np.transpose(this_pred, (1, 2, 0))
                this_pred = flow_vis.flow_to_color(this_pred, convert_to_bgr=False)
                pred_img.append(this_pred)
            mask_img = list()
            top_k = len(preds[0])
            # top_k_indices = np.argsort(-confidence)[:top_k]
            masks = masks[n_i].cpu()
            # Upsample the per-keypoint masks to full image resolution.
            masks = F.interpolate(masks, size=(I_H, I_W), mode="bilinear", align_corners=False).numpy()
            masks = masks.squeeze(1)
            top_k_indices = np.argsort(-np.sum(masks, axis=(1, 2)))[:top_k]
            for m_i in top_k_indices:
                coord = coords[m_i]
                # ref_img = cv2.circle(ref_img, coord, 10, (255, 0, 0), 10)
                ref_img = cv2.cvtColor(np.array(this_image1, dtype=np.uint8), cv2.COLOR_RGB2BGR)
                ref_img = cv2.circle(ref_img, coord, 10, (round(255 * confidence[m_i]), 0, 0), 10)
                ref_img = cv2.cvtColor(np.array(ref_img, dtype=np.uint8), cv2.COLOR_BGR2RGB)
                mask_img.append(ref_img)
                masked_flow = np.expand_dims(masks[m_i], axis=-1) * this_pred
                mask_img.append(masked_flow)
            pred_img = np.concatenate(pred_img, axis=1)
            mask_img = np.concatenate(mask_img, axis=1)
            image = np.concatenate((np.concatenate((this_image1, this_image2, target_img, pred_img), axis=1),
                                    np.concatenate((this_image1, this_image2, target_img, mask_img), axis=1)), axis=0)
            image = image.astype(np.uint8)
            self.writer.add_image("{}_Image_{:02d}".format(phase, i_i + 1), image, self.total_steps, dataformats='HWC')
    def write_seg_images(self, image1, image2, targets, preds, phase="T"):
        """Like write_images but for every sample and without mask panels."""
        if self.writer is None:
            self.writer = SummaryWriter()
        _, _, I_H, I_W = image1.shape
        scale = torch.tensor((I_W, I_H), dtype=torch.float32).view(1, 1, 2).to(image1.device)
        image1 = image1.detach().cpu().numpy()
        image1 = np.transpose(image1, (0, 2, 3, 1))
        image2 = image2.detach().cpu().numpy()
        image2 = np.transpose(image2, (0, 2, 3, 1))
        targets = targets.detach().cpu().numpy()
        targets = np.transpose(targets, (0, 2, 3, 1))
        for n_i in range(len(targets)):
            this_image1 = image1[n_i]
            this_image2 = image2[n_i]
            target_img = flow_vis.flow_to_color(targets[n_i], convert_to_bgr=False)
            pred_img = list()
            for p_i in range(len(preds[0])):
                ref, sparse_flow, masks, scores = preds[1][p_i]
                coords = torch.round(ref * scale).long()
                coords = coords.detach().cpu().numpy()[n_i]
                confidence = np.squeeze(scores.detach().cpu().numpy()[n_i])
                # Draw every keypoint; blue intensity encodes confidence.
                ref_img = cv2.cvtColor(np.array(this_image1, dtype=np.uint8), cv2.COLOR_RGB2BGR)
                for k_i in range(len(coords)):
                    coord = coords[k_i]
                    # ref_img = cv2.circle(ref_img, coord, 10, (255, 0, 0), 10)
                    ref_img = cv2.circle(ref_img, coord, 10, (round(255 * confidence[k_i]), 0, 0), 10)
                ref_img = cv2.cvtColor(np.array(ref_img, dtype=np.uint8), cv2.COLOR_BGR2RGB)
                pred_img.append(ref_img)
                this_pred = preds[0][p_i].detach().cpu().numpy()[n_i]
                this_pred = np.transpose(this_pred, (1, 2, 0))
                pred_img.append(flow_vis.flow_to_color(this_pred, convert_to_bgr=False))
            pred_img = np.concatenate(pred_img, axis=1)
            image = np.concatenate((this_image1, this_image2, target_img, pred_img), axis=1)
            image = image.astype(np.uint8)
            self.writer.add_image("{}_Image_{:02d}".format(phase, n_i + 1), image, self.total_steps, dataformats='HWC')
    def close(self):
        """Flush and close the underlying SummaryWriter."""
        self.writer.close()
def train(args):
    """Train a RAFT model according to the parsed command-line *args*.

    Builds the model (optionally restoring a checkpoint), iterates the
    training dataloader until args.num_steps optimizer steps have run,
    periodically logs images, validates, and checkpoints.

    Returns the path of the final saved checkpoint.
    """
    model = nn.DataParallel(RAFT(args), device_ids=args.gpus)
    print("Parameter Count: %d" % count_parameters(model))
    if args.restore_ckpt is not None:
        # strict=False tolerates architecture changes between checkpoints.
        model.load_state_dict(torch.load(args.restore_ckpt), strict=False)
    model.cuda()
    model.train()
    # if args.stage != 'chairs':
    #     model.module.freeze_bn()
    train_loader = datasets.fetch_dataloader(args)
    optimizer, scheduler = fetch_optimizer(args, model)
    total_steps = 0
    # scaler = GradScaler(enabled=args.mixed_precision)
    logger = Logger(model, scheduler)
    VAL_FREQ = 5000
    # VAL_FREQ = 10
    IMAGE_FREQ = 5000
    should_keep_training = True
    while should_keep_training:
        for i_batch, data_blob in enumerate(train_loader):
            optimizer.zero_grad()
            image1, image2, flow, valid = [x.cuda() for x in data_blob]
            if args.add_noise:
                # Augment with Gaussian noise of a random per-batch stddev.
                stdv = np.random.uniform(0.0, 5.0)
                image1 = (image1 + stdv * torch.randn(*image1.shape).cuda()).clamp(0.0, 255.0)
                image2 = (image2 + stdv * torch.randn(*image2.shape).cuda()).clamp(0.0, 255.0)
            flow_predictions = model(image1, image2, iters=args.iters)
            # The sparse keypoint loss is only used for the first 20k steps.
            sparse_lambda = 1.0 if total_steps < 20000 else 0.0
            loss, metrics = sequence_loss(flow_predictions, flow, valid, sparse_lambda, args.gamma)
            loss.backward()
            # BUG FIX: gradient clipping must happen AFTER backward() (and
            # before the optimizer step); previously it ran before backward,
            # so the just-zeroed gradients were clipped and the real
            # gradients never were.
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
            optimizer.step()
            scheduler.step()
            logger.push(metrics)
            if total_steps % IMAGE_FREQ == IMAGE_FREQ - 1:
                logger.write_images(image1, image2, flow, flow_predictions, phase="T")
            if total_steps % VAL_FREQ == VAL_FREQ - 1:
                # Intermediate checkpoint, then validate on each requested set.
                PATH = 'checkpoints/%d_%s.pth' % (total_steps+1, args.name)
                torch.save(model.state_dict(), PATH)
                results = {}
                for val_dataset in args.validation:
                    if val_dataset == 'chairs':
                        results.update(evaluate.validate_chairs(model.module, logger=logger, iters=args.iters))
                    elif val_dataset == 'sintel':
                        results.update(evaluate.validate_sintel(model.module, iters=args.iters))
                    elif val_dataset == 'kitti':
                        results.update(evaluate.validate_kitti(model.module, iters=args.iters))
                logger.write_dict(results)
                # Validation switches the model to eval mode; restore training.
                model.train()
                if args.stage != 'chairs':
                    model.module.freeze_bn()
            total_steps += 1
            if total_steps > args.num_steps:
                should_keep_training = False
                break
    logger.close()
    PATH = 'checkpoints/%s.pth' % args.name
    torch.save(model.state_dict(), PATH)
    return PATH
if __name__ == '__main__':
    # Command-line interface for launching a training run.
    cli = argparse.ArgumentParser()
    cli.add_argument('--name', default='raft', help="name your experiment")
    cli.add_argument('--stage', help="determines which dataset to use for training")
    cli.add_argument('--restore_ckpt', help="restore checkpoint")
    cli.add_argument('--small', action='store_true', help='use small model')
    cli.add_argument('--validation', type=str, nargs='+')
    # Optimization hyper-parameters.
    cli.add_argument('--lr', type=float, default=0.0002)
    cli.add_argument('--num_steps', type=int, default=100000)
    cli.add_argument('--batch_size', type=int, default=6)
    cli.add_argument('--image_size', type=int, nargs='+', default=[384, 512])
    cli.add_argument('--gpus', type=int, nargs='+', default=[0, 1])
    cli.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
    cli.add_argument('--iters', type=int, default=3)
    cli.add_argument('--wdecay', type=float, default=.00005)
    cli.add_argument('--epsilon', type=float, default=1e-8)
    cli.add_argument('--clip', type=float, default=1.0)
    cli.add_argument('--dropout', type=float, default=0.0)
    cli.add_argument('--gamma', type=float, default=0.8, help='exponential weighting')
    cli.add_argument('--add_noise', action='store_true')
    args = cli.parse_args()
    # Fix RNG seeds so runs are reproducible.
    torch.manual_seed(2022)
    np.random.seed(2022)
    if not os.path.isdir('checkpoints'):
        os.mkdir('checkpoints')
    train(args)
"flow_vis.flow_to_color",
"numpy.array",
"torch.sum",
"torch.nn.functional.interpolate",
"sys.path.append",
"datasets.fetch_dataloader",
"torch.utils.tensorboard.SummaryWriter",
"evaluate.validate_chairs",
"argparse.ArgumentParser",
"evaluate.validate_kitti",
"os.path.isdir",
"numpy.random.see... | [((59, 82), 'sys.path.append', 'sys.path.append', (['"""core"""'], {}), "('core')\n", (74, 82), False, 'import sys\n'), ((14695, 14726), 'datasets.fetch_dataloader', 'datasets.fetch_dataloader', (['args'], {}), '(args)\n', (14720, 14726), False, 'import datasets\n'), ((17465, 17490), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (17488, 17490), False, 'import argparse\n'), ((18792, 18815), 'torch.manual_seed', 'torch.manual_seed', (['(2022)'], {}), '(2022)\n', (18809, 18815), False, 'import torch\n'), ((18820, 18840), 'numpy.random.seed', 'np.random.seed', (['(2022)'], {}), '(2022)\n', (18834, 18840), True, 'import numpy as np\n'), ((5981, 6012), 'numpy.transpose', 'np.transpose', (['image1', '(1, 2, 0)'], {}), '(image1, (1, 2, 0))\n', (5993, 6012), True, 'import numpy as np\n'), ((6077, 6108), 'numpy.transpose', 'np.transpose', (['image2', '(1, 2, 0)'], {}), '(image2, (1, 2, 0))\n', (6089, 6108), True, 'import numpy as np\n'), ((6173, 6204), 'numpy.transpose', 'np.transpose', (['target', '(1, 2, 0)'], {}), '(target, (1, 2, 0))\n', (6185, 6204), True, 'import numpy as np\n'), ((6227, 6279), 'flow_vis.flow_to_color', 'flow_vis.flow_to_color', (['target'], {'convert_to_bgr': '(False)'}), '(target, convert_to_bgr=False)\n', (6249, 6279), False, 'import flow_vis\n'), ((8223, 8255), 'numpy.concatenate', 'np.concatenate', (['pred_img'], {'axis': '(1)'}), '(pred_img, axis=1)\n', (8237, 8255), True, 'import numpy as np\n'), ((8275, 8307), 'numpy.concatenate', 'np.concatenate', (['mask_img'], {'axis': '(1)'}), '(mask_img, axis=1)\n', (8289, 8307), True, 'import numpy as np\n'), ((9010, 9044), 'numpy.transpose', 'np.transpose', (['image1', '(0, 2, 3, 1)'], {}), '(image1, (0, 2, 3, 1))\n', (9022, 9044), True, 'import numpy as np\n'), ((9109, 9143), 'numpy.transpose', 'np.transpose', (['image2', '(0, 2, 3, 1)'], {}), '(image2, (0, 2, 3, 1))\n', (9121, 9143), True, 'import numpy as np\n'), ((9211, 9246), 'numpy.transpose', 
'np.transpose', (['targets', '(0, 2, 3, 1)'], {}), '(targets, (0, 2, 3, 1))\n', (9223, 9246), True, 'import numpy as np\n'), ((12457, 12491), 'numpy.transpose', 'np.transpose', (['image1', '(0, 2, 3, 1)'], {}), '(image1, (0, 2, 3, 1))\n', (12469, 12491), True, 'import numpy as np\n'), ((12556, 12590), 'numpy.transpose', 'np.transpose', (['image2', '(0, 2, 3, 1)'], {}), '(image2, (0, 2, 3, 1))\n', (12568, 12590), True, 'import numpy as np\n'), ((12658, 12693), 'numpy.transpose', 'np.transpose', (['targets', '(0, 2, 3, 1)'], {}), '(targets, (0, 2, 3, 1))\n', (12670, 12693), True, 'import numpy as np\n'), ((14363, 14373), 'core.ours.RAFT', 'RAFT', (['args'], {}), '(args)\n', (14367, 14373), False, 'from core.ours import RAFT\n'), ((18853, 18881), 'os.path.isdir', 'os.path.isdir', (['"""checkpoints"""'], {}), "('checkpoints')\n", (18866, 18881), False, 'import os\n'), ((18891, 18914), 'os.mkdir', 'os.mkdir', (['"""checkpoints"""'], {}), "('checkpoints')\n", (18899, 18914), False, 'import os\n'), ((1332, 1362), 'torch.sum', 'torch.sum', (['(flow_gt ** 2)'], {'dim': '(1)'}), '(flow_gt ** 2, dim=1)\n', (1341, 1362), False, 'import torch\n'), ((2130, 2193), 'torch.clamp_max', 'torch.clamp_max', (['(coords[..., 1] * coords[..., 0])', '(I_H * I_W - 1)'], {}), '(coords[..., 1] * coords[..., 0], I_H * I_W - 1)\n', (2145, 2193), False, 'import torch\n'), ((2311, 2349), 'torch.gather', 'torch.gather', (['flatten_valid', '(1)', 'coords'], {}), '(flatten_valid, 1, coords)\n', (2323, 2349), False, 'import torch\n'), ((2746, 2798), 'torch.sum', 'torch.sum', (['((flow_preds[0][-1] - flow_gt) ** 2)'], {'dim': '(1)'}), '((flow_preds[0][-1] - flow_gt) ** 2, dim=1)\n', (2755, 2798), False, 'import torch\n'), ((4886, 4901), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (4899, 4901), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((5522, 5537), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (5535, 5537), False, 
'from torch.utils.tensorboard import SummaryWriter\n'), ((5773, 5788), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (5786, 5788), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((7068, 7102), 'numpy.transpose', 'np.transpose', (['this_pred', '(1, 2, 0)'], {}), '(this_pred, (1, 2, 0))\n', (7080, 7102), True, 'import numpy as np\n'), ((7127, 7182), 'flow_vis.flow_to_color', 'flow_vis.flow_to_color', (['this_pred'], {'convert_to_bgr': '(False)'}), '(this_pred, convert_to_bgr=False)\n', (7149, 7182), False, 'import flow_vis\n'), ((8796, 8811), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (8809, 8811), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((9484, 9542), 'flow_vis.flow_to_color', 'flow_vis.flow_to_color', (['targets[n_i]'], {'convert_to_bgr': '(False)'}), '(targets[n_i], convert_to_bgr=False)\n', (9506, 9542), False, 'import flow_vis\n'), ((11627, 11659), 'numpy.concatenate', 'np.concatenate', (['pred_img'], {'axis': '(1)'}), '(pred_img, axis=1)\n', (11641, 11659), True, 'import numpy as np\n'), ((11683, 11715), 'numpy.concatenate', 'np.concatenate', (['mask_img'], {'axis': '(1)'}), '(mask_img, axis=1)\n', (11697, 11715), True, 'import numpy as np\n'), ((12243, 12258), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (12256, 12258), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((12835, 12893), 'flow_vis.flow_to_color', 'flow_vis.flow_to_color', (['targets[n_i]'], {'convert_to_bgr': '(False)'}), '(targets[n_i], convert_to_bgr=False)\n', (12857, 12893), False, 'import flow_vis\n'), ((13974, 14006), 'numpy.concatenate', 'np.concatenate', (['pred_img'], {'axis': '(1)'}), '(pred_img, axis=1)\n', (13988, 14006), True, 'import numpy as np\n'), ((14027, 14099), 'numpy.concatenate', 'np.concatenate', (['(this_image1, this_image2, target_img, pred_img)'], {'axis': '(1)'}), '((this_image1, this_image2, target_img, pred_img), 
axis=1)\n', (14041, 14099), True, 'import numpy as np\n'), ((14525, 14554), 'torch.load', 'torch.load', (['args.restore_ckpt'], {}), '(args.restore_ckpt)\n', (14535, 14554), False, 'import torch\n'), ((6615, 6647), 'numpy.array', 'np.array', (['image1'], {'dtype': 'np.uint8'}), '(image1, dtype=np.uint8)\n', (6623, 6647), True, 'import numpy as np\n'), ((6881, 6914), 'numpy.array', 'np.array', (['ref_img'], {'dtype': 'np.uint8'}), '(ref_img, dtype=np.uint8)\n', (6889, 6914), True, 'import numpy as np\n'), ((7452, 7527), 'torch.nn.functional.interpolate', 'F.interpolate', (['masks'], {'size': '(I_H, I_W)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(masks, size=(I_H, I_W), mode='bilinear', align_corners=False)\n", (7465, 7527), True, 'import torch.nn.functional as F\n'), ((7814, 7846), 'numpy.array', 'np.array', (['image1'], {'dtype': 'np.uint8'}), '(image1, dtype=np.uint8)\n', (7822, 7846), True, 'import numpy as np\n'), ((7997, 8030), 'numpy.array', 'np.array', (['ref_img'], {'dtype': 'np.uint8'}), '(ref_img, dtype=np.uint8)\n', (8005, 8030), True, 'import numpy as np\n'), ((8114, 8149), 'numpy.expand_dims', 'np.expand_dims', (['masks[m_i]'], {'axis': '(-1)'}), '(masks[m_i], axis=-1)\n', (8128, 8149), True, 'import numpy as np\n'), ((8340, 8402), 'numpy.concatenate', 'np.concatenate', (['(image1, image2, target_img, pred_img)'], {'axis': '(1)'}), '((image1, image2, target_img, pred_img), axis=1)\n', (8354, 8402), True, 'import numpy as np\n'), ((8436, 8498), 'numpy.concatenate', 'np.concatenate', (['(image1, image2, target_img, mask_img)'], {'axis': '(1)'}), '((image1, image2, target_img, mask_img), axis=1)\n', (8450, 8498), True, 'import numpy as np\n'), ((10457, 10491), 'numpy.transpose', 'np.transpose', (['this_pred', '(1, 2, 0)'], {}), '(this_pred, (1, 2, 0))\n', (10469, 10491), True, 'import numpy as np\n'), ((10520, 10575), 'flow_vis.flow_to_color', 'flow_vis.flow_to_color', (['this_pred'], {'convert_to_bgr': '(False)'}), '(this_pred, 
convert_to_bgr=False)\n', (10542, 10575), False, 'import flow_vis\n'), ((13826, 13860), 'numpy.transpose', 'np.transpose', (['this_pred', '(1, 2, 0)'], {}), '(this_pred, (1, 2, 0))\n', (13838, 13860), True, 'import numpy as np\n'), ((15267, 15294), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(5.0)'], {}), '(0.0, 5.0)\n', (15284, 15294), True, 'import numpy as np\n'), ((2077, 2101), 'torch.round', 'torch.round', (['(ref * scale)'], {}), '(ref * scale)\n', (2088, 2101), False, 'import torch\n'), ((7605, 7631), 'numpy.sum', 'np.sum', (['masks'], {'axis': '(1, 2)'}), '(masks, axis=(1, 2))\n', (7611, 7631), True, 'import numpy as np\n'), ((9896, 9933), 'numpy.array', 'np.array', (['this_image1'], {'dtype': 'np.uint8'}), '(this_image1, dtype=np.uint8)\n', (9904, 9933), True, 'import numpy as np\n'), ((10263, 10296), 'numpy.array', 'np.array', (['ref_img'], {'dtype': 'np.uint8'}), '(ref_img, dtype=np.uint8)\n', (10271, 10296), True, 'import numpy as np\n'), ((10803, 10878), 'torch.nn.functional.interpolate', 'F.interpolate', (['masks'], {'size': '(I_H, I_W)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(masks, size=(I_H, I_W), mode='bilinear', align_corners=False)\n", (10816, 10878), True, 'import torch.nn.functional as F\n'), ((11189, 11226), 'numpy.array', 'np.array', (['this_image1'], {'dtype': 'np.uint8'}), '(this_image1, dtype=np.uint8)\n', (11197, 11226), True, 'import numpy as np\n'), ((11385, 11418), 'numpy.array', 'np.array', (['ref_img'], {'dtype': 'np.uint8'}), '(ref_img, dtype=np.uint8)\n', (11393, 11418), True, 'import numpy as np\n'), ((11510, 11545), 'numpy.expand_dims', 'np.expand_dims', (['masks[m_i]'], {'axis': '(-1)'}), '(masks[m_i], axis=-1)\n', (11524, 11545), True, 'import numpy as np\n'), ((11752, 11824), 'numpy.concatenate', 'np.concatenate', (['(this_image1, this_image2, target_img, pred_img)'], {'axis': '(1)'}), '((this_image1, this_image2, target_img, pred_img), axis=1)\n', (11766, 11824), True, 'import numpy as np\n'), 
((11862, 11934), 'numpy.concatenate', 'np.concatenate', (['(this_image1, this_image2, target_img, mask_img)'], {'axis': '(1)'}), '((this_image1, this_image2, target_img, mask_img), axis=1)\n', (11876, 11934), True, 'import numpy as np\n'), ((13265, 13302), 'numpy.array', 'np.array', (['this_image1'], {'dtype': 'np.uint8'}), '(this_image1, dtype=np.uint8)\n', (13273, 13302), True, 'import numpy as np\n'), ((13632, 13665), 'numpy.array', 'np.array', (['ref_img'], {'dtype': 'np.uint8'}), '(ref_img, dtype=np.uint8)\n', (13640, 13665), True, 'import numpy as np\n'), ((13893, 13948), 'flow_vis.flow_to_color', 'flow_vis.flow_to_color', (['this_pred'], {'convert_to_bgr': '(False)'}), '(this_pred, convert_to_bgr=False)\n', (13915, 13948), False, 'import flow_vis\n'), ((5841, 5886), 'torch.tensor', 'torch.tensor', (['(I_W, I_H)'], {'dtype': 'torch.float32'}), '((I_W, I_H), dtype=torch.float32)\n', (5853, 5886), False, 'import torch\n'), ((8867, 8912), 'torch.tensor', 'torch.tensor', (['(I_W, I_H)'], {'dtype': 'torch.float32'}), '((I_W, I_H), dtype=torch.float32)\n', (8879, 8912), False, 'import torch\n'), ((9707, 9731), 'torch.round', 'torch.round', (['(ref * scale)'], {}), '(ref * scale)\n', (9718, 9731), False, 'import torch\n'), ((10964, 10990), 'numpy.sum', 'np.sum', (['masks'], {'axis': '(1, 2)'}), '(masks, axis=(1, 2))\n', (10970, 10990), True, 'import numpy as np\n'), ((12314, 12359), 'torch.tensor', 'torch.tensor', (['(I_W, I_H)'], {'dtype': 'torch.float32'}), '((I_W, I_H), dtype=torch.float32)\n', (12326, 12359), False, 'import torch\n'), ((13058, 13082), 'torch.round', 'torch.round', (['(ref * scale)'], {}), '(ref * scale)\n', (13069, 13082), False, 'import torch\n'), ((1859, 1912), 'torch.tensor', 'torch.tensor', (['(I_W - 1, I_H - 1)'], {'dtype': 'torch.float32'}), '((I_W - 1, I_H - 1), dtype=torch.float32)\n', (1871, 1912), False, 'import torch\n'), ((2402, 2435), 'torch.sum', 'torch.sum', (['(sparse_gt ** 2)'], {'dim': '(-1)'}), '(sparse_gt ** 2, dim=-1)\n', 
(2411, 2435), False, 'import torch\n'), ((16602, 16673), 'evaluate.validate_chairs', 'evaluate.validate_chairs', (['model.module'], {'logger': 'logger', 'iters': 'args.iters'}), '(model.module, logger=logger, iters=args.iters)\n', (16626, 16673), False, 'import evaluate\n'), ((16764, 16820), 'evaluate.validate_sintel', 'evaluate.validate_sintel', (['model.module'], {'iters': 'args.iters'}), '(model.module, iters=args.iters)\n', (16788, 16820), False, 'import evaluate\n'), ((16910, 16965), 'evaluate.validate_kitti', 'evaluate.validate_kitti', (['model.module'], {'iters': 'args.iters'}), '(model.module, iters=args.iters)\n', (16933, 16965), False, 'import evaluate\n'), ((15337, 15363), 'torch.randn', 'torch.randn', (['*image1.shape'], {}), '(*image1.shape)\n', (15348, 15363), False, 'import torch\n'), ((15432, 15458), 'torch.randn', 'torch.randn', (['*image2.shape'], {}), '(*image2.shape)\n', (15443, 15458), False, 'import torch\n')] |
from typing import List
Vector = List[float]
Matrix = List[Vector]
def zero_matrix(mat: Matrix) -> None:
    """If any element is 0, zero out its entire row and column (in place).

    All zero positions are recorded first, then the rows/columns are
    cleared, so freshly written zeros never cascade.
    """
    rows_to_clear = {i for i, row in enumerate(mat) if 0 in row}
    cols_to_clear = {j for j in range(len(mat[0]))
                     if any(row[j] == 0 for row in mat)}
    for i in rows_to_clear:
        nullify_row(mat, i)
    for j in cols_to_clear:
        nullify_col(mat, j)
def nullify_row(mat: Matrix, row: int) -> None:
    """Set every element of the given row to 0 (in place)."""
    width = len(mat[0])
    for col in range(width):
        mat[row][col] = 0
def nullify_col(mat: Matrix, col: int) -> None:
    """Set every element of the given column to 0 (in place)."""
    for row in mat:
        row[col] = 0
if __name__ == '__main__':
    # Quick manual check: print a few random matrices before and after
    # zeroing. ic() shows the variable name alongside its value, so the
    # local names here are part of the printed output.
    from icecream import ic
    from numpy.random import PCG64, Generator
    # Fixed seed keeps the demo output reproducible.
    rng = Generator(PCG64(12345))
    for _ in range(3):
        # 4x5 integer matrix with entries in [0, 5] (endpoint included).
        mat0 = rng.integers(0, 5, size=[4, 5], endpoint=True)
        ic(mat0)
        zero_matrix(mat0)
        ic(mat0)
| [
"icecream.ic",
"numpy.random.PCG64"
] | [((816, 828), 'numpy.random.PCG64', 'PCG64', (['(12345)'], {}), '(12345)\n', (821, 828), False, 'from numpy.random import PCG64, Generator\n'), ((913, 921), 'icecream.ic', 'ic', (['mat0'], {}), '(mat0)\n', (915, 921), False, 'from icecream import ic\n'), ((948, 956), 'icecream.ic', 'ic', (['mat0'], {}), '(mat0)\n', (950, 956), False, 'from icecream import ic\n')] |
import numpy as np
import pandas as pd
from statsmodels.regression.lme import MixedLM
from numpy.testing import assert_almost_equal
from . import lme_r_results
from scipy.misc import derivative
from statsmodels.base import _penalties as penalties
import os
import csv
class R_Results(object):
    """
    A class for holding various results obtained from fitting one data
    set using lmer in R.

    Parameters
    ----------
    meth : string
        Either "ml" or "reml".
    irfs : string
        Either "irf", for independent random effects, or "drf" for
        dependent random effects.
    ds_ix : integer
        The number of the data set

    Attributes populated: coef, vcov_r, cov_re_r, sig2_r, loglike,
    optionally ranef_postmean / ranef_condvar, plus the raw data split
    into endog, groups, exog_fe, exog_re.
    """
    def __init__(self, meth, irfs, ds_ix):
        bname = "_%s_%s_%d" % (meth, irfs, ds_ix)
        # Reference values computed in R, stored in the lme_r_results module.
        self.coef = getattr(lme_r_results, "coef" + bname)
        self.vcov_r = getattr(lme_r_results, "vcov" + bname)
        self.cov_re_r = getattr(lme_r_results, "cov_re" + bname)
        self.sig2_r = getattr(lme_r_results, "sig2" + bname)
        self.loglike = getattr(lme_r_results, "loglike" + bname)
        if hasattr(lme_r_results, "ranef_mean" + bname):
            self.ranef_postmean = getattr(lme_r_results, "ranef_mean"
                                          + bname)
            self.ranef_condvar = getattr(lme_r_results,
                                         "ranef_condvar" + bname)
            # Scalar conditional variances are promoted to 2-D so callers
            # can treat them uniformly as matrices.
            self.ranef_condvar = np.atleast_2d(self.ranef_condvar)
        # Load the data file.  BUG FIX: the original opened the file and
        # never closed it; 'with' guarantees the handle is released.
        cur_dir = os.path.dirname(os.path.abspath(__file__))
        rdir = os.path.join(cur_dir, 'results')
        fname = os.path.join(rdir, "lme%02d.csv" % ds_ix)
        with open(fname) as fid:
            rdr = csv.reader(fid)
            header = next(rdr)
            data = [[float(x) for x in line] for line in rdr]
        data = np.asarray(data)
        # Split into exog, endog, etc. by matching header column names.
        self.endog = data[:,header.index("endog")]
        self.groups = data[:,header.index("groups")]
        ii = [i for i,x in enumerate(header) if
              x.startswith("exog_fe")]
        self.exog_fe = data[:,ii]
        ii = [i for i,x in enumerate(header) if
              x.startswith("exog_re")]
        self.exog_re = data[:,ii]
class TestMixedLM(object):
    """
    Tests for MixedLM: analytic score vs. numeric differentiation,
    implicit vs. explicit random intercept, EM/fit smoke test, the
    formula interface, regularized fitting, and agreement with
    reference fits produced by R's lmer (via R_Results).
    """
    # Test analytic scores using numeric differentiation
    # TODO: should also do this with the hessian
    def test_score(self):
        # Problem dimensions: n groups of size m, p fixed effects,
        # pr random effects.
        n = 200
        m = 5
        p = 3
        pr = 2
        # jl selects the "sqrt" vs. plain likelihood parameterization;
        # reml and the covariance penalty weight are also swept.
        # NOTE: the np.random call order below is load-bearing under the
        # fixed seed -- do not reorder statements.
        for jl in 0,1:
            for reml in False,True:
                for cov_pen_wt in 0,10:
                    cov_pen = penalties.PSD(cov_pen_wt)
                    np.random.seed(355890504)
                    exog_fe = np.random.normal(size=(n*m, p))
                    exog_re = np.random.normal(size=(n*m, pr))
                    endog = exog_fe.sum(1) + np.random.normal(size=n*m)
                    groups = np.kron(range(n), np.ones(m))
                    md = MixedLM(endog, exog_fe, groups, exog_re)
                    md.reml = reml
                    md.cov_pen = cov_pen
                    if jl == 0:
                        like = lambda x: -md.loglike_sqrt(x)
                        score = lambda x: -md.score_sqrt(x)
                    else:
                        like = lambda x: -md.loglike(x)
                        score = lambda x: -md.score(x)
                    # Compare the analytic gradient to a finite-difference
                    # approximation at 5 random parameter points.
                    for kr in range(5):
                        fe_params = np.random.normal(size=p)
                        cov_re = np.random.normal(size=(pr,pr))
                        cov_re = np.dot(cov_re.T, cov_re)
                        params_prof = md._pack(fe_params, cov_re)
                        gr = score(params_prof)
                        ngr = np.zeros_like(gr)
                        for k in range(len(ngr)):
                            def f(x):
                                pp = params_prof.copy()
                                pp[k] = x
                                return like(pp)
                            ngr[k] = derivative(f, params_prof[k],
                                               dx=1e-6)
                        # Ratio test: each gradient component should agree
                        # to about 3 decimals.
                        assert_almost_equal(gr / ngr, np.ones(len(gr)),
                                            decimal=3)
    def test_default_re(self):
        # Omitting exog_re must be equivalent to an explicit
        # random-intercept column of ones.
        np.random.seed(323590805)
        exog = np.random.normal(size=(300,4))
        groups = np.kron(np.arange(100), [1,1,1])
        g_errors = np.kron(np.random.normal(size=100), [1,1,1])
        endog = exog.sum(1) + g_errors + np.random.normal(size=300)
        mdf1 = MixedLM(endog, exog, groups).fit()
        mdf2 = MixedLM(endog, exog, groups, np.ones(300)).fit()
        assert_almost_equal(mdf1.params, mdf2.params, decimal=8)
    def test_EM(self):
        # Smoke test: fitting (which includes the EM starting steps)
        # runs without error on a random-intercept model.
        np.random.seed(323590805)
        exog = np.random.normal(size=(300,4))
        groups = np.kron(np.arange(100), [1,1,1])
        g_errors = np.kron(np.random.normal(size=100), [1,1,1])
        endog = exog.sum(1) + g_errors + np.random.normal(size=300)
        mdf1 = MixedLM(endog, exog, groups).fit()
    def test_formulas(self):
        np.random.seed(24109403)
        exog = np.random.normal(size=(300,4))
        exog_re = np.random.normal(size=300)
        groups = np.kron(np.arange(100), [1,1,1])
        g_errors = exog_re * np.kron(np.random.normal(size=100),
                                        [1,1,1])
        endog = exog.sum(1) + g_errors + np.random.normal(size=300)
        mdf1 = MixedLM(endog, exog, groups, exog_re).fit()
        # Refit the same model through the formula interface; the
        # parameter estimates must match the array interface.
        df = pd.DataFrame({"endog": endog})
        for k in range(exog.shape[1]):
            df["exog%d" % k] = exog[:,k]
        df["exog_re"] = exog_re
        md2 = MixedLM.from_formula(
            "endog ~ 0 + exog0 + exog1 + exog2 + exog3",
            groups=groups, data=df)
        md2.set_random("0 + exog_re", data=df)
        mdf2 = md2.fit()
        assert_almost_equal(mdf1.params, mdf2.params)
        # Check that it runs when the dimension of the random effects
        # changes following set_random.
        md2 = MixedLM.from_formula(
            "endog ~ 0 + exog0 + exog1 + exog2 + exog3",
            groups=groups, data=df)
        md2.set_random("exog_re", data=df)
        mdf2 = md2.fit()
    def test_regularized(self):
        np.random.seed(3453908)
        exog = np.random.normal(size=(400,5))
        groups = np.kron(np.arange(100), np.ones(4))
        # True model uses only columns 0 and 2 of exog.
        expected_endog = exog[:,0] - exog[:,2]
        endog = expected_endog +\
                np.kron(np.random.normal(size=100), np.ones(4)) +\
                np.random.normal(size=400)
        # L1 regularization
        md = MixedLM(endog, exog, groups)
        mdf1 = md.fit_regularized(alpha=1.)
        mdf1.summary()
        # L1 regularization
        md = MixedLM(endog, exog, groups)
        mdf2 = md.fit_regularized(alpha=10*np.ones(5))
        mdf2.summary()
        # L2 regularization
        pen = penalties.L2()
        mdf3 = md.fit_regularized(method=pen, alpha=0.)
        mdf3.summary()
        # L2 regularization
        pen = penalties.L2()
        mdf4 = md.fit_regularized(method=pen, alpha=100.)
        mdf4.summary()
        # Pseudo-Huber regularization
        pen = penalties.PseudoHuber(0.3)
        mdf4 = md.fit_regularized(method=pen, alpha=1.)
        mdf4.summary()
    def do1(self, reml, irf, ds_ix):
        """Fit data set ``ds_ix`` and compare against the stored R fit."""
        # No need to check independent random effects when there is
        # only one of them.
        if irf and ds_ix < 6:
            return
        irfs = "irf" if irf else "drf"
        meth = "reml" if reml else "ml"
        rslt = R_Results(meth, irfs, ds_ix)
        # Variance component MLE ~ 0 may require manual tweaking of
        # algorithm parameters, so exclude from tests for now.
        if np.min(np.diag(rslt.cov_re_r)) < 0.01:
            print("Skipping %d since solution is on boundary." % ds_ix)
            return
        # Fit the model
        md = MixedLM(rslt.endog, rslt.exog_fe, rslt.groups,
                     rslt.exog_re)
        if not irf: # Free random effects covariance
            mdf = md.fit(gtol=1e-7, reml=reml)
        else: # Independent random effects
            k_fe = rslt.exog_fe.shape[1]
            k_re = rslt.exog_re.shape[1]
            mdf = md.fit(reml=reml, gtol=1e-7,
                         free=(np.ones(k_fe), np.eye(k_re)))
        assert_almost_equal(mdf.fe_params, rslt.coef, decimal=4)
        assert_almost_equal(mdf.cov_re, rslt.cov_re_r, decimal=4)
        assert_almost_equal(mdf.sig2, rslt.sig2_r, decimal=4)
        # Only the fixed-effects part of the covariance is compared.
        pf = rslt.exog_fe.shape[1]
        assert_almost_equal(rslt.vcov_r, mdf.cov_params()[0:pf,0:pf],
                            decimal=3)
        assert_almost_equal(mdf.likeval, rslt.loglike[0], decimal=2)
        # Not supported in R
        if not irf:
            assert_almost_equal(mdf.ranef()[0], rslt.ranef_postmean,
                                decimal=3)
            assert_almost_equal(mdf.ranef_cov()[0],
                                rslt.ranef_condvar,
                                decimal=3)
    # Run all the tests against R
    def test_r(self):
        # nose-style test generator: yields one (do1, reml, irf, ds_ix)
        # case per lme*.csv reference data file in results/.
        cur_dir = os.path.dirname(os.path.abspath(__file__))
        rdir = os.path.join(cur_dir, 'results')
        fnames = os.listdir(rdir)
        fnames = [x for x in fnames if x.startswith("lme")
                  and x.endswith(".csv")]
        for fname in fnames:
            for reml in False,True:
                for irf in False,True:
                    ds_ix = int(fname[3:5])
                    yield self.do1, reml, irf, ds_ix
if __name__=="__main__":
    import nose
    # Run this module's tests under nose when executed directly:
    # verbose, stop at first failure, drop into pdb on errors and
    # failures, and keep the interpreter alive afterwards (exit=False).
    nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
                   exit=False)
| [
"numpy.arange",
"numpy.atleast_2d",
"os.listdir",
"statsmodels.regression.lme.MixedLM",
"numpy.asarray",
"scipy.misc.derivative",
"numpy.testing.assert_almost_equal",
"numpy.dot",
"numpy.random.seed",
"statsmodels.base._penalties.PseudoHuber",
"pandas.DataFrame",
"csv.reader",
"numpy.random.... | [((9549, 9636), 'nose.runmodule', 'nose.runmodule', ([], {'argv': "[__file__, '-vvs', '-x', '--pdb', '--pdb-failure']", 'exit': '(False)'}), "(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n", (9563, 9636), False, 'import nose\n'), ((1542, 1574), 'os.path.join', 'os.path.join', (['cur_dir', '"""results"""'], {}), "(cur_dir, 'results')\n", (1554, 1574), False, 'import os\n'), ((1591, 1632), 'os.path.join', 'os.path.join', (['rdir', "('lme%02d.csv' % ds_ix)"], {}), "(rdir, 'lme%02d.csv' % ds_ix)\n", (1603, 1632), False, 'import os\n'), ((1673, 1688), 'csv.reader', 'csv.reader', (['fid'], {}), '(fid)\n', (1683, 1688), False, 'import csv\n'), ((1789, 1805), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (1799, 1805), True, 'import numpy as np\n'), ((4224, 4249), 'numpy.random.seed', 'np.random.seed', (['(323590805)'], {}), '(323590805)\n', (4238, 4249), True, 'import numpy as np\n'), ((4265, 4296), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(300, 4)'}), '(size=(300, 4))\n', (4281, 4296), True, 'import numpy as np\n'), ((4600, 4656), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf1.params', 'mdf2.params'], {'decimal': '(8)'}), '(mdf1.params, mdf2.params, decimal=8)\n', (4619, 4656), False, 'from numpy.testing import assert_almost_equal\n'), ((4691, 4716), 'numpy.random.seed', 'np.random.seed', (['(323590805)'], {}), '(323590805)\n', (4705, 4716), True, 'import numpy as np\n'), ((4732, 4763), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(300, 4)'}), '(size=(300, 4))\n', (4748, 4763), True, 'import numpy as np\n'), ((5034, 5058), 'numpy.random.seed', 'np.random.seed', (['(24109403)'], {}), '(24109403)\n', (5048, 5058), True, 'import numpy as np\n'), ((5074, 5105), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(300, 4)'}), '(size=(300, 4))\n', (5090, 5105), True, 'import numpy as np\n'), ((5123, 5149), 'numpy.random.normal', 'np.random.normal', ([], {'size': 
'(300)'}), '(size=300)\n', (5139, 5149), True, 'import numpy as np\n'), ((5453, 5483), 'pandas.DataFrame', 'pd.DataFrame', (["{'endog': endog}"], {}), "({'endog': endog})\n", (5465, 5483), True, 'import pandas as pd\n'), ((5610, 5704), 'statsmodels.regression.lme.MixedLM.from_formula', 'MixedLM.from_formula', (['"""endog ~ 0 + exog0 + exog1 + exog2 + exog3"""'], {'groups': 'groups', 'data': 'df'}), "('endog ~ 0 + exog0 + exog1 + exog2 + exog3', groups=\n groups, data=df)\n", (5630, 5704), False, 'from statsmodels.regression.lme import MixedLM\n'), ((5806, 5851), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf1.params', 'mdf2.params'], {}), '(mdf1.params, mdf2.params)\n', (5825, 5851), False, 'from numpy.testing import assert_almost_equal\n'), ((5977, 6071), 'statsmodels.regression.lme.MixedLM.from_formula', 'MixedLM.from_formula', (['"""endog ~ 0 + exog0 + exog1 + exog2 + exog3"""'], {'groups': 'groups', 'data': 'df'}), "('endog ~ 0 + exog0 + exog1 + exog2 + exog3', groups=\n groups, data=df)\n", (5997, 6071), False, 'from statsmodels.regression.lme import MixedLM\n'), ((6203, 6226), 'numpy.random.seed', 'np.random.seed', (['(3453908)'], {}), '(3453908)\n', (6217, 6226), True, 'import numpy as np\n'), ((6242, 6273), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(400, 5)'}), '(size=(400, 5))\n', (6258, 6273), True, 'import numpy as np\n'), ((6559, 6587), 'statsmodels.regression.lme.MixedLM', 'MixedLM', (['endog', 'exog', 'groups'], {}), '(endog, exog, groups)\n', (6566, 6587), False, 'from statsmodels.regression.lme import MixedLM\n'), ((6697, 6725), 'statsmodels.regression.lme.MixedLM', 'MixedLM', (['endog', 'exog', 'groups'], {}), '(endog, exog, groups)\n', (6704, 6725), False, 'from statsmodels.regression.lme import MixedLM\n'), ((6847, 6861), 'statsmodels.base._penalties.L2', 'penalties.L2', ([], {}), '()\n', (6859, 6861), True, 'from statsmodels.base import _penalties as penalties\n'), ((6984, 6998), 
'statsmodels.base._penalties.L2', 'penalties.L2', ([], {}), '()\n', (6996, 6998), True, 'from statsmodels.base import _penalties as penalties\n'), ((7133, 7159), 'statsmodels.base._penalties.PseudoHuber', 'penalties.PseudoHuber', (['(0.3)'], {}), '(0.3)\n', (7154, 7159), True, 'from statsmodels.base import _penalties as penalties\n'), ((7860, 7920), 'statsmodels.regression.lme.MixedLM', 'MixedLM', (['rslt.endog', 'rslt.exog_fe', 'rslt.groups', 'rslt.exog_re'], {}), '(rslt.endog, rslt.exog_fe, rslt.groups, rslt.exog_re)\n', (7867, 7920), False, 'from statsmodels.regression.lme import MixedLM\n'), ((8284, 8340), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.fe_params', 'rslt.coef'], {'decimal': '(4)'}), '(mdf.fe_params, rslt.coef, decimal=4)\n', (8303, 8340), False, 'from numpy.testing import assert_almost_equal\n'), ((8349, 8406), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.cov_re', 'rslt.cov_re_r'], {'decimal': '(4)'}), '(mdf.cov_re, rslt.cov_re_r, decimal=4)\n', (8368, 8406), False, 'from numpy.testing import assert_almost_equal\n'), ((8415, 8468), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.sig2', 'rslt.sig2_r'], {'decimal': '(4)'}), '(mdf.sig2, rslt.sig2_r, decimal=4)\n', (8434, 8468), False, 'from numpy.testing import assert_almost_equal\n'), ((8623, 8683), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['mdf.likeval', 'rslt.loglike[0]'], {'decimal': '(2)'}), '(mdf.likeval, rslt.loglike[0], decimal=2)\n', (8642, 8683), False, 'from numpy.testing import assert_almost_equal\n'), ((9127, 9159), 'os.path.join', 'os.path.join', (['cur_dir', '"""results"""'], {}), "(cur_dir, 'results')\n", (9139, 9159), False, 'import os\n'), ((9177, 9193), 'os.listdir', 'os.listdir', (['rdir'], {}), '(rdir)\n', (9187, 9193), False, 'import os\n'), ((1402, 1435), 'numpy.atleast_2d', 'np.atleast_2d', (['self.ranef_condvar'], {}), '(self.ranef_condvar)\n', (1415, 1435), True, 'import numpy as np\n'), 
((1500, 1525), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1515, 1525), False, 'import os\n'), ((4321, 4335), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (4330, 4335), True, 'import numpy as np\n'), ((4373, 4399), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (4389, 4399), True, 'import numpy as np\n'), ((4451, 4477), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(300)'}), '(size=300)\n', (4467, 4477), True, 'import numpy as np\n'), ((4788, 4802), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (4797, 4802), True, 'import numpy as np\n'), ((4840, 4866), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (4856, 4866), True, 'import numpy as np\n'), ((4918, 4944), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(300)'}), '(size=300)\n', (4934, 4944), True, 'import numpy as np\n'), ((5175, 5189), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (5184, 5189), True, 'import numpy as np\n'), ((5352, 5378), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(300)'}), '(size=300)\n', (5368, 5378), True, 'import numpy as np\n'), ((6298, 6312), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (6307, 6312), True, 'import numpy as np\n'), ((6314, 6324), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (6321, 6324), True, 'import numpy as np\n'), ((6490, 6516), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(400)'}), '(size=400)\n', (6506, 6516), True, 'import numpy as np\n'), ((9085, 9110), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (9100, 9110), False, 'import os\n'), ((4493, 4521), 'statsmodels.regression.lme.MixedLM', 'MixedLM', (['endog', 'exog', 'groups'], {}), '(endog, exog, groups)\n', (4500, 4521), False, 'from statsmodels.regression.lme import MixedLM\n'), ((4960, 4988), 'statsmodels.regression.lme.MixedLM', 'MixedLM', (['endog', 'exog', 'groups'], {}), 
'(endog, exog, groups)\n', (4967, 4988), False, 'from statsmodels.regression.lme import MixedLM\n'), ((5237, 5263), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (5253, 5263), True, 'import numpy as np\n'), ((5395, 5432), 'statsmodels.regression.lme.MixedLM', 'MixedLM', (['endog', 'exog', 'groups', 'exog_re'], {}), '(endog, exog, groups, exog_re)\n', (5402, 5432), False, 'from statsmodels.regression.lme import MixedLM\n'), ((7699, 7721), 'numpy.diag', 'np.diag', (['rslt.cov_re_r'], {}), '(rslt.cov_re_r)\n', (7706, 7721), True, 'import numpy as np\n'), ((2547, 2572), 'statsmodels.base._penalties.PSD', 'penalties.PSD', (['cov_pen_wt'], {}), '(cov_pen_wt)\n', (2560, 2572), True, 'from statsmodels.base import _penalties as penalties\n'), ((2594, 2619), 'numpy.random.seed', 'np.random.seed', (['(355890504)'], {}), '(355890504)\n', (2608, 2619), True, 'import numpy as np\n'), ((2650, 2683), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n * m, p)'}), '(size=(n * m, p))\n', (2666, 2683), True, 'import numpy as np\n'), ((2712, 2746), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n * m, pr)'}), '(size=(n * m, pr))\n', (2728, 2746), True, 'import numpy as np\n'), ((2902, 2942), 'statsmodels.regression.lme.MixedLM', 'MixedLM', (['endog', 'exog_fe', 'groups', 'exog_re'], {}), '(endog, exog_fe, groups, exog_re)\n', (2909, 2942), False, 'from statsmodels.regression.lme import MixedLM\n'), ((4572, 4584), 'numpy.ones', 'np.ones', (['(300)'], {}), '(300)\n', (4579, 4584), True, 'import numpy as np\n'), ((6431, 6457), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (6447, 6457), True, 'import numpy as np\n'), ((6459, 6469), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (6466, 6469), True, 'import numpy as np\n'), ((6769, 6779), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (6776, 6779), True, 'import numpy as np\n'), ((2790, 2818), 'numpy.random.normal', 'np.random.normal', ([], 
{'size': '(n * m)'}), '(size=n * m)\n', (2806, 2818), True, 'import numpy as np\n'), ((2864, 2874), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (2871, 2874), True, 'import numpy as np\n'), ((3386, 3410), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'p'}), '(size=p)\n', (3402, 3410), True, 'import numpy as np\n'), ((3444, 3475), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(pr, pr)'}), '(size=(pr, pr))\n', (3460, 3475), True, 'import numpy as np\n'), ((3508, 3532), 'numpy.dot', 'np.dot', (['cov_re.T', 'cov_re'], {}), '(cov_re.T, cov_re)\n', (3514, 3532), True, 'import numpy as np\n'), ((3678, 3695), 'numpy.zeros_like', 'np.zeros_like', (['gr'], {}), '(gr)\n', (3691, 3695), True, 'import numpy as np\n'), ((8245, 8258), 'numpy.ones', 'np.ones', (['k_fe'], {}), '(k_fe)\n', (8252, 8258), True, 'import numpy as np\n'), ((8260, 8272), 'numpy.eye', 'np.eye', (['k_re'], {}), '(k_re)\n', (8266, 8272), True, 'import numpy as np\n'), ((3967, 4006), 'scipy.misc.derivative', 'derivative', (['f', 'params_prof[k]'], {'dx': '(1e-06)'}), '(f, params_prof[k], dx=1e-06)\n', (3977, 4006), False, 'from scipy.misc import derivative\n')] |
from numba import jit
import numpy as np
import cv2
# Fallback test image: 16x8 RGB noise, cubed to bias toward dark values.
# NOTE: this module-level name shadows the stdlib ``random`` module.
random = np.array(np.power(np.random.rand(16, 8, 3), 3) * 255, dtype=np.uint8)
class Camera:
    """
    Wrapper around an OpenCV capture device that maintains a resized
    colour input frame, a two-frame grey-level history and a
    foreground/colour mask.  When no camera can be opened, the
    module-level ``random`` noise image is used as input instead.
    """
    def _resize_frame(self, frame, dst, flip=0):
        """Centre-crop ``frame`` to this camera's aspect ratio, apply the
        requested flip and resize the result into ``dst`` in place.

        flip: 0 = none, 1 = horizontal, 2 = vertical, 3 = both.
        """
        frame_shape = np.shape(frame)
        # Height that matches the target aspect ratio at the frame's width.
        frame_crop_height = int(frame_shape[1] / self._ratio)
        crop_offset = (frame_shape[0] - frame_crop_height) // 2
        if crop_offset > 0:
            cropped_frame = frame[crop_offset:-crop_offset, :, :]
        else:
            cropped_frame = frame
        if flip == 1:  # horizontal
            cv2.resize(cv2.flip(cropped_frame, 1), self._size, dst=dst)
        elif flip == 2:  # vertical
            cv2.resize(cv2.flip(cropped_frame, 0), self._size, dst=dst)
        elif flip == 3:  # both
            cv2.resize(cv2.flip(cropped_frame, -1), self._size, dst=dst)
        else:
            cv2.resize(cropped_frame, self._size, dst=dst)
    def __init__(self, size=(640,360), camera_index=0, no_cam_allowed=False):
        self._no_cam_allowed = no_cam_allowed
        self._cap = cv2.VideoCapture(camera_index)
        self._size = size
        self._ratio = size[0] / size[1]
        self._fgbg = cv2.createBackgroundSubtractorKNN()
        # Buffers are allocated once and reused (cv2 writes via dst=).
        # numpy shapes are (rows, cols) = size reversed.
        self._mask = np.zeros(self._size[::-1], dtype=np.uint8)
        self._input_frame = np.zeros((*self._size[::-1], 3), dtype=np.uint8)
        self._hsv_field = np.zeros((*self._size[::-1], 3), dtype=np.uint8)
        self._last_grey = np.zeros(self._size[::-1], dtype=np.uint8)
        self._current_grey = np.zeros(self._size[::-1], dtype=np.uint8)
        if not self._cap.isOpened():
            # No camera available: seed the input frame with noise.
            # random = np.array(np.power(np.random.rand(16, 8, 3), 3) * 255, dtype=np.uint8)
            self._resize_frame(random, dst=self._input_frame)
        ''' HSV test image
        test_image = np.zeros_like(self._input_frame, dtype=np.uint8)
        x = np.linspace(0, 255, size[0], dtype=np.uint8)
        y = np.linspace(255, 0, size[1], dtype=np.uint8)
        XX, YY = np.meshgrid(x, y)
        test_image[:, :, 1] = XX
        test_image[:, :, 2] = YY
        self._input_frame = cv2.cvtColor(test_image, cv2.COLOR_HSV2BGR)
        '''
    def __del__(self):
        # Release the capture device when the wrapper goes away.
        self._cap.release()
    @jit
    def update(self, bg_option, mirror_screen, mask_level, mask_width):
        """Grab the next frame and refresh the grey frames and the mask.

        bg_option: 3 = KNN background subtraction, 2 = hue keying,
        0 = keep-white keying, anything else = keep-black keying.
        mask_level / mask_width tune the keying thresholds;
        mirror_screen is a flip code (see _resize_frame).
        """
        if self._cap.isOpened():
            # update frame if webcam is active
            ret, frame = self._cap.read()
            if ret:
                self._resize_frame(frame, self._input_frame, mirror_screen)
        else:
            # else use a random image
            self._resize_frame(random, self._input_frame, mirror_screen)
        self._last_grey[:] = self._current_grey
        cv2.cvtColor(self._input_frame, cv2.COLOR_BGR2GRAY, dst=self._current_grey)
        if bg_option == 3: # background subtraction
            self._mask[:] = self._fgbg.apply(self._input_frame, learningRate=0.003)
        else:
            self._mask[:] = 0
            cv2.cvtColor(self._input_frame, cv2.COLOR_BGR2HSV, dst=self._hsv_field)
            # BUG FIX: np.float was a deprecated alias of the builtin
            # float and was removed in NumPy 1.20+/1.24; using float is
            # numerically identical (float64 arrays).
            if bg_option == 2: # hue
                x = np.abs(np.array(self._hsv_field[:,:,0], float) / 180 - mask_level)
                self._mask[x > mask_width] = 255
            elif bg_option == 0: # white
                x = np.array(self._hsv_field[:,:,1], float) / 255
                x = 1 / mask_width * x * x + mask_level
                y = np.array(self._hsv_field[:,:,2], float) / 255
                self._mask[y <= x] = 255
            else: # black
                self._mask[self._hsv_field[:,:,2] > (255 * (1 - mask_level))] = 255
    def reset(self):
        """Re-randomise the fallback noise image (no-op with a live camera)."""
        if not self._cap.isOpened():
            random[:] = np.array(np.power(np.random.rand(16, 8, 3), 3) * 255, dtype=np.uint8)
    @property
    def active(self):
        # Usable when a camera is open, or camera-less mode is allowed.
        return self._cap.isOpened() or self._no_cam_allowed
    @property
    def shape(self):
        return self._size
    @property
    def input_frame(self):
        return self._input_frame
    @property
    def mask(self):
        return self._mask
    @property
    def current_grey(self):
        return self._current_grey
    @property
    def last_grey(self):
        return self._last_grey
    @jit
    def get_mask(self, size, transpose):
        # NOTE(review): ``transpose`` is ignored; the resized mask is
        # always transposed -- confirm against callers before changing.
        return cv2.resize(self._mask, size).T
| [
"numpy.random.rand",
"cv2.resize",
"cv2.flip",
"numpy.array",
"numpy.zeros",
"cv2.VideoCapture",
"cv2.cvtColor",
"numpy.shape",
"cv2.createBackgroundSubtractorKNN"
] | [((219, 234), 'numpy.shape', 'np.shape', (['frame'], {}), '(frame)\n', (227, 234), True, 'import numpy as np\n'), ((1040, 1070), 'cv2.VideoCapture', 'cv2.VideoCapture', (['camera_index'], {}), '(camera_index)\n', (1056, 1070), False, 'import cv2\n'), ((1158, 1193), 'cv2.createBackgroundSubtractorKNN', 'cv2.createBackgroundSubtractorKNN', ([], {}), '()\n', (1191, 1193), False, 'import cv2\n'), ((1215, 1257), 'numpy.zeros', 'np.zeros', (['self._size[::-1]'], {'dtype': 'np.uint8'}), '(self._size[::-1], dtype=np.uint8)\n', (1223, 1257), True, 'import numpy as np\n'), ((1286, 1334), 'numpy.zeros', 'np.zeros', (['(*self._size[::-1], 3)'], {'dtype': 'np.uint8'}), '((*self._size[::-1], 3), dtype=np.uint8)\n', (1294, 1334), True, 'import numpy as np\n'), ((1361, 1409), 'numpy.zeros', 'np.zeros', (['(*self._size[::-1], 3)'], {'dtype': 'np.uint8'}), '((*self._size[::-1], 3), dtype=np.uint8)\n', (1369, 1409), True, 'import numpy as np\n'), ((1437, 1479), 'numpy.zeros', 'np.zeros', (['self._size[::-1]'], {'dtype': 'np.uint8'}), '(self._size[::-1], dtype=np.uint8)\n', (1445, 1479), True, 'import numpy as np\n'), ((1509, 1551), 'numpy.zeros', 'np.zeros', (['self._size[::-1]'], {'dtype': 'np.uint8'}), '(self._size[::-1], dtype=np.uint8)\n', (1517, 1551), True, 'import numpy as np\n'), ((2713, 2788), 'cv2.cvtColor', 'cv2.cvtColor', (['self._input_frame', 'cv2.COLOR_BGR2GRAY'], {'dst': 'self._current_grey'}), '(self._input_frame, cv2.COLOR_BGR2GRAY, dst=self._current_grey)\n', (2725, 2788), False, 'import cv2\n'), ((80, 104), 'numpy.random.rand', 'np.random.rand', (['(16)', '(8)', '(3)'], {}), '(16, 8, 3)\n', (94, 104), True, 'import numpy as np\n'), ((2982, 3053), 'cv2.cvtColor', 'cv2.cvtColor', (['self._input_frame', 'cv2.COLOR_BGR2HSV'], {'dst': 'self._hsv_field'}), '(self._input_frame, cv2.COLOR_BGR2HSV, dst=self._hsv_field)\n', (2994, 3053), False, 'import cv2\n'), ((4280, 4308), 'cv2.resize', 'cv2.resize', (['self._mask', 'size'], {}), '(self._mask, size)\n', (4290, 4308), 
False, 'import cv2\n'), ((562, 588), 'cv2.flip', 'cv2.flip', (['cropped_frame', '(1)'], {}), '(cropped_frame, 1)\n', (570, 588), False, 'import cv2\n'), ((669, 695), 'cv2.flip', 'cv2.flip', (['cropped_frame', '(0)'], {}), '(cropped_frame, 0)\n', (677, 695), False, 'import cv2\n'), ((848, 894), 'cv2.resize', 'cv2.resize', (['cropped_frame', 'self._size'], {'dst': 'dst'}), '(cropped_frame, self._size, dst=dst)\n', (858, 894), False, 'import cv2\n'), ((772, 799), 'cv2.flip', 'cv2.flip', (['cropped_frame', '(-1)'], {}), '(cropped_frame, -1)\n', (780, 799), False, 'import cv2\n'), ((3291, 3335), 'numpy.array', 'np.array', (['self._hsv_field[:, :, 1]', 'np.float'], {}), '(self._hsv_field[:, :, 1], np.float)\n', (3299, 3335), True, 'import numpy as np\n'), ((3416, 3460), 'numpy.array', 'np.array', (['self._hsv_field[:, :, 2]', 'np.float'], {}), '(self._hsv_field[:, :, 2], np.float)\n', (3424, 3460), True, 'import numpy as np\n'), ((3717, 3741), 'numpy.random.rand', 'np.random.rand', (['(16)', '(8)', '(3)'], {}), '(16, 8, 3)\n', (3731, 3741), True, 'import numpy as np\n'), ((3118, 3162), 'numpy.array', 'np.array', (['self._hsv_field[:, :, 0]', 'np.float'], {}), '(self._hsv_field[:, :, 0], np.float)\n', (3126, 3162), True, 'import numpy as np\n')] |
import numpy as np
from PIL import Image
import sys
# 2018.05.29
# create Color R,G,B Range
# Build the palette of "key" colours: one triple for every channel value
# that is a multiple of Color_Diff (0, 64, 128, 192), i.e. 4**3 = 64 RGB
# combinations.  Stepping the ranges directly produces exactly the array
# the original triple loop built, but in 64 iterations instead of
# scanning all 256**3 (~16.7M) combinations.
Color_Diff = int(256 / 4)
Color_Range = np.array([[r, g, b]
                        for r in range(0, 256, Color_Diff)
                        for g in range(0, 256, Color_Diff)
                        for b in range(0, 256, Color_Diff)])
print(Color_Range.shape)
# print(Color_Range)
# Load the source image (must exist next to the script) and convert it
# to a numpy array for processing.
image = Image.open("test.jpg")
W, H = image.size  # PIL reports size as (width, height)
image_array = np.array(image)  # numpy shape is (H, W, channels)
print("image shape:", image_array.shape)
def gcd(m, n):
    """Greatest common divisor of *m* and *n* (Euclidean algorithm)."""
    while n:
        m, n = n, m % n
    return m
def sample_crop(Bg_image_array, UL_local_tuple, LR_local_tuple):
    """
    Crop a rectangular window out of a background image.

    # parameter:
    Bg_image_array   background image as a numpy array (H, W, C)
    UL_local_tuple   (x1, y1) location of the Upper Left corner
    LR_local_tuple   (x2, y2) location of the Lower Right corner

    Returns a new uint8 array of shape (y2 - y1, x2 - x1, C).
    Coordinates are truncated to int; the window is expected to lie
    inside the image.
    """
    (x1, y1) = UL_local_tuple
    (x2, y2) = LR_local_tuple
    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    # A single vectorised slice replaces the original per-pixel Python
    # copy loop; the explicit uint8 cast preserves the original output
    # dtype (the old version copied into a uint8 zeros buffer).
    return np.array(Bg_image_array[y1:y2, x1:x2], dtype=np.uint8)
return sample_array
def cube_center_RGB(cube_array):
    """Return the pixel value at the (integer) centre of *cube_array*."""
    centre_row = cube_array.shape[0] // 2
    centre_col = cube_array.shape[1] // 2
    return cube_array[centre_row, centre_col]
# Method_1: Use the each R,G,B mean value of cube
def image_cube(image_array, cube_size):
    """
    Pixelate ``image_array`` by replacing every (cube_h, cube_w) tile
    with the mean colour of that tile.

    image_array: image as an (H, W, C) numpy array, dtype numpy.uint8
    cube_size: (h, w) tuple; each dimension must evenly divide the
        corresponding image dimension, otherwise an error is printed
        and the process exits via sys.exit().

    Returns a new uint8 array of the same shape.  The float tile means
    truncate when written into the uint8 output, matching the original
    per-pixel assignment behaviour.
    """
    image_h = image_array.shape[0]
    image_w = image_array.shape[1]
    (cube_h, cube_w) = cube_size
    # check the size value
    if image_h % cube_h != 0 or image_w % cube_w != 0:
        print("Error!the cube_size is not suitable the image size!")
        print("  image_size % tuple size == 0")
        sys.exit()  # exit the process
    image_output = np.zeros(image_array.shape, dtype=np.uint8)
    # Tile boundary "rulers", kept for the original diagnostic output
    # (equivalent to collecting every multiple of the cube dimension).
    cube_h_ruler = list(range(0, image_h + 1, cube_h))
    cube_w_ruler = list(range(0, image_w + 1, cube_w))
    print("set Cube size (cube_h,cube_w):", (cube_h, cube_w))
    print("Cube_h_ruler:", cube_h_ruler)
    print("cube_w_ruler:", cube_w_ruler)
    # Fill each output tile with the mean colour of the matching input
    # tile.  Vectorised slicing replaces the original per-pixel copy and
    # fill loops (and drops an unused centre-pixel computation).
    for mark_h in range(0, image_h, cube_h):
        for mark_w in range(0, image_w, cube_w):
            cube = image_array[mark_h:mark_h + cube_h, mark_w:mark_w + cube_w]
            cube_mean = cube.reshape(-1, cube.shape[2]).mean(axis=0)
            image_output[mark_h:mark_h + cube_h, mark_w:mark_w + cube_w] = cube_mean
    return image_output
# Method_2: Use the close Color for each pixel
def return_close_color(color_tuple):
    """
    Quantise an (R, G, B) triple to the nearest value on a coarser grid.

    Each channel is rounded to the nearest multiple of ``div_color`` and
    clamped to 255.  BUG FIX: the original mapped channel value 255 to
    256, which wraps to 0 when written back into a uint8 image, turning
    near-white pixels black.

    Returns a [r, g, b] list of ints.
    """
    # TODO find a better way to find suitable value
    # for r in Color_Range[]
    """Temporary solution"""
    div_color = int(256 / 128)
    return [min(int(c / div_color + 0.5) * div_color, 255)
            for c in color_tuple[:3]]
def use_close_color(image_array):
    """
    Replace every pixel of ``image_array`` in place with its quantised
    ("close") colour and return the same array.

    image_array: image as a numpy array (H, W, C), dtype numpy.uint8.
    """
    n_rows = image_array.shape[0]
    n_cols = image_array.shape[1]
    for row in range(n_rows):
        for col in range(n_cols):
            image_array[row][col] = return_close_color(image_array[row][col][:])
    return image_array
# (h,w) cube_size
# Method 1: pixelate with 3x4 tiles (requires H % 3 == 0 and W % 4 == 0).
img_output = image_cube(image_array, (3, 4))
# Copy the pixelated result back into image_array; note that the file
# below is written from img_output, so this copy-back does not affect
# the saved output.
for i in range(H):
    for j in range(W):
        image_array[i,j] = img_output[i,j]
img = Image.fromarray(img_output)
img.save("img_output_method1.jpg")
# Method 2: reload the original image and quantise each pixel to its
# nearest "close" colour.
image = Image.open("test.jpg")
image_array = np.array(image)
img_output = use_close_color(image_array)
img = Image.fromarray(img_output)
img.save("img_output_method2.jpg")
| [
"PIL.Image.fromarray",
"PIL.Image.open",
"numpy.array",
"numpy.zeros",
"sys.exit"
] | [((357, 378), 'numpy.array', 'np.array', (['Color_Range'], {}), '(Color_Range)\n', (365, 378), True, 'import numpy as np\n'), ((435, 457), 'PIL.Image.open', 'Image.open', (['"""test.jpg"""'], {}), "('test.jpg')\n", (445, 457), False, 'from PIL import Image\n'), ((490, 505), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (498, 505), True, 'import numpy as np\n'), ((5587, 5614), 'PIL.Image.fromarray', 'Image.fromarray', (['img_output'], {}), '(img_output)\n', (5602, 5614), False, 'from PIL import Image\n'), ((5659, 5681), 'PIL.Image.open', 'Image.open', (['"""test.jpg"""'], {}), "('test.jpg')\n", (5669, 5681), False, 'from PIL import Image\n'), ((5696, 5711), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (5704, 5711), True, 'import numpy as np\n'), ((5760, 5787), 'PIL.Image.fromarray', 'Image.fromarray', (['img_output'], {}), '(img_output)\n', (5775, 5787), False, 'from PIL import Image\n'), ((1113, 1192), 'numpy.zeros', 'np.zeros', (['(image_crop_H, image_crop_W, Bg_image_array.shape[2])'], {'dtype': 'np.uint8'}), '((image_crop_H, image_crop_W, Bg_image_array.shape[2]), dtype=np.uint8)\n', (1121, 1192), True, 'import numpy as np\n'), ((2198, 2241), 'numpy.zeros', 'np.zeros', (['image_array.shape'], {'dtype': 'np.uint8'}), '(image_array.shape, dtype=np.uint8)\n', (2206, 2241), True, 'import numpy as np\n'), ((3063, 3075), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3071, 3075), True, 'import numpy as np\n'), ((2147, 2157), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2155, 2157), False, 'import sys\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 30 15:50:52 2018
@author: huangdou
"""
import numpy as np
import matplotlib.pyplot as plt
import time
from statsmodels.tsa.arima_model import ARIMA
from pandas.tools.plotting import autocorrelation_plot
from multiprocessing import Pool
import os
import glob
from sklearn.decomposition import PCA
import pickle
from scoreARIMA import *
def pca(n_components, data, filename):
    """Fit a PCA model on `data`, pickle it to `filename`, and return the
    transformed principal components.

    Parameters
    n_components : int, number of principal components to keep
    data : array-like of shape (n_samples, n_features)
    filename : path where the fitted sklearn PCA model is pickled

    Returns
    principaComponents : ndarray of shape (n_samples, n_components)
    """
    model = PCA(n_components=n_components)
    principaComponents = model.fit_transform(data)
    # FIX: the original `pickle.dump(model, open(filename, 'wb'))` leaked the
    # file handle; a context manager guarantees it is flushed and closed.
    with open(filename, 'wb') as fh:
        pickle.dump(model, fh)
    return principaComponents
def arima_parallel_(filename):
    """Walk-forward ARIMA(12,1,0) forecasting for one PCA-component file.

    Loads the train/test series from ../data/temp/{train,test}/<filename>,
    forecasts 12 steps ahead at each of 264 test dates (appending the true
    observation to the history after every step), and saves the resulting
    (264, 12) forecast matrix to ../data/temp/result/<filename>.
    """
    train_ = np.load('../data/temp/train/' + filename)
    test_ = np.load('../data/temp/test/' + filename)
    result = np.zeros((264, 12))
    start = time.time()
    # Seed the rolling history with the full training series (column 0).
    history = [x for x in train_[:, 0]]
    for t in range(264):
        model = ARIMA(history, order=(12, 1, 0))
        model_fit = model.fit(disp=0)
        output = model_fit.forecast(12)[0]
        yhat = output
        obs = test_[t: t + 1, 0]
        # Append the ground-truth observation so the next fit sees it.
        history.append(obs[0])
        result[t][:] = yhat
    # FIX: the original printed the undefined name `i` (leftover from a
    # commented-out loop), which raised NameError; report the filename.
    print(filename, time.time() - start)
    np.save('../data/temp/result/' + filename, result)
def findfile(scripts_path, file_path):
    """List the non-hidden entries of `file_path` (glob pattern '*').

    Side effect: the working directory is left at `scripts_path` on return,
    matching the original behavior.

    Parameters
    scripts_path : directory to chdir back to when done
    file_path : directory whose entries are listed

    Returns
    filelist : list of entry names (not full paths)
    """
    os.chdir(file_path)
    try:
        filelist = list(glob.glob('*'))
    finally:
        # FIX: restore the working directory even if glob raises; the
        # original would leave the process stranded in `file_path`.
        os.chdir(scripts_path)
    return filelist
if __name__ == '__main__':
    # Pipeline: PCA-compress the population grids, run per-component ARIMA
    # forecasts, then inverse-transform back to grid space and score.
    filename = '../data/PCA.sav'
    test = np.load('../data/popSpecial.npy')
    train = np.load('../data/popTrainValidate.npy')
    # Flatten each 80x80 grid into a 6400-vector per timestep.
    train_ = train.reshape((2880, 6400))
    test_ = test.reshape((288, 6400))
    ## pca
    data = []
    data.extend(train_)
    data.extend(test_)
    temp_data = pca(64, data, filename)
    # Write one train/test file per principal component.
    # NOTE(review): np.str was removed in NumPy 1.20+/1.24 — this would need
    # plain str(i) on modern NumPy; left unchanged here.
    for i in range(64):
        filetrain = 'train/' + np.str(i)
        filetest = 'test/' + np.str(i)
        temp_train = temp_data[:2880, i : (i + 1)]
        temp_test = temp_data[2880:, i : (i + 1)]
        np.save('../data/temp/' + filetrain, temp_train)
        np.save('../data/temp/' + filetest, temp_test)
    # filelist = findfile('../../../scripts/', '../data/temp/train/')
    # pool = Pool(7)
    # pool.map(arima_parallel_, [file for file in filelist])
    # Parallel(n_jobs = 8)(delayed(arima_parallel)(filename) for filename in filelist)
    START, END = 0, 64
    # Forecast each component serially, logging components whose fit fails.
    with open('./errorList.csv', 'w') as wf:
        for i in range(START, END):
            try:
                print(i, time.ctime(), '#' * 20)
                arima_parallel_(str(i) + '.npy')
            except:
                wf.write(str(i) + '\n')
                print(i, 'ARIMA Failed', time.ctime())
    # Collect the (264, 12) forecast matrices into one (264, 12, 64) array.
    results_temp = np.zeros((264, 12, 64))
    for i in range(START, END):
        temp = np.load('../data/temp/result/' + str(i) + '.npy')
        results_temp[:, :, i] = temp
    # Map the component-space forecasts back to 80x80 grids.
    pca_ = pickle.load(open(filename, 'rb'))
    results = pca_.inverse_transform(results_temp)
    results = results.reshape((264, 12, 80, 80, 1))
    # np.save('../data/popResult.npy', results)
    test = np.load('../data/popSpecial.npy')
    # getXSYS / score come from `from scoreARIMA import *` at the top of the file.
    train, test = getXSYS(test, 12)
    score(results, test)
| [
"time.ctime",
"sklearn.decomposition.PCA",
"os.chdir",
"numpy.zeros",
"glob.glob",
"statsmodels.tsa.arima_model.ARIMA",
"numpy.load",
"time.time",
"numpy.save",
"numpy.str"
] | [((459, 489), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_components'}), '(n_components=n_components)\n', (462, 489), False, 'from sklearn.decomposition import PCA\n'), ((661, 702), 'numpy.load', 'np.load', (["('../data/temp/train/' + filename)"], {}), "('../data/temp/train/' + filename)\n", (668, 702), True, 'import numpy as np\n'), ((715, 755), 'numpy.load', 'np.load', (["('../data/temp/test/' + filename)"], {}), "('../data/temp/test/' + filename)\n", (722, 755), True, 'import numpy as np\n'), ((769, 788), 'numpy.zeros', 'np.zeros', (['(264, 12)'], {}), '((264, 12))\n', (777, 788), True, 'import numpy as np\n'), ((825, 836), 'time.time', 'time.time', ([], {}), '()\n', (834, 836), False, 'import time\n'), ((1324, 1374), 'numpy.save', 'np.save', (["('../data/temp/result/' + filename)", 'result'], {}), "('../data/temp/result/' + filename, result)\n", (1331, 1374), True, 'import numpy as np\n'), ((1437, 1456), 'os.chdir', 'os.chdir', (['file_path'], {}), '(file_path)\n', (1445, 1456), False, 'import os\n'), ((1473, 1487), 'glob.glob', 'glob.glob', (['"""*"""'], {}), "('*')\n", (1482, 1487), False, 'import glob\n'), ((1523, 1545), 'os.chdir', 'os.chdir', (['scripts_path'], {}), '(scripts_path)\n', (1531, 1545), False, 'import os\n'), ((1640, 1673), 'numpy.load', 'np.load', (['"""../data/popSpecial.npy"""'], {}), "('../data/popSpecial.npy')\n", (1647, 1673), True, 'import numpy as np\n'), ((1686, 1725), 'numpy.load', 'np.load', (['"""../data/popTrainValidate.npy"""'], {}), "('../data/popTrainValidate.npy')\n", (1693, 1725), True, 'import numpy as np\n'), ((2858, 2881), 'numpy.zeros', 'np.zeros', (['(264, 12, 64)'], {}), '((264, 12, 64))\n', (2866, 2881), True, 'import numpy as np\n'), ((3222, 3255), 'numpy.load', 'np.load', (['"""../data/popSpecial.npy"""'], {}), "('../data/popSpecial.npy')\n", (3229, 3255), True, 'import numpy as np\n'), ((1014, 1046), 'statsmodels.tsa.arima_model.ARIMA', 'ARIMA', (['history'], {'order': '(12, 1, 0)'}), '(history, 
order=(12, 1, 0))\n', (1019, 1046), False, 'from statsmodels.tsa.arima_model import ARIMA\n'), ((2141, 2189), 'numpy.save', 'np.save', (["('../data/temp/' + filetrain)", 'temp_train'], {}), "('../data/temp/' + filetrain, temp_train)\n", (2148, 2189), True, 'import numpy as np\n'), ((2198, 2244), 'numpy.save', 'np.save', (["('../data/temp/' + filetest)", 'temp_test'], {}), "('../data/temp/' + filetest, temp_test)\n", (2205, 2244), True, 'import numpy as np\n'), ((1299, 1310), 'time.time', 'time.time', ([], {}), '()\n', (1308, 1310), False, 'import time\n'), ((1983, 1992), 'numpy.str', 'np.str', (['i'], {}), '(i)\n', (1989, 1992), True, 'import numpy as np\n'), ((2022, 2031), 'numpy.str', 'np.str', (['i'], {}), '(i)\n', (2028, 2031), True, 'import numpy as np\n'), ((2646, 2658), 'time.ctime', 'time.ctime', ([], {}), '()\n', (2656, 2658), False, 'import time\n'), ((2820, 2832), 'time.ctime', 'time.ctime', ([], {}), '()\n', (2830, 2832), False, 'import time\n')] |
# Demo: load a 2x2 EXR image with pytinyexr and read its pixels three ways.
from pytinyexr import PyEXRImage
exrImage = PyEXRImage('2by2.exr')
print(exrImage.filename)
print(exrImage.width)
print(exrImage.height)
print(exrImage)
# Direct access to floats
# Pixels are stored as interleaved RGBA floats, 4 values per pixel.
for i in range(exrImage.width * exrImage.height):
    r = exrImage.get(4 * i + 0)
    g = exrImage.get(4 * i + 1)
    b = exrImage.get(4 * i + 2)
    a = exrImage.get(4 * i + 3)
    print(r,g,b,a)
print ('--------------------------')
# Access by index x,y,channel
# NOTE(review): the outer loop ranges over `width` for y and the inner over
# `height` for x — harmless for this square 2x2 image, but presumably the
# bounds are swapped for non-square images; verify against pytinyexr docs.
for y in range(exrImage.width):
    for x in range(exrImage.height):
        r = exrImage.getPixel(x,y,0)
        g = exrImage.getPixel(x,y,1)
        b = exrImage.getPixel(x,y,2)
        a = exrImage.getPixel(x,y,3)
        print(r,g,b,a)
print ('--------------------------')
# Convert to NumPy
import numpy as np
# copy=False: the array is a zero-copy view over the EXR buffer.
m = np.array(exrImage, copy = False)
print (m)
print ('--------------------------')
# Shape into x,y,channel matrix
t = np.reshape(m, (exrImage.width, exrImage.height, 4))
print (t)
print ('--------------------------')
print(t[0][0][:])
print(t[0][1][:])
print(t[1][0][:])
print(t[1][1][:])
| [
"numpy.array",
"pytinyexr.PyEXRImage",
"numpy.reshape"
] | [((45, 67), 'pytinyexr.PyEXRImage', 'PyEXRImage', (['"""2by2.exr"""'], {}), "('2by2.exr')\n", (55, 67), False, 'from pytinyexr import PyEXRImage\n'), ((768, 798), 'numpy.array', 'np.array', (['exrImage'], {'copy': '(False)'}), '(exrImage, copy=False)\n', (776, 798), True, 'import numpy as np\n'), ((885, 936), 'numpy.reshape', 'np.reshape', (['m', '(exrImage.width, exrImage.height, 4)'], {}), '(m, (exrImage.width, exrImage.height, 4))\n', (895, 936), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
class ResultDrawer:
    """Render a grid pairing each test image with its class-probability bars."""

    def __init__(self, row=6, col=4):
        # Grid layout and the fixed two-class label set.
        self.num_rows = row
        self.num_cols = col
        self.class_names = ['male', 'female']

    def plot_image(self, i, predictions_array, true_label, img):
        """Draw sample `i` in grayscale, captioned with the predicted class,
        its confidence, and the true class (blue = correct, red = wrong)."""
        true_label, img = true_label[i], img[i]
        plt.grid(False)
        plt.xticks([])
        plt.yticks([])
        plt.imshow(img, cmap='gray')
        predicted_label = np.argmax(predictions_array)
        color = 'blue' if predicted_label == true_label else 'red'
        plt.xlabel("{} {:2.0f}% ({})".format(self.class_names[predicted_label],
                                             100*np.max(predictions_array),
                                             self.class_names[true_label]),
                   color=color)

    def plot_value_array(self, i, predictions_array, true_label):
        """Draw the two-bar probability chart, coloring the predicted bar red
        and the true-label bar blue (blue wins when they coincide)."""
        true_label = true_label[i]
        plt.grid(False)
        plt.xticks(range(2))
        plt.yticks([])
        bars = plt.bar(range(2), predictions_array, color="#777777")
        plt.ylim([0, 1])
        predicted_label = np.argmax(predictions_array)
        bars[predicted_label].set_color('red')
        bars[true_label].set_color('blue')

    def plot(self, model, test_images, test_labels):
        """Run `model.predict` on `test_images` and show the whole grid:
        image on the left, probability bars on the right, per sample."""
        predictions = model.predict(test_images)
        total = self.num_rows * self.num_cols
        plt.figure(figsize=(2 * 2 * self.num_cols, 2 * self.num_rows))
        for idx in range(total):
            plt.subplot(self.num_rows, 2 * self.num_cols, 2 * idx + 1)
            self.plot_image(idx, predictions[idx], test_labels, test_images)
            plt.subplot(self.num_rows, 2 * self.num_cols, 2 * idx + 2)
            self.plot_value_array(idx, predictions[idx], test_labels)
        plt.tight_layout()
        plt.show()
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xticks",
"numpy.argmax",
"numpy.max",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((335, 350), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (343, 350), True, 'import matplotlib.pyplot as plt\n'), ((359, 373), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (369, 373), True, 'import matplotlib.pyplot as plt\n'), ((382, 396), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (392, 396), True, 'import matplotlib.pyplot as plt\n'), ((405, 433), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': '"""gray"""'}), "(img, cmap='gray')\n", (415, 433), True, 'import matplotlib.pyplot as plt\n'), ((461, 489), 'numpy.argmax', 'np.argmax', (['predictions_array'], {}), '(predictions_array)\n', (470, 489), True, 'import numpy as np\n'), ((974, 989), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (982, 989), True, 'import matplotlib.pyplot as plt\n'), ((1027, 1041), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1037, 1041), True, 'import matplotlib.pyplot as plt\n'), ((1123, 1139), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (1131, 1139), True, 'import matplotlib.pyplot as plt\n'), ((1166, 1194), 'numpy.argmax', 'np.argmax', (['predictions_array'], {}), '(predictions_array)\n', (1175, 1194), True, 'import numpy as np\n'), ((1456, 1518), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2 * 2 * self.num_cols, 2 * self.num_rows)'}), '(figsize=(2 * 2 * self.num_cols, 2 * self.num_rows))\n', (1466, 1518), True, 'import matplotlib.pyplot as plt\n'), ((1842, 1860), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1858, 1860), True, 'import matplotlib.pyplot as plt\n'), ((1869, 1879), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1877, 1879), True, 'import matplotlib.pyplot as plt\n'), ((1568, 1624), 'matplotlib.pyplot.subplot', 'plt.subplot', (['self.num_rows', '(2 * self.num_cols)', '(2 * i + 1)'], {}), '(self.num_rows, 2 * self.num_cols, 2 * i + 1)\n', (1579, 1624), True, 'import 
matplotlib.pyplot as plt\n'), ((1710, 1766), 'matplotlib.pyplot.subplot', 'plt.subplot', (['self.num_rows', '(2 * self.num_cols)', '(2 * i + 2)'], {}), '(self.num_rows, 2 * self.num_cols, 2 * i + 2)\n', (1721, 1766), True, 'import matplotlib.pyplot as plt\n'), ((729, 754), 'numpy.max', 'np.max', (['predictions_array'], {}), '(predictions_array)\n', (735, 754), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 13 09:58:50 2020
@author: duttar
Description: Solving the problem A = Bx
A is the timeseries stack of InSAR pixel wise
B is matrix including time and ADDT
x is a vector containing seasonal and overall subsidence
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import h5py
from datetime import datetime as dt
import multiprocessing
from joblib import Parallel, delayed
from functools import partial
import scipy.io as sio
def datenum(d):
    '''
    MATLAB-style serial date number for a datetime `d`.
    Whole days come from the proleptic ordinal (offset by 366 to match
    MATLAB's epoch); the time of day contributes the fractional part.
    Used by SBI_Year.
    '''
    whole_days = d.toordinal()
    day_fraction = (d - dt.fromordinal(whole_days)).total_seconds() / (24 * 60 * 60)
    return 366 + whole_days + day_fraction
def SBI_Year(imd):
    '''
    Convert date strings like '19990930' into fractional years, e.g. 1999.7590.

    imd - numpy (n,1) array of 'YYYYMMDD' strings
    out - numpy (n,1) array of fractional-year floats
    created by <NAME>
    '''
    n = imd.shape[0]
    out = np.zeros((n, 1))
    for k in range(n):
        date_str = imd[k][0]
        year = date_str[0:4]
        # Year span in serial-date units: first day minus half a day ...
        year_start = datenum(dt.strptime(year + '0101', '%Y%m%d')) - 0.5
        # ... through last day plus half a day.
        year_end = datenum(dt.strptime(year + '1231', '%Y%m%d')) + 0.5
        days_in_year = year_end - year_start
        # Fractional position of this date within its year.
        day_of_year = datenum(dt.strptime(date_str, '%Y%m%d')) - year_start
        out[k] = int(year) + day_of_year / days_in_year
    return out
# work directory
proj_dir = os.path.expanduser('/data/not_backed_up/rdtta/Permafrost/Alaska/North_slope/DT102/Stack/timeseries')
# file in geo coordinates
geom_file = os.path.join(proj_dir, 'geo/geo_geometryRadar.h5')
ts_file = os.path.join(proj_dir, 'geo/geo_timeseries_ramp_demErr.h5')
maskfile = os.path.join(proj_dir, 'geo/geo_maskTempCoh.h5')
# Datasets are read positionally by key index; assumes MintPy's fixed HDF5
# layout — TODO confirm key order against the actual files.
with h5py.File(ts_file, "r") as f:
    # read the timeseries file
    a_group_key = list(f.keys())[2]
    ts_data = list(f[a_group_key])
    a_group_key = list(f.keys())[1]
    dates = list(f[a_group_key])
with h5py.File(geom_file, "r") as f:
    # read the geometry file
    a_group_key = list(f.keys())[0]
    azim_angle = list(f[a_group_key])
    a_group_key = list(f.keys())[1]
    height = list(f[a_group_key])
    a_group_key = list(f.keys())[2]
    inc_angle = list(f[a_group_key])
    a_group_key = list(f.keys())[3]
    latitude = list(f[a_group_key])
    a_group_key = list(f.keys())[4]
    longitude = list(f[a_group_key])
with h5py.File(maskfile, "r") as f:
    a_group_key = list(f.keys())[0]
    maskbool = list(f[a_group_key])
    maskbool = np.array(maskbool)
# convert dates from type 'bytes' to string
numdates = np.size(dates)
datesn = np.empty([numdates, 1], dtype="<U10")
dates_int = np.zeros((numdates,1))
for i in range(numdates):
    datesn[i] = dates[i].decode("utf-8")
    dates_int[i] = int(dates[i].decode("utf-8"))
dates_frac = SBI_Year(datesn)
# select the dates to put in matrix A
# Keep only acquisitions whose fractional year falls in the thaw season
# (roughly early June through late October).
inddates = np.zeros((numdates,1))
for i in range(numdates):
    dates_i = dates_frac[i]
    frac_part = dates_i - np.floor(dates_i)
    if frac_part < .41506849 :
        inddates[i] = 0
    elif frac_part > .81506849 :
        inddates[i] = 0
    else:
        inddates[i] = 1
include_dates = np.where(inddates == 1)[0]
print('included dates for estimation are: \n', datesn[include_dates])
dates_frac_included = dates_frac[include_dates]
# load the addt files
# Collect the distinct years covered by the included dates.
dates_floor = np.floor(dates_frac_included)
for i in range(include_dates.shape[0]-1):
    if i == 0:
        years_incl = dates_floor[i]
    if dates_floor[i+1] != dates_floor[i]:
        years_incl = np.concatenate((years_incl, dates_floor[i+1]), axis=0)
# NOTE(review): np.int was removed in NumPy 1.24; this section needs plain
# int(...) on modern NumPy. Left unchanged here.
a_dictionary = {}
mat_c1 = np.empty(())
for years in years_incl:
    varmat_load = 'data_temp_addt/ds633/addt' + str(np.int(years)) + '.mat'
    mat_c1 = sio.loadmat(varmat_load)
    lonlat = mat_c1['lonlat']
    var_addt = 'addt' + str(np.int(years))
    a_dictionary["addt_%s" %np.int(years)] = mat_c1[var_addt]
    a_dictionary["detailsaddt_%s" %np.int(years)] = mat_c1['details_addt']
# get the timeseries data attributes
ifglen = np.shape(longitude)[0]
ifgwid = np.shape(longitude)[1]
ts_data = np.array(ts_data)
longitude = np.array(longitude)
latitude = np.array(latitude)
numpixels = ifglen*ifgwid
# ADDT grid longitudes are 0..360; shift to -180..180 to match the SAR geometry.
lonvals_addt = lonlat[:,0] - 360
latvals_addt = lonlat[:,1]
# normalize the addt values with the overall maximum value
maxaddt = np.zeros((years_incl.shape[0], 1))
i = 0
for years in years_incl:
    varaddt = "addt_" + str(np.int(years))
    maxaddt[i] = np.max(a_dictionary[varaddt])
    i = i+1
maxaddtall = np.max(maxaddt) # maximum addt value
# Build, per pixel and per included date, the normalized ADDT value by
# nearest-neighbor lookup into the ADDT grid.
addt_pixelwise = np.zeros((include_dates.shape[0], ifglen, ifgwid))
for i in range(numpixels):
    if np.mod(i, 50000) == 0:
        print('loops completed: ', i)
    # Recover (row, col) from the flat pixel index (1-based arithmetic).
    ind_len = np.mod(i+1, ifglen) - 1
    if np.mod(i+1, ifglen) == 0:
        ind_len = ifglen -1
    ind_wid = np.int(np.floor((i+1)/ifglen)) - 1
    if maskbool[ind_len, ind_wid] == False:
        continue
    # get the latitude and longitude at the index
    lon_valind = longitude[ind_len, ind_wid]
    lat_valind = latitude[ind_len, ind_wid]
    # find the closest lonlat of the addt values
    abs_dist_lon = np.abs(lonvals_addt - lon_valind)
    abs_dist_lat = np.abs(latvals_addt - lat_valind)
    ind_close1 = np.where(abs_dist_lon == np.min(abs_dist_lon))
    ind_close2 = np.where(abs_dist_lat == np.min(abs_dist_lat))
    indcommon = np.intersect1d(ind_close1, ind_close2)
    if indcommon.shape[0] > 1:
        indcommon = indcommon[0]
    ind_tsdate = 0
    # go through the time series dates and find the corresponding addt values
    for day in dates_frac_included:
        # Leap-year check on the calendar year (century rule ignored here).
        if np.mod(np.floor(day),4) > 0:
            leapdays = 365
        else:
            leapdays = 366
        dayindex = (day - np.floor(day))* leapdays + .5
        varaddt1 = "detailsaddt_" + str(np.int(np.floor(day)[0]))
        firstday_addt = a_dictionary[varaddt1][indcommon, 1]
        varaddt2 = "addt_" + str(np.int(np.floor(day)[0]))
        # Before thaw onset the ADDT is effectively zero (use a tiny epsilon).
        if firstday_addt > dayindex:
            addt_pixelwise[ind_tsdate, ind_len, ind_wid] = 1e-5
        else:
            day_diff = dayindex - firstday_addt
            addt_pixelwise[ind_tsdate, ind_len, ind_wid] = a_dictionary[varaddt2][indcommon, np.int(np.round(day_diff[0]))]/maxaddtall
        ind_tsdate = ind_tsdate + 1
# Replace the freshly computed array with a precomputed cache from data.h5;
# the loop above is effectively overridden by this load.
hf = h5py.File('data.h5', 'r')
addt_pixelwise = hf.get('addt_ts')
addt_pixelwise = np.array(addt_pixelwise)
hf.close()
def x_est(arg_i, ifglen, dates_frac_included, include_dates, ts_data, addt_pixelwise):
    '''
    Estimate the overall and seasonal subsidence for pixel `arg_i` by solving
    the linear system A x = B via the normal equations.

    The design matrix A has columns [elapsed time (yr), -normalized ADDT, 1],
    so x = [velocity, seasonal coefficient, offset].  Returns a (3, 1) array,
    or NaNs for pixels rejected by the module-level `maskbool` array.
    '''
    if arg_i % 50000 == 0:
        print('in loop number : ', arg_i)
    # Recover (row, col) from the flat pixel index (1-based arithmetic).
    ind_len = np.mod(arg_i, ifglen) - 1
    if np.mod(arg_i, ifglen) == 0:
        ind_len = ifglen - 1
    # FIX: np.int was removed in NumPy 1.24; use the builtin int instead.
    ind_wid = int(np.floor(arg_i / ifglen)) - 1
    # check if masked
    if maskbool[ind_len, ind_wid] == False:
        return np.array([[np.nan], [np.nan], [np.nan]])
    # Observations B: the time series at this pixel for the included dates.
    Bmat = ts_data[include_dates, ind_len, ind_wid]
    Bmat = np.reshape(Bmat, (Bmat.shape[0], 1))
    # Design matrix A.
    # first column is time in year (to get subsidence/year)
    fir_colm = dates_frac_included - dates_frac_included[0]
    sec_colm = -addt_pixelwise[:, ind_len, ind_wid]
    sec_colm = np.reshape(sec_colm, (sec_colm.shape[0], 1))
    thi_colm = np.ones((sec_colm.shape[0], 1))
    Amat_1 = np.concatenate((fir_colm, sec_colm), axis=1)
    Amat = np.concatenate((Amat_1, thi_colm), axis=1)
    # Normal equations: solve (A^T A) x = A^T B.
    AtA = np.matmul(Amat.conj().transpose(), Amat)
    AtB = np.matmul(Amat.conj().transpose(), Bmat)
    solx = np.linalg.solve(AtA, AtB)
    return solx
# Solve the per-pixel system on all cores and persist the solutions.
num_cores = multiprocessing.cpu_count()
# Bind the shared arrays so the workers only receive the pixel index.
x_est_ = partial(x_est, ifglen = ifglen, dates_frac_included = dates_frac_included, include_dates= include_dates, ts_data= ts_data, addt_pixelwise = addt_pixelwise)
output = Parallel(n_jobs=num_cores)(delayed(x_est_)(i) for i in range(numpixels))
subs_data = np.array(output)
# Save to HDF5 ...
hf = h5py.File('subs_data.h5', 'w')
hf.create_dataset('subs_data', data=subs_data)
hf.create_dataset('lon', data=longitude)
hf.create_dataset('lat', data=latitude)
hf.close()
# ... and to a MATLAB .mat file for downstream plotting.
var_name = 'subsdata.mat'
sio.savemat(var_name, {'subs_data':subs_data, \
            'lon':longitude, 'lat':latitude})
| [
"scipy.io.savemat",
"scipy.io.loadmat",
"multiprocessing.cpu_count",
"numpy.array",
"numpy.mod",
"numpy.reshape",
"numpy.where",
"numpy.max",
"numpy.empty",
"numpy.concatenate",
"numpy.min",
"os.path.expanduser",
"numpy.round",
"numpy.abs",
"numpy.ones",
"numpy.size",
"numpy.floor",
... | [((1645, 1755), 'os.path.expanduser', 'os.path.expanduser', (['"""/data/not_backed_up/rdtta/Permafrost/Alaska/North_slope/DT102/Stack/timeseries"""'], {}), "(\n '/data/not_backed_up/rdtta/Permafrost/Alaska/North_slope/DT102/Stack/timeseries'\n )\n", (1663, 1755), False, 'import os\n'), ((1785, 1835), 'os.path.join', 'os.path.join', (['proj_dir', '"""geo/geo_geometryRadar.h5"""'], {}), "(proj_dir, 'geo/geo_geometryRadar.h5')\n", (1797, 1835), False, 'import os\n'), ((1846, 1905), 'os.path.join', 'os.path.join', (['proj_dir', '"""geo/geo_timeseries_ramp_demErr.h5"""'], {}), "(proj_dir, 'geo/geo_timeseries_ramp_demErr.h5')\n", (1858, 1905), False, 'import os\n'), ((1917, 1965), 'os.path.join', 'os.path.join', (['proj_dir', '"""geo/geo_maskTempCoh.h5"""'], {}), "(proj_dir, 'geo/geo_maskTempCoh.h5')\n", (1929, 1965), False, 'import os\n'), ((2724, 2742), 'numpy.array', 'np.array', (['maskbool'], {}), '(maskbool)\n', (2732, 2742), True, 'import numpy as np\n'), ((2800, 2814), 'numpy.size', 'np.size', (['dates'], {}), '(dates)\n', (2807, 2814), True, 'import numpy as np\n'), ((2824, 2861), 'numpy.empty', 'np.empty', (['[numdates, 1]'], {'dtype': '"""<U10"""'}), "([numdates, 1], dtype='<U10')\n", (2832, 2861), True, 'import numpy as np\n'), ((2874, 2897), 'numpy.zeros', 'np.zeros', (['(numdates, 1)'], {}), '((numdates, 1))\n', (2882, 2897), True, 'import numpy as np\n'), ((3095, 3118), 'numpy.zeros', 'np.zeros', (['(numdates, 1)'], {}), '((numdates, 1))\n', (3103, 3118), True, 'import numpy as np\n'), ((3569, 3598), 'numpy.floor', 'np.floor', (['dates_frac_included'], {}), '(dates_frac_included)\n', (3577, 3598), True, 'import numpy as np\n'), ((3839, 3851), 'numpy.empty', 'np.empty', (['()'], {}), '(())\n', (3847, 3851), True, 'import numpy as np\n'), ((4314, 4331), 'numpy.array', 'np.array', (['ts_data'], {}), '(ts_data)\n', (4322, 4331), True, 'import numpy as np\n'), ((4344, 4363), 'numpy.array', 'np.array', (['longitude'], {}), '(longitude)\n', (4352, 4363), 
True, 'import numpy as np\n'), ((4375, 4393), 'numpy.array', 'np.array', (['latitude'], {}), '(latitude)\n', (4383, 4393), True, 'import numpy as np\n'), ((4553, 4587), 'numpy.zeros', 'np.zeros', (['(years_incl.shape[0], 1)'], {}), '((years_incl.shape[0], 1))\n', (4561, 4587), True, 'import numpy as np\n'), ((4735, 4750), 'numpy.max', 'np.max', (['maxaddt'], {}), '(maxaddt)\n', (4741, 4750), True, 'import numpy as np\n'), ((4790, 4840), 'numpy.zeros', 'np.zeros', (['(include_dates.shape[0], ifglen, ifgwid)'], {}), '((include_dates.shape[0], ifglen, ifgwid))\n', (4798, 4840), True, 'import numpy as np\n'), ((6523, 6548), 'h5py.File', 'h5py.File', (['"""data.h5"""', '"""r"""'], {}), "('data.h5', 'r')\n", (6532, 6548), False, 'import h5py\n'), ((6601, 6625), 'numpy.array', 'np.array', (['addt_pixelwise'], {}), '(addt_pixelwise)\n', (6609, 6625), True, 'import numpy as np\n'), ((7890, 7917), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (7915, 7917), False, 'import multiprocessing\n'), ((7927, 8083), 'functools.partial', 'partial', (['x_est'], {'ifglen': 'ifglen', 'dates_frac_included': 'dates_frac_included', 'include_dates': 'include_dates', 'ts_data': 'ts_data', 'addt_pixelwise': 'addt_pixelwise'}), '(x_est, ifglen=ifglen, dates_frac_included=dates_frac_included,\n include_dates=include_dates, ts_data=ts_data, addt_pixelwise=addt_pixelwise\n )\n', (7934, 8083), False, 'from functools import partial\n'), ((8178, 8194), 'numpy.array', 'np.array', (['output'], {}), '(output)\n', (8186, 8194), True, 'import numpy as np\n'), ((8200, 8230), 'h5py.File', 'h5py.File', (['"""subs_data.h5"""', '"""w"""'], {}), "('subs_data.h5', 'w')\n", (8209, 8230), False, 'import h5py\n'), ((8398, 8484), 'scipy.io.savemat', 'sio.savemat', (['var_name', "{'subs_data': subs_data, 'lon': longitude, 'lat': latitude}"], {}), "(var_name, {'subs_data': subs_data, 'lon': longitude, 'lat':\n latitude})\n", (8409, 8484), True, 'import scipy.io as sio\n'), ((1004, 1021), 
'numpy.zeros', 'np.zeros', (['(nd, 1)'], {}), '((nd, 1))\n', (1012, 1021), True, 'import numpy as np\n'), ((1972, 1995), 'h5py.File', 'h5py.File', (['ts_file', '"""r"""'], {}), "(ts_file, 'r')\n", (1981, 1995), False, 'import h5py\n'), ((2179, 2204), 'h5py.File', 'h5py.File', (['geom_file', '"""r"""'], {}), "(geom_file, 'r')\n", (2188, 2204), False, 'import h5py\n'), ((2609, 2633), 'h5py.File', 'h5py.File', (['maskfile', '"""r"""'], {}), "(maskfile, 'r')\n", (2618, 2633), False, 'import h5py\n'), ((3384, 3407), 'numpy.where', 'np.where', (['(inddates == 1)'], {}), '(inddates == 1)\n', (3392, 3407), True, 'import numpy as np\n'), ((3966, 3990), 'scipy.io.loadmat', 'sio.loadmat', (['varmat_load'], {}), '(varmat_load)\n', (3977, 3990), True, 'import scipy.io as sio\n'), ((4249, 4268), 'numpy.shape', 'np.shape', (['longitude'], {}), '(longitude)\n', (4257, 4268), True, 'import numpy as np\n'), ((4281, 4300), 'numpy.shape', 'np.shape', (['longitude'], {}), '(longitude)\n', (4289, 4300), True, 'import numpy as np\n'), ((4680, 4709), 'numpy.max', 'np.max', (['a_dictionary[varaddt]'], {}), '(a_dictionary[varaddt])\n', (4686, 4709), True, 'import numpy as np\n'), ((5358, 5391), 'numpy.abs', 'np.abs', (['(lonvals_addt - lon_valind)'], {}), '(lonvals_addt - lon_valind)\n', (5364, 5391), True, 'import numpy as np\n'), ((5411, 5444), 'numpy.abs', 'np.abs', (['(latvals_addt - lat_valind)'], {}), '(latvals_addt - lat_valind)\n', (5417, 5444), True, 'import numpy as np\n'), ((5589, 5627), 'numpy.intersect1d', 'np.intersect1d', (['ind_close1', 'ind_close2'], {}), '(ind_close1, ind_close2)\n', (5603, 5627), True, 'import numpy as np\n'), ((7240, 7276), 'numpy.reshape', 'np.reshape', (['Bmat', '(Bmat.shape[0], 1)'], {}), '(Bmat, (Bmat.shape[0], 1))\n', (7250, 7276), True, 'import numpy as np\n'), ((7488, 7532), 'numpy.reshape', 'np.reshape', (['sec_colm', '(sec_colm.shape[0], 1)'], {}), '(sec_colm, (sec_colm.shape[0], 1))\n', (7498, 7532), True, 'import numpy as np\n'), ((7548, 
7579), 'numpy.ones', 'np.ones', (['(sec_colm.shape[0], 1)'], {}), '((sec_colm.shape[0], 1))\n', (7555, 7579), True, 'import numpy as np\n'), ((7593, 7637), 'numpy.concatenate', 'np.concatenate', (['(fir_colm, sec_colm)'], {'axis': '(1)'}), '((fir_colm, sec_colm), axis=1)\n', (7607, 7637), True, 'import numpy as np\n'), ((7651, 7693), 'numpy.concatenate', 'np.concatenate', (['(Amat_1, thi_colm)'], {'axis': '(1)'}), '((Amat_1, thi_colm), axis=1)\n', (7665, 7693), True, 'import numpy as np\n'), ((7835, 7860), 'numpy.linalg.solve', 'np.linalg.solve', (['AtA', 'AtB'], {}), '(AtA, AtB)\n', (7850, 7860), True, 'import numpy as np\n'), ((8092, 8118), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_cores'}), '(n_jobs=num_cores)\n', (8100, 8118), False, 'from joblib import Parallel, delayed\n'), ((1108, 1155), 'datetime.datetime.strptime', 'dt.strptime', (["(dstr[k][0][0:4] + '0101')", '"""%Y%m%d"""'], {}), "(dstr[k][0][0:4] + '0101', '%Y%m%d')\n", (1119, 1155), True, 'from datetime import datetime as dt\n'), ((1241, 1288), 'datetime.datetime.strptime', 'dt.strptime', (["(dstr[k][0][0:4] + '1231')", '"""%Y%m%d"""'], {}), "(dstr[k][0][0:4] + '1231', '%Y%m%d')\n", (1252, 1288), True, 'from datetime import datetime as dt\n'), ((1429, 1462), 'datetime.datetime.strptime', 'dt.strptime', (['dstr[k][0]', '"""%Y%m%d"""'], {}), "(dstr[k][0], '%Y%m%d')\n", (1440, 1462), True, 'from datetime import datetime as dt\n'), ((3198, 3215), 'numpy.floor', 'np.floor', (['dates_i'], {}), '(dates_i)\n', (3206, 3215), True, 'import numpy as np\n'), ((3756, 3812), 'numpy.concatenate', 'np.concatenate', (['(years_incl, dates_floor[i + 1])'], {'axis': '(0)'}), '((years_incl, dates_floor[i + 1]), axis=0)\n', (3770, 3812), True, 'import numpy as np\n'), ((4875, 4891), 'numpy.mod', 'np.mod', (['i', '(50000)'], {}), '(i, 50000)\n', (4881, 4891), True, 'import numpy as np\n'), ((4950, 4971), 'numpy.mod', 'np.mod', (['(i + 1)', 'ifglen'], {}), '(i + 1, ifglen)\n', (4956, 4971), True, 'import numpy as 
np\n'), ((4982, 5003), 'numpy.mod', 'np.mod', (['(i + 1)', 'ifglen'], {}), '(i + 1, ifglen)\n', (4988, 5003), True, 'import numpy as np\n'), ((6890, 6911), 'numpy.mod', 'np.mod', (['arg_i', 'ifglen'], {}), '(arg_i, ifglen)\n', (6896, 6911), True, 'import numpy as np\n'), ((6923, 6944), 'numpy.mod', 'np.mod', (['arg_i', 'ifglen'], {}), '(arg_i, ifglen)\n', (6929, 6944), True, 'import numpy as np\n'), ((7114, 7154), 'numpy.array', 'np.array', (['[[np.nan], [np.nan], [np.nan]]'], {}), '([[np.nan], [np.nan], [np.nan]])\n', (7122, 7154), True, 'import numpy as np\n'), ((4049, 4062), 'numpy.int', 'np.int', (['years'], {}), '(years)\n', (4055, 4062), True, 'import numpy as np\n'), ((4092, 4105), 'numpy.int', 'np.int', (['years'], {}), '(years)\n', (4098, 4105), True, 'import numpy as np\n'), ((4161, 4174), 'numpy.int', 'np.int', (['years'], {}), '(years)\n', (4167, 4174), True, 'import numpy as np\n'), ((4648, 4661), 'numpy.int', 'np.int', (['years'], {}), '(years)\n', (4654, 4661), True, 'import numpy as np\n'), ((5059, 5085), 'numpy.floor', 'np.floor', (['((i + 1) / ifglen)'], {}), '((i + 1) / ifglen)\n', (5067, 5085), True, 'import numpy as np\n'), ((5487, 5507), 'numpy.min', 'np.min', (['abs_dist_lon'], {}), '(abs_dist_lon)\n', (5493, 5507), True, 'import numpy as np\n'), ((5551, 5571), 'numpy.min', 'np.min', (['abs_dist_lat'], {}), '(abs_dist_lat)\n', (5557, 5571), True, 'import numpy as np\n'), ((6805, 6825), 'numpy.mod', 'np.mod', (['arg_i', '(50000)'], {}), '(arg_i, 50000)\n', (6811, 6825), True, 'import numpy as np\n'), ((7003, 7027), 'numpy.floor', 'np.floor', (['(arg_i / ifglen)'], {}), '(arg_i / ifglen)\n', (7011, 7027), True, 'import numpy as np\n'), ((8119, 8134), 'joblib.delayed', 'delayed', (['x_est_'], {}), '(x_est_)\n', (8126, 8134), False, 'from joblib import Parallel, delayed\n'), ((3929, 3942), 'numpy.int', 'np.int', (['years'], {}), '(years)\n', (3935, 3942), True, 'import numpy as np\n'), ((5845, 5858), 'numpy.floor', 'np.floor', (['day'], {}), 
'(day)\n', (5853, 5858), True, 'import numpy as np\n'), ((5963, 5976), 'numpy.floor', 'np.floor', (['day'], {}), '(day)\n', (5971, 5976), True, 'import numpy as np\n'), ((6040, 6053), 'numpy.floor', 'np.floor', (['day'], {}), '(day)\n', (6048, 6053), True, 'import numpy as np\n'), ((6160, 6173), 'numpy.floor', 'np.floor', (['day'], {}), '(day)\n', (6168, 6173), True, 'import numpy as np\n'), ((6446, 6467), 'numpy.round', 'np.round', (['day_diff[0]'], {}), '(day_diff[0])\n', (6454, 6467), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
""" Gathers codes snippets used in the test suite.
"""
import unittest
from contextlib import contextmanager
from functools import wraps
import os
import sys
import numpy as np
import PIL
# Directory containing this test module.
test_dir = os.path.dirname(__file__)
# Scratch space for files produced during test runs.
temporary_data_dir = os.path.join(test_dir, "_temporary_data")
# Golden/reference files that tests compare against.
ref_data_dir = os.path.join(test_dir, "REFERENCE_DATA")
def suite(testcases):
"""
Parameters
testcases : an iterable of unittest.TestCases
Returns
suite : a unittest.TestSuite combining all the individual tests routines
from the input 'testcases' list (by default these are the method
names beginning with test).
"""
suite = unittest.TestSuite()
loader = unittest.TestLoader()
for testcase in testcases:
suite.addTests(loader.loadTestsFromTestCase(testcase))
return suite
@contextmanager
def suppress_stdout():
    """Temporarily silence stdout and stderr inside the `with` block.

    Only Python-level streams are redirected; C-level output would need the
    heavier file-descriptor approach described at
    https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
    """
    with open(os.devnull, "w") as devnull:
        saved_out, saved_err = sys.stdout, sys.stderr
        sys.stdout = devnull
        sys.stderr = devnull
        try:
            yield
        finally:
            # Always restore the real streams, even on exception.
            sys.stdout = saved_out
            sys.stderr = saved_err
def no_stdout(func):
    """ Decorator: run the wrapped function with its output suppressed. """
    @wraps(func)
    def wrapper(*args, **kwargs):
        with suppress_stdout():
            return func(*args, **kwargs)
    return wrapper
def compare_png(ref_file, test_file):
    """ Scalar difference between two images: the arithmetic mean of the
    per-channel deltas, scaled to [0, 1].  As a side effect, the pixel-wise
    difference image is saved next to `test_file` with a '.diff' infix.
    """
    reference = PIL.Image.open(ref_file)
    candidate = PIL.Image.open(test_file)
    stem, ext = os.path.splitext(test_file)
    diff_image = PIL.ImageChops.difference(reference, candidate)
    diff_image.save(stem + ".diff" + ext)
    per_channel_error = np.asarray(diff_image) / 255.
    return np.mean(per_channel_error)
| [
"unittest.TestSuite",
"PIL.ImageChops.difference",
"PIL.Image.open",
"numpy.mean",
"os.path.join",
"os.path.splitext",
"functools.wraps",
"numpy.asarray",
"os.path.dirname",
"unittest.TestLoader"
] | [((225, 250), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (240, 250), False, 'import os\n'), ((272, 313), 'os.path.join', 'os.path.join', (['test_dir', '"""_temporary_data"""'], {}), "(test_dir, '_temporary_data')\n", (284, 313), False, 'import os\n'), ((329, 369), 'os.path.join', 'os.path.join', (['test_dir', '"""REFERENCE_DATA"""'], {}), "(test_dir, 'REFERENCE_DATA')\n", (341, 369), False, 'import os\n'), ((697, 717), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (715, 717), False, 'import unittest\n'), ((731, 752), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (750, 752), False, 'import unittest\n'), ((1554, 1565), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (1559, 1565), False, 'from functools import wraps\n'), ((1869, 1893), 'PIL.Image.open', 'PIL.Image.open', (['ref_file'], {}), '(ref_file)\n', (1883, 1893), False, 'import PIL\n'), ((1911, 1936), 'PIL.Image.open', 'PIL.Image.open', (['test_file'], {}), '(test_file)\n', (1925, 1936), False, 'import PIL\n'), ((1958, 1985), 'os.path.splitext', 'os.path.splitext', (['test_file'], {}), '(test_file)\n', (1974, 1985), False, 'import os\n'), ((2040, 2088), 'PIL.ImageChops.difference', 'PIL.ImageChops.difference', (['ref_image', 'test_image'], {}), '(ref_image, test_image)\n', (2065, 2088), False, 'import PIL\n'), ((2174, 2189), 'numpy.mean', 'np.mean', (['errors'], {}), '(errors)\n', (2181, 2189), True, 'import numpy as np\n'), ((2133, 2155), 'numpy.asarray', 'np.asarray', (['diff_image'], {}), '(diff_image)\n', (2143, 2155), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Copyright 2019, <NAME>, HKUST.
Training script.
"""
from __future__ import print_function
import os
import time
import sys
import math
import argparse
from random import randint
import cv2
import numpy as np
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import matplotlib.pyplot as plt
sys.path.append("../")
from tools.common import Notify
from preprocess import *
from model import *
from loss import *
from homography_warping import get_homographies, homography_warping
import photometric_augmentation as photaug
# paths
tf.compat.v1.app.flags.DEFINE_string('blendedmvs_data_root', '/data/BlendedMVS/dataset_low_res',
"""Path to dtu dataset.""")
tf.compat.v1.app.flags.DEFINE_string('eth3d_data_root', '/data/eth3d/lowres/training/undistorted',
"""Path to dtu dataset.""")
tf.compat.v1.app.flags.DEFINE_string('dtu_data_root', '/data/dtu',
"""Path to dtu dataset.""")
tf.compat.v1.app.flags.DEFINE_boolean('train_blendedmvs', False,
"""Whether to train.""")
tf.compat.v1.app.flags.DEFINE_boolean('train_blendedmvg', False,
"""Whether to train.""")
tf.compat.v1.app.flags.DEFINE_boolean('train_dtu', False,
"""Whether to train.""")
tf.compat.v1.app.flags.DEFINE_boolean('train_eth3d', False,
"""Whether to train.""")
tf.compat.v1.app.flags.DEFINE_string('log_folder', '/data/tf_log',
"""Path to store the log.""")
tf.compat.v1.app.flags.DEFINE_string('model_folder', '/data/tf_model',
"""Path to save the model.""")
tf.compat.v1.app.flags.DEFINE_integer('ckpt_step', 0,
"""ckpt step.""")
tf.compat.v1.app.flags.DEFINE_boolean('use_pretrain', False,
"""Whether to train.""")
# input parameters
tf.compat.v1.app.flags.DEFINE_integer('view_num', 3,
"""Number of images (1 ref image and view_num - 1 view images).""")
tf.compat.v1.app.flags.DEFINE_integer('max_d', 192,
"""Maximum depth step when training.""")
tf.compat.v1.app.flags.DEFINE_integer('max_w', 640,
"""Maximum image width when training.""")
tf.compat.v1.app.flags.DEFINE_integer('max_h', 512,
"""Maximum image height when training.""")
tf.compat.v1.app.flags.DEFINE_float('sample_scale', 0.25,
"""Downsample scale for building cost volume.""")
# network architectures
tf.compat.v1.app.flags.DEFINE_string('regularization', 'GRU',
"""Regularization method.""")
tf.compat.v1.app.flags.DEFINE_boolean('refinement', False,
"""Whether to apply depth map refinement for 3DCNNs""")
# training parameters
tf.compat.v1.app.flags.DEFINE_integer('num_gpus', 1,
"""Number of GPUs.""")
tf.compat.v1.app.flags.DEFINE_integer('batch_size', 1,
"""Training batch size.""")
tf.compat.v1.app.flags.DEFINE_integer('epoch', 6,
"""Training epoch number.""")
tf.compat.v1.app.flags.DEFINE_float('base_lr', 0.001,
"""Base learning rate.""")
tf.compat.v1.app.flags.DEFINE_integer('display', 1,
"""Interval of loginfo display.""")
tf.compat.v1.app.flags.DEFINE_integer('stepvalue', 10000,
"""Step interval to decay learning rate.""")
tf.compat.v1.app.flags.DEFINE_integer('snapshot', 5000,
"""Step interval to save the model.""")
tf.compat.v1.app.flags.DEFINE_float('gamma', 0.9,
"""Learning rate decay rate.""")
tf.compat.v1.app.flags.DEFINE_boolean('online_augmentation', False,
"""Whether to apply image online augmentation during training""")
FLAGS = tf.compat.v1.app.flags.FLAGS
def online_augmentation(image, random_order=True):
    """Apply every photometric augmentation primitive in ``photaug`` to one image.

    Builds a TF graph that runs each primitive from ``photaug.augmentations``
    exactly once, optionally in a shuffled order, and returns the augmented
    image tensor. Intended to be mapped over a batch with ``tf.map_fn``.

    Args:
        image: a single image tensor (shape assumed HxWxC — TODO confirm
            against the ``photaug`` primitives' expectations).
        random_order: if True, the application order of the primitives is
            randomized per graph execution.

    Returns:
        The augmented image tensor.
    """
    primitives = photaug.augmentations
    # Per-primitive keyword arguments passed to the photaug functions.
    config = {}
    config['random_brightness'] = {'max_abs_change': 50}
    config['random_contrast'] = {'strength_range': [0.3, 1.5]}
    config['additive_gaussian_noise'] = {'stddev_range': [0, 10]}
    config['additive_speckle_noise'] = {'prob_range': [0, 0.0035]}
    config['additive_shade'] = {'transparency_range': [-0.5, 0.5], 'kernel_size_range': [100, 150]}
    config['motion_blur'] = {'max_kernel_size': 3}
    with tf.compat.v1.name_scope('online_augmentation'):
        prim_configs = [config.get(p, {}) for p in primitives]
        # indices[i] selects which primitive is applied at loop step i.
        indices = tf.range(len(primitives))
        if random_order:
            indices = tf.random.shuffle(indices)
        def step(i, image):
            # tf.case dispatches on indices[i]; the lambda defaults (p=p, c=c)
            # bind the loop variables early — required, since a plain closure
            # would late-bind and every branch would use the last (p, c).
            fn_pairs = [(tf.equal(indices[i], j), lambda p=p, c=c: getattr(photaug, p)(image, **c))
                        for j, (p, c) in enumerate(zip(primitives, prim_configs))]
            image = tf.case(fn_pairs)
            return i + 1, image
        # Sequentially apply all primitives; parallel_iterations=1 keeps the
        # applications strictly ordered.
        _, aug_image = tf.while_loop(cond=lambda i, image: tf.less(i, len(primitives)),
                                      body=step, loop_vars=[0, image], parallel_iterations=1)
    return aug_image
class MVSGenerator:
    """ data generator class, tf only accept generator without param

    Yields (images, cams, depth_image) training samples forever, looping over
    ``sample_list``. With GRU regularization each sample is yielded twice:
    once forward and once with the depth range reversed (see end of __iter__).
    """
    def __init__(self, sample_list, view_num):
        # sample_list: per-sample file paths — 2*view_num image/cam paths
        # followed by one depth-map (.pfm) path, judging by the indexing below.
        self.sample_list = sample_list
        self.view_num = view_num
        self.sample_num = len(sample_list)
        self.counter = 0
    def __iter__(self):
        # Infinite generator: restarts from the beginning of sample_list.
        while True:
            for data in self.sample_list:
                start_time = time.time()
                ###### read input data ######
                images = []
                cams = []
                for view in range(self.view_num):
                    image = cv2.imread(data[2 * view])
                    cam = load_cam(open(data[2 * view + 1]))
                    images.append(image)
                    cams.append(cam)
                depth_image = load_pfm(open(data[2 * self.view_num]))
                # dataset specified process
                if FLAGS.train_blendedmvs:
                    # downsize by 4 to fit depth map output
                    depth_image = scale_image(depth_image, scale=FLAGS.sample_scale)
                    cams = scale_mvs_camera(cams, scale=FLAGS.sample_scale)
                elif FLAGS.train_dtu:
                    # set depth range to [425, 937]
                    cams[0][1, 3, 0] = 425
                    cams[0][1, 3, 3] = 937
                elif FLAGS.train_eth3d:
                    # crop images
                    images, cams, depth_image = crop_mvs_input(
                        images, cams, depth_image, max_w=FLAGS.max_w, max_h=FLAGS.max_h)
                    # downsize by 4 to fit depth map output
                    depth_image = scale_image(depth_image, scale=FLAGS.sample_scale)
                    cams = scale_mvs_camera(cams, scale=FLAGS.sample_scale)
                else:
                    print ('Please specify a valid training dataset.')
                    exit(-1)
                # skip invalid views
                # NOTE(review): cams[0][1, 3, :] appears to encode the depth
                # sweep as [start, interval, sample_count, end] — inferred
                # from the reads/writes below; confirm against load_cam.
                if cams[0][1, 3, 0] <= 0 or cams[0][1, 3, 3] <= 0:
                    continue
                # fix depth range and adapt depth sample number
                cams[0][1, 3, 2] = FLAGS.max_d
                cams[0][1, 3, 1] = (cams[0][1, 3, 3] - cams[0][1, 3, 0]) / FLAGS.max_d
                # mask out-of-range depth pixels (in a relaxed range)
                depth_start = cams[0][1, 3, 0] + cams[0][1, 3, 1]
                depth_end = cams[0][1, 3, 0] + (FLAGS.max_d - 2) * cams[0][1, 3, 1]
                depth_image = mask_depth_image(depth_image, depth_start, depth_end)
                # return mvs input
                self.counter += 1
                duration = time.time() - start_time
                images = np.stack(images, axis=0)
                cams = np.stack(cams, axis=0)
                print('Forward pass: d_min = %f, d_max = %f.' % \
                      (cams[0][1, 3, 0], cams[0][1, 3, 0] + (FLAGS.max_d - 1) * cams[0][1, 3, 1]))
                yield (images, cams, depth_image)
                # return backward mvs input for GRU
                if FLAGS.regularization == 'GRU':
                    self.counter += 1
                    start_time = time.time()
                    # Mutates cams IN PLACE after the forward yield: start is
                    # moved to the far end and the interval negated, so the
                    # second yield sweeps depth in the reverse direction.
                    cams[0][1, 3, 0] = cams[0][1, 3, 0] + (FLAGS.max_d - 1) * cams[0][1, 3, 1]
                    cams[0][1, 3, 1] = -cams[0][1, 3, 1]
                    duration = time.time() - start_time
                    print('Back pass: d_min = %f, d_max = %f.' % \
                          (cams[0][1, 3, 0], cams[0][1, 3, 0] + (FLAGS.max_d - 1) * cams[0][1, 3, 1]))
                    yield (images, cams, depth_image)
def average_gradients(tower_grads):
    """Average per-tower gradients for each shared variable.

    This is a synchronization point across all towers: every tower must have
    produced its (gradient, variable) list before the averages can be built.

    Args:
        tower_grads: list (one entry per tower) of lists of
            (gradient, variable) tuples, all towers covering the same
            variables in the same order.

    Returns:
        A single list of (averaged_gradient, variable) pairs. The variable of
        the first tower is used, since the variables are shared across towers.
    """
    averaged = []
    # zip(*...) regroups the data per-variable: each grads_and_vars holds one
    # variable's gradient from every tower.
    for grads_and_vars in zip(*tower_grads):
        # Stack the tower gradients along a new leading 'tower' axis, then
        # reduce it away with a mean.
        stacked = tf.concat(
            axis=0,
            values=[tf.expand_dims(g, 0) for g, _ in grads_and_vars])
        mean_grad = tf.reduce_mean(input_tensor=stacked, axis=0)
        # All towers share the variable, so any tower's pointer works; use
        # the first one.
        shared_var = grads_and_vars[0][1]
        averaged.append((mean_grad, shared_var))
    return averaged
def train(traning_list):
    """ training mvsnet

    Builds the multi-GPU training graph (input pipeline from MVSGenerator,
    per-tower inference + loss + gradients, averaged update op) and runs the
    training loop, periodically writing TensorBoard summaries and saving
    checkpoints under FLAGS.model_folder.

    Args:
        traning_list: list of training samples as produced by the gen_*_path
            helpers (consumed by MVSGenerator).
    """
    training_sample_size = len(traning_list)
    if FLAGS.regularization == 'GRU':
        # GRU mode yields each sample twice (forward + backward depth sweep).
        training_sample_size = training_sample_size * 2
    print ('Training sample number: ', training_sample_size)
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        ########## data iterator #########
        # training generators
        training_generator = iter(MVSGenerator(traning_list, FLAGS.view_num))
        generator_data_type = (tf.float32, tf.float32, tf.float32)
        # dataset from generator
        training_set = tf.data.Dataset.from_generator(lambda: training_generator, generator_data_type)
        training_set = training_set.batch(FLAGS.batch_size)
        training_set = training_set.prefetch(buffer_size=1)
        # iterators
        training_iterator = tf.compat.v1.data.make_initializable_iterator(training_set)
        ########## optimization options ##########
        global_step = tf.Variable(0, trainable=False, name='global_step')
        lr_op = tf.compat.v1.train.exponential_decay(FLAGS.base_lr, global_step=global_step,
            decay_steps=FLAGS.stepvalue, decay_rate=FLAGS.gamma, name='lr')
        opt = tf.compat.v1.train.RMSPropOptimizer(learning_rate=lr_op)
        tower_grads = []
        # BUGFIX: was `xrange`, which does not exist in Python 3 (and this
        # file targets tf.compat.v1, i.e. a Python-3-era TensorFlow) — it
        # raised NameError at graph-construction time.
        for i in range(FLAGS.num_gpus):
            with tf.device('/gpu:%d' % i):
                with tf.compat.v1.name_scope('Model_tower%d' % i) as scope:
                    # get data
                    images, cams, depth_image = training_iterator.get_next()
                    # photometric augmentation and image normalization
                    arg_images = []
                    for view in range(0, FLAGS.view_num):
                        image = tf.squeeze(tf.slice(images, [0, view, 0, 0, 0], [-1, 1, -1, -1, 3]), axis=1)
                        if FLAGS.online_augmentation:
                            image = tf.map_fn(online_augmentation, image, back_prop=False)
                        image = tf.image.per_image_standardization(image)
                        arg_images.append(image)
                    images = tf.stack(arg_images, axis=1)
                    images.set_shape(tf.TensorShape([None, FLAGS.view_num, None, None, 3]))
                    cams.set_shape(tf.TensorShape([None, FLAGS.view_num, 2, 4, 4]))
                    depth_image.set_shape(tf.TensorShape([None, None, None, 1]))
                    # depth sweep parameters come from the reference cam:
                    # cams[:, 0, 1, 3, 0] = start, cams[:, 0, 1, 3, 1] = interval
                    depth_start = tf.reshape(
                        tf.slice(cams, [0, 0, 1, 3, 0], [FLAGS.batch_size, 1, 1, 1, 1]), [FLAGS.batch_size])
                    depth_interval = tf.reshape(
                        tf.slice(cams, [0, 0, 1, 3, 1], [FLAGS.batch_size, 1, 1, 1, 1]), [FLAGS.batch_size])
                    # the first tower owns shared-variable creation
                    is_master_gpu = False
                    if i == 0:
                        is_master_gpu = True
                    # inference
                    if FLAGS.regularization == '3DCNNs':
                        # initial depth map
                        depth_map, prob_map = inference(
                            images, cams, FLAGS.max_d, depth_start, depth_interval, is_master_gpu)
                        # refinement
                        if FLAGS.refinement:
                            ref_image = tf.squeeze(
                                tf.slice(images, [0, 0, 0, 0, 0], [-1, 1, -1, -1, 3]), axis=1)
                            refined_depth_map = depth_refine(depth_map, ref_image,
                                FLAGS.max_d, depth_start, depth_interval, is_master_gpu)
                        else:
                            refined_depth_map = depth_map
                        # regression loss: average of initial and refined losses
                        loss0, less_one_temp, less_three_temp = mvsnet_regression_loss(
                            depth_map, depth_image, depth_interval)
                        loss1, less_one_accuracy, less_three_accuracy = mvsnet_regression_loss(
                            refined_depth_map, depth_image, depth_interval)
                        loss = (loss0 + loss1) / 2
                    elif FLAGS.regularization == 'GRU':
                        # probability volume
                        prob_volume = inference_prob_recurrent(
                            images, cams, FLAGS.max_d, depth_start, depth_interval, is_master_gpu)
                        # classification loss
                        loss, mae, less_one_accuracy, less_three_accuracy, depth_map = \
                            mvsnet_classification_loss(
                                prob_volume, depth_image, FLAGS.max_d, depth_start, depth_interval)
                    # retain the summaries from the final tower.
                    summaries = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.SUMMARIES, scope)
                    # calculate the gradients for the batch of data on this CIFAR tower.
                    grads = opt.compute_gradients(loss)
                    # keep track of the gradients across all towers.
                    tower_grads.append(grads)
        # average gradient
        grads = average_gradients(tower_grads)
        # training opt
        train_opt = opt.apply_gradients(grads, global_step=global_step)
        # summary
        summaries.append(tf.compat.v1.summary.scalar('loss', loss))
        summaries.append(tf.compat.v1.summary.scalar('less_one_accuracy', less_one_accuracy))
        summaries.append(tf.compat.v1.summary.scalar('less_three_accuracy', less_three_accuracy))
        summaries.append(tf.compat.v1.summary.scalar('lr', lr_op))
        weights_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
        for var in weights_list:
            summaries.append(tf.compat.v1.summary.histogram(var.op.name, var))
        for grad, var in grads:
            if grad is not None:
                summaries.append(tf.compat.v1.summary.histogram(var.op.name + '/gradients', grad))
        # saver
        saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables(), max_to_keep=None)
        summary_op = tf.compat.v1.summary.merge(summaries)
        # initialization option
        init_op = tf.compat.v1.global_variables_initializer()
        config = tf.compat.v1.ConfigProto(allow_soft_placement = True)
        config.gpu_options.allow_growth = True
        with tf.compat.v1.Session(config=config) as sess:
            # initialization
            total_step = 0
            sess.run(init_op)
            summary_writer = tf.compat.v1.summary.FileWriter(FLAGS.log_folder, sess.graph)
            # load pre-trained model
            if FLAGS.use_pretrain:
                pretrained_model_path = os.path.join(FLAGS.model_folder, FLAGS.regularization, 'model.ckpt')
                restorer = tf.compat.v1.train.Saver(tf.compat.v1.global_variables())
                restorer.restore(sess, '-'.join([pretrained_model_path, str(FLAGS.ckpt_step)]))
                print(Notify.INFO, 'Pre-trained model restored from %s' %
                      ('-'.join([pretrained_model_path, str(FLAGS.ckpt_step)])), Notify.ENDC)
                total_step = FLAGS.ckpt_step
            # training several epochs
            for epoch in range(FLAGS.epoch):
                # training of one epoch
                step = 0
                sess.run(training_iterator.initializer)
                for _ in range(int(training_sample_size / FLAGS.num_gpus)):
                    # run one batch
                    start_time = time.time()
                    try:
                        out_summary_op, out_opt, out_loss, out_less_one, out_less_three = sess.run(
                            [summary_op, train_opt, loss, less_one_accuracy, less_three_accuracy])
                    except tf.errors.OutOfRangeError:
                        print("End of dataset")  # ==> "End of dataset"
                        break
                    duration = time.time() - start_time
                    # print info
                    if step % FLAGS.display == 0:
                        print(Notify.INFO,
                              'epoch, %d, step %d, total_step %d, loss = %.4f, (< 1px) = %.4f, (< 3px) = %.4f (%.3f sec/step)' %
                              (epoch, step, total_step, out_loss, out_less_one, out_less_three, duration), Notify.ENDC)
                    # write summary
                    if step % (FLAGS.display * 10) == 0:
                        summary_writer.add_summary(out_summary_op, total_step)
                    # save the model checkpoint periodically
                    if (total_step % FLAGS.snapshot == 0 or step == (training_sample_size - 1)):
                        model_folder = os.path.join(FLAGS.model_folder, FLAGS.regularization)
                        if not os.path.exists(model_folder):
                            os.mkdir(model_folder)
                        ckpt_path = os.path.join(model_folder, 'model.ckpt')
                        print(Notify.INFO, 'Saving model to %s' % ckpt_path, Notify.ENDC)
                        saver.save(sess, ckpt_path, global_step=total_step)
                    step += FLAGS.batch_size * FLAGS.num_gpus
                    total_step += FLAGS.batch_size * FLAGS.num_gpus
def main(argv=None):  # pylint: disable=unused-argument
    """ program entrance

    Selects the training sample list according to the dataset flags,
    shuffles it, and starts training.
    """
    # Prepare all training samples; when several flags are set, the last
    # matching branch wins (eth3d > dtu > blendedmvg > blendedmvs).
    sample_list = None
    if FLAGS.train_blendedmvs:
        sample_list = gen_blendedmvs_path(FLAGS.blendedmvs_data_root, mode='training_mvs')
    if FLAGS.train_blendedmvg:
        sample_list = gen_blendedmvs_path(FLAGS.blendedmvs_data_root, mode='training_mvg')
    if FLAGS.train_dtu:
        sample_list = gen_dtu_resized_path(FLAGS.dtu_data_root)
    if FLAGS.train_eth3d:
        sample_list = gen_eth3d_path(FLAGS.eth3d_data_root, mode='training')
    # BUGFIX: fail with a clear message instead of a NameError when no
    # dataset flag was provided.
    if sample_list is None:
        raise ValueError('Please specify a valid training dataset '
                         '(--train_blendedmvs / --train_blendedmvg / --train_dtu / --train_eth3d).')
    # Shuffle
    # BUGFIX: the top of the file only does `from random import randint`, so
    # the bare name `random` is not guaranteed to be bound (it may leak in
    # via a star import, but that is not reliable) — import it explicitly.
    import random
    random.shuffle(sample_list)
    # Training entrance.
    train(sample_list)
# Script entry point: print the configured view count, then hand control to
# tf.compat.v1.app.run(), which parses the flags and calls main().
if __name__ == '__main__':
    print ('Training MVSNet with totally %d views inputs (including reference view)' % FLAGS.view_num)
    tf.compat.v1.app.run()
| [
"tensorflow.compat.v1.summary.merge",
"tensorflow.equal",
"tensorflow.compat.v1.get_collection",
"tensorflow.reduce_mean",
"sys.path.append",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.train.exponential_decay",
"tensorflow.Graph",
"te... | [((279, 341), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (313, 341), True, 'import tensorflow as tf\n'), ((380, 402), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (395, 402), False, 'import sys\n'), ((631, 755), 'tensorflow.compat.v1.app.flags.DEFINE_string', 'tf.compat.v1.app.flags.DEFINE_string', (['"""blendedmvs_data_root"""', '"""/data/BlendedMVS/dataset_low_res"""', '"""Path to dtu dataset."""'], {}), "('blendedmvs_data_root',\n '/data/BlendedMVS/dataset_low_res', 'Path to dtu dataset.')\n", (667, 755), True, 'import tensorflow as tf\n'), ((786, 912), 'tensorflow.compat.v1.app.flags.DEFINE_string', 'tf.compat.v1.app.flags.DEFINE_string', (['"""eth3d_data_root"""', '"""/data/eth3d/lowres/training/undistorted"""', '"""Path to dtu dataset."""'], {}), "('eth3d_data_root',\n '/data/eth3d/lowres/training/undistorted', 'Path to dtu dataset.')\n", (822, 912), True, 'import tensorflow as tf\n'), ((943, 1037), 'tensorflow.compat.v1.app.flags.DEFINE_string', 'tf.compat.v1.app.flags.DEFINE_string', (['"""dtu_data_root"""', '"""/data/dtu"""', '"""Path to dtu dataset."""'], {}), "('dtu_data_root', '/data/dtu',\n 'Path to dtu dataset.')\n", (979, 1037), True, 'import tensorflow as tf\n'), ((1068, 1157), 'tensorflow.compat.v1.app.flags.DEFINE_boolean', 'tf.compat.v1.app.flags.DEFINE_boolean', (['"""train_blendedmvs"""', '(False)', '"""Whether to train."""'], {}), "('train_blendedmvs', False,\n 'Whether to train.')\n", (1105, 1157), True, 'import tensorflow as tf\n'), ((1189, 1278), 'tensorflow.compat.v1.app.flags.DEFINE_boolean', 'tf.compat.v1.app.flags.DEFINE_boolean', (['"""train_blendedmvg"""', '(False)', '"""Whether to train."""'], {}), "('train_blendedmvg', False,\n 'Whether to train.')\n", (1226, 1278), True, 'import tensorflow as tf\n'), ((1310, 1388), 'tensorflow.compat.v1.app.flags.DEFINE_boolean', 
'tf.compat.v1.app.flags.DEFINE_boolean', (['"""train_dtu"""', '(False)', '"""Whether to train."""'], {}), "('train_dtu', False, 'Whether to train.')\n", (1347, 1388), True, 'import tensorflow as tf\n'), ((1424, 1509), 'tensorflow.compat.v1.app.flags.DEFINE_boolean', 'tf.compat.v1.app.flags.DEFINE_boolean', (['"""train_eth3d"""', '(False)', '"""Whether to train."""'], {}), "('train_eth3d', False, 'Whether to train.'\n )\n", (1461, 1509), True, 'import tensorflow as tf\n'), ((1540, 1636), 'tensorflow.compat.v1.app.flags.DEFINE_string', 'tf.compat.v1.app.flags.DEFINE_string', (['"""log_folder"""', '"""/data/tf_log"""', '"""Path to store the log."""'], {}), "('log_folder', '/data/tf_log',\n 'Path to store the log.')\n", (1576, 1636), True, 'import tensorflow as tf\n'), ((1666, 1767), 'tensorflow.compat.v1.app.flags.DEFINE_string', 'tf.compat.v1.app.flags.DEFINE_string', (['"""model_folder"""', '"""/data/tf_model"""', '"""Path to save the model."""'], {}), "('model_folder', '/data/tf_model',\n 'Path to save the model.')\n", (1702, 1767), True, 'import tensorflow as tf\n'), ((1797, 1864), 'tensorflow.compat.v1.app.flags.DEFINE_integer', 'tf.compat.v1.app.flags.DEFINE_integer', (['"""ckpt_step"""', '(0)', '"""ckpt step."""'], {}), "('ckpt_step', 0, 'ckpt step.')\n", (1834, 1864), True, 'import tensorflow as tf\n'), ((1899, 1984), 'tensorflow.compat.v1.app.flags.DEFINE_boolean', 'tf.compat.v1.app.flags.DEFINE_boolean', (['"""use_pretrain"""', '(False)', '"""Whether to train."""'], {}), "('use_pretrain', False,\n 'Whether to train.')\n", (1936, 1984), True, 'import tensorflow as tf\n'), ((2038, 2158), 'tensorflow.compat.v1.app.flags.DEFINE_integer', 'tf.compat.v1.app.flags.DEFINE_integer', (['"""view_num"""', '(3)', '"""Number of images (1 ref image and view_num - 1 view images)."""'], {}), "('view_num', 3,\n 'Number of images (1 ref image and view_num - 1 view images).')\n", (2075, 2158), True, 'import tensorflow as tf\n'), ((2190, 2282), 
'tensorflow.compat.v1.app.flags.DEFINE_integer', 'tf.compat.v1.app.flags.DEFINE_integer', (['"""max_d"""', '(192)', '"""Maximum depth step when training."""'], {}), "('max_d', 192,\n 'Maximum depth step when training.')\n", (2227, 2282), True, 'import tensorflow as tf\n'), ((2314, 2407), 'tensorflow.compat.v1.app.flags.DEFINE_integer', 'tf.compat.v1.app.flags.DEFINE_integer', (['"""max_w"""', '(640)', '"""Maximum image width when training."""'], {}), "('max_w', 640,\n 'Maximum image width when training.')\n", (2351, 2407), True, 'import tensorflow as tf\n'), ((2439, 2533), 'tensorflow.compat.v1.app.flags.DEFINE_integer', 'tf.compat.v1.app.flags.DEFINE_integer', (['"""max_h"""', '(512)', '"""Maximum image height when training."""'], {}), "('max_h', 512,\n 'Maximum image height when training.')\n", (2476, 2533), True, 'import tensorflow as tf\n'), ((2565, 2672), 'tensorflow.compat.v1.app.flags.DEFINE_float', 'tf.compat.v1.app.flags.DEFINE_float', (['"""sample_scale"""', '(0.25)', '"""Downsample scale for building cost volume."""'], {}), "('sample_scale', 0.25,\n 'Downsample scale for building cost volume.')\n", (2600, 2672), True, 'import tensorflow as tf\n'), ((2731, 2822), 'tensorflow.compat.v1.app.flags.DEFINE_string', 'tf.compat.v1.app.flags.DEFINE_string', (['"""regularization"""', '"""GRU"""', '"""Regularization method."""'], {}), "('regularization', 'GRU',\n 'Regularization method.')\n", (2767, 2822), True, 'import tensorflow as tf\n'), ((2852, 2966), 'tensorflow.compat.v1.app.flags.DEFINE_boolean', 'tf.compat.v1.app.flags.DEFINE_boolean', (['"""refinement"""', '(False)', '"""Whether to apply depth map refinement for 3DCNNs"""'], {}), "('refinement', False,\n 'Whether to apply depth map refinement for 3DCNNs')\n", (2889, 2966), True, 'import tensorflow as tf\n'), ((3021, 3092), 'tensorflow.compat.v1.app.flags.DEFINE_integer', 'tf.compat.v1.app.flags.DEFINE_integer', (['"""num_gpus"""', '(1)', '"""Number of GPUs."""'], {}), "('num_gpus', 1, 'Number of 
GPUs.')\n", (3058, 3092), True, 'import tensorflow as tf\n'), ((3128, 3206), 'tensorflow.compat.v1.app.flags.DEFINE_integer', 'tf.compat.v1.app.flags.DEFINE_integer', (['"""batch_size"""', '(1)', '"""Training batch size."""'], {}), "('batch_size', 1, 'Training batch size.')\n", (3165, 3206), True, 'import tensorflow as tf\n'), ((3242, 3317), 'tensorflow.compat.v1.app.flags.DEFINE_integer', 'tf.compat.v1.app.flags.DEFINE_integer', (['"""epoch"""', '(6)', '"""Training epoch number."""'], {}), "('epoch', 6, 'Training epoch number.')\n", (3279, 3317), True, 'import tensorflow as tf\n'), ((3353, 3429), 'tensorflow.compat.v1.app.flags.DEFINE_float', 'tf.compat.v1.app.flags.DEFINE_float', (['"""base_lr"""', '(0.001)', '"""Base learning rate."""'], {}), "('base_lr', 0.001, 'Base learning rate.')\n", (3388, 3429), True, 'import tensorflow as tf\n'), ((3462, 3549), 'tensorflow.compat.v1.app.flags.DEFINE_integer', 'tf.compat.v1.app.flags.DEFINE_integer', (['"""display"""', '(1)', '"""Interval of loginfo display."""'], {}), "('display', 1,\n 'Interval of loginfo display.')\n", (3499, 3549), True, 'import tensorflow as tf\n'), ((3580, 3682), 'tensorflow.compat.v1.app.flags.DEFINE_integer', 'tf.compat.v1.app.flags.DEFINE_integer', (['"""stepvalue"""', '(10000)', '"""Step interval to decay learning rate."""'], {}), "('stepvalue', 10000,\n 'Step interval to decay learning rate.')\n", (3617, 3682), True, 'import tensorflow as tf\n'), ((3713, 3808), 'tensorflow.compat.v1.app.flags.DEFINE_integer', 'tf.compat.v1.app.flags.DEFINE_integer', (['"""snapshot"""', '(5000)', '"""Step interval to save the model."""'], {}), "('snapshot', 5000,\n 'Step interval to save the model.')\n", (3750, 3808), True, 'import tensorflow as tf\n'), ((3839, 3917), 'tensorflow.compat.v1.app.flags.DEFINE_float', 'tf.compat.v1.app.flags.DEFINE_float', (['"""gamma"""', '(0.9)', '"""Learning rate decay rate."""'], {}), "('gamma', 0.9, 'Learning rate decay rate.')\n", (3874, 3917), True, 'import tensorflow as 
tf\n'), ((3950, 4083), 'tensorflow.compat.v1.app.flags.DEFINE_boolean', 'tf.compat.v1.app.flags.DEFINE_boolean', (['"""online_augmentation"""', '(False)', '"""Whether to apply image online augmentation during training"""'], {}), "('online_augmentation', False,\n 'Whether to apply image online augmentation during training')\n", (3987, 4083), True, 'import tensorflow as tf\n'), ((20962, 20984), 'tensorflow.compat.v1.app.run', 'tf.compat.v1.app.run', ([], {}), '()\n', (20982, 20984), True, 'import tensorflow as tf\n'), ((4687, 4733), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['"""online_augmentation"""'], {}), "('online_augmentation')\n", (4710, 4733), True, 'import tensorflow as tf\n'), ((10198, 10229), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(0)', 'values': 'grads'}), '(axis=0, values=grads)\n', (10207, 10229), True, 'import tensorflow as tf\n'), ((10246, 10287), 'tensorflow.reduce_mean', 'tf.reduce_mean', ([], {'input_tensor': 'grad', 'axis': '(0)'}), '(input_tensor=grad, axis=0)\n', (10260, 10287), True, 'import tensorflow as tf\n'), ((10912, 10931), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (10921, 10931), True, 'import tensorflow as tf\n'), ((11216, 11301), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['(lambda : training_generator)', 'generator_data_type'], {}), '(lambda : training_generator, generator_data_type\n )\n', (11246, 11301), True, 'import tensorflow as tf\n'), ((11468, 11527), 'tensorflow.compat.v1.data.make_initializable_iterator', 'tf.compat.v1.data.make_initializable_iterator', (['training_set'], {}), '(training_set)\n', (11513, 11527), True, 'import tensorflow as tf\n'), ((11605, 11656), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)', 'name': '"""global_step"""'}), "(0, trainable=False, name='global_step')\n", (11616, 11656), True, 'import tensorflow as tf\n'), ((11674, 11818), 'tensorflow.compat.v1.train.exponential_decay', 
'tf.compat.v1.train.exponential_decay', (['FLAGS.base_lr'], {'global_step': 'global_step', 'decay_steps': 'FLAGS.stepvalue', 'decay_rate': 'FLAGS.gamma', 'name': '"""lr"""'}), "(FLAGS.base_lr, global_step=global_step,\n decay_steps=FLAGS.stepvalue, decay_rate=FLAGS.gamma, name='lr')\n", (11710, 11818), True, 'import tensorflow as tf\n'), ((11875, 11931), 'tensorflow.compat.v1.train.RMSPropOptimizer', 'tf.compat.v1.train.RMSPropOptimizer', ([], {'learning_rate': 'lr_op'}), '(learning_rate=lr_op)\n', (11910, 11931), True, 'import tensorflow as tf\n'), ((16395, 16466), 'tensorflow.compat.v1.get_collection', 'tf.compat.v1.get_collection', (['tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES'], {}), '(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)\n', (16422, 16466), True, 'import tensorflow as tf\n'), ((16898, 16935), 'tensorflow.compat.v1.summary.merge', 'tf.compat.v1.summary.merge', (['summaries'], {}), '(summaries)\n', (16924, 16935), True, 'import tensorflow as tf\n'), ((16990, 17033), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (17031, 17033), True, 'import tensorflow as tf\n'), ((17052, 17103), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (17076, 17103), True, 'import tensorflow as tf\n'), ((4895, 4921), 'tensorflow.random.shuffle', 'tf.random.shuffle', (['indices'], {}), '(indices)\n', (4912, 4921), True, 'import tensorflow as tf\n'), ((5159, 5176), 'tensorflow.case', 'tf.case', (['fn_pairs'], {}), '(fn_pairs)\n', (5166, 5176), True, 'import tensorflow as tf\n'), ((9993, 10013), 'tensorflow.expand_dims', 'tf.expand_dims', (['g', '(0)'], {}), '(g, 0)\n', (10007, 10013), True, 'import tensorflow as tf\n'), ((16066, 16107), 'tensorflow.compat.v1.summary.scalar', 'tf.compat.v1.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (16093, 16107), True, 'import tensorflow as tf\n'), ((16135, 16202), 
'tensorflow.compat.v1.summary.scalar', 'tf.compat.v1.summary.scalar', (['"""less_one_accuracy"""', 'less_one_accuracy'], {}), "('less_one_accuracy', less_one_accuracy)\n", (16162, 16202), True, 'import tensorflow as tf\n'), ((16230, 16301), 'tensorflow.compat.v1.summary.scalar', 'tf.compat.v1.summary.scalar', (['"""less_three_accuracy"""', 'less_three_accuracy'], {}), "('less_three_accuracy', less_three_accuracy)\n", (16257, 16301), True, 'import tensorflow as tf\n'), ((16329, 16369), 'tensorflow.compat.v1.summary.scalar', 'tf.compat.v1.summary.scalar', (['"""lr"""', 'lr_op'], {}), "('lr', lr_op)\n", (16356, 16369), True, 'import tensorflow as tf\n'), ((16817, 16848), 'tensorflow.compat.v1.global_variables', 'tf.compat.v1.global_variables', ([], {}), '()\n', (16846, 16848), True, 'import tensorflow as tf\n'), ((17170, 17205), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'config'}), '(config=config)\n', (17190, 17205), True, 'import tensorflow as tf\n'), ((17353, 17414), 'tensorflow.compat.v1.summary.FileWriter', 'tf.compat.v1.summary.FileWriter', (['FLAGS.log_folder', 'sess.graph'], {}), '(FLAGS.log_folder, sess.graph)\n', (17384, 17414), True, 'import tensorflow as tf\n'), ((5834, 5845), 'time.time', 'time.time', ([], {}), '()\n', (5843, 5845), False, 'import time\n'), ((8177, 8201), 'numpy.stack', 'np.stack', (['images'], {'axis': '(0)'}), '(images, axis=0)\n', (8185, 8201), True, 'import numpy as np\n'), ((8226, 8248), 'numpy.stack', 'np.stack', (['cams'], {'axis': '(0)'}), '(cams, axis=0)\n', (8234, 8248), True, 'import numpy as np\n'), ((10887, 10897), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (10895, 10897), True, 'import tensorflow as tf\n'), ((12020, 12044), 'tensorflow.device', 'tf.device', (["('/gpu:%d' % i)"], {}), "('/gpu:%d' % i)\n", (12029, 12044), True, 'import tensorflow as tf\n'), ((16531, 16579), 'tensorflow.compat.v1.summary.histogram', 'tf.compat.v1.summary.histogram', (['var.op.name', 'var'], {}), 
'(var.op.name, var)\n', (16561, 16579), True, 'import tensorflow as tf\n'), ((17532, 17600), 'os.path.join', 'os.path.join', (['FLAGS.model_folder', 'FLAGS.regularization', '"""model.ckpt"""'], {}), "(FLAGS.model_folder, FLAGS.regularization, 'model.ckpt')\n", (17544, 17600), False, 'import os\n'), ((4979, 5002), 'tensorflow.equal', 'tf.equal', (['indices[i]', 'j'], {}), '(indices[i], j)\n', (4987, 5002), True, 'import tensorflow as tf\n'), ((6031, 6057), 'cv2.imread', 'cv2.imread', (['data[2 * view]'], {}), '(data[2 * view])\n', (6041, 6057), False, 'import cv2\n'), ((8126, 8137), 'time.time', 'time.time', ([], {}), '()\n', (8135, 8137), False, 'import time\n'), ((8645, 8656), 'time.time', 'time.time', ([], {}), '()\n', (8654, 8656), False, 'import time\n'), ((12068, 12112), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (["('Model_tower%d' % i)"], {}), "('Model_tower%d' % i)\n", (12091, 12112), True, 'import tensorflow as tf\n'), ((12836, 12864), 'tensorflow.stack', 'tf.stack', (['arg_images'], {'axis': '(1)'}), '(arg_images, axis=1)\n', (12844, 12864), True, 'import tensorflow as tf\n'), ((15488, 15556), 'tensorflow.compat.v1.get_collection', 'tf.compat.v1.get_collection', (['tf.compat.v1.GraphKeys.SUMMARIES', 'scope'], {}), '(tf.compat.v1.GraphKeys.SUMMARIES, scope)\n', (15515, 15556), True, 'import tensorflow as tf\n'), ((16682, 16746), 'tensorflow.compat.v1.summary.histogram', 'tf.compat.v1.summary.histogram', (["(var.op.name + '/gradients')", 'grad'], {}), "(var.op.name + '/gradients', grad)\n", (16712, 16746), True, 'import tensorflow as tf\n'), ((17654, 17685), 'tensorflow.compat.v1.global_variables', 'tf.compat.v1.global_variables', ([], {}), '()\n', (17683, 17685), True, 'import tensorflow as tf\n'), ((18361, 18372), 'time.time', 'time.time', ([], {}), '()\n', (18370, 18372), False, 'import time\n'), ((8843, 8854), 'time.time', 'time.time', ([], {}), '()\n', (8852, 8854), False, 'import time\n'), ((12695, 12736), 
'tensorflow.image.per_image_standardization', 'tf.image.per_image_standardization', (['image'], {}), '(image)\n', (12729, 12736), True, 'import tensorflow as tf\n'), ((12905, 12958), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, FLAGS.view_num, None, None, 3]'], {}), '([None, FLAGS.view_num, None, None, 3])\n', (12919, 12958), True, 'import tensorflow as tf\n'), ((12996, 13043), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, FLAGS.view_num, 2, 4, 4]'], {}), '([None, FLAGS.view_num, 2, 4, 4])\n', (13010, 13043), True, 'import tensorflow as tf\n'), ((13088, 13125), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, None, None, 1]'], {}), '([None, None, None, 1])\n', (13102, 13125), True, 'import tensorflow as tf\n'), ((13199, 13262), 'tensorflow.slice', 'tf.slice', (['cams', '[0, 0, 1, 3, 0]', '[FLAGS.batch_size, 1, 1, 1, 1]'], {}), '(cams, [0, 0, 1, 3, 0], [FLAGS.batch_size, 1, 1, 1, 1])\n', (13207, 13262), True, 'import tensorflow as tf\n'), ((13359, 13422), 'tensorflow.slice', 'tf.slice', (['cams', '[0, 0, 1, 3, 1]', '[FLAGS.batch_size, 1, 1, 1, 1]'], {}), '(cams, [0, 0, 1, 3, 1], [FLAGS.batch_size, 1, 1, 1, 1])\n', (13367, 13422), True, 'import tensorflow as tf\n'), ((18787, 18798), 'time.time', 'time.time', ([], {}), '()\n', (18796, 18798), False, 'import time\n'), ((19608, 19662), 'os.path.join', 'os.path.join', (['FLAGS.model_folder', 'FLAGS.regularization'], {}), '(FLAGS.model_folder, FLAGS.regularization)\n', (19620, 19662), False, 'import os\n'), ((19814, 19854), 'os.path.join', 'os.path.join', (['model_folder', '"""model.ckpt"""'], {}), "(model_folder, 'model.ckpt')\n", (19826, 19854), False, 'import os\n'), ((12448, 12504), 'tensorflow.slice', 'tf.slice', (['images', '[0, view, 0, 0, 0]', '[-1, 1, -1, -1, 3]'], {}), '(images, [0, view, 0, 0, 0], [-1, 1, -1, -1, 3])\n', (12456, 12504), True, 'import tensorflow as tf\n'), ((12606, 12660), 'tensorflow.map_fn', 'tf.map_fn', (['online_augmentation', 'image'], {'back_prop': '(False)'}), 
'(online_augmentation, image, back_prop=False)\n', (12615, 12660), True, 'import tensorflow as tf\n'), ((19695, 19723), 'os.path.exists', 'os.path.exists', (['model_folder'], {}), '(model_folder)\n', (19709, 19723), False, 'import os\n'), ((19754, 19776), 'os.mkdir', 'os.mkdir', (['model_folder'], {}), '(model_folder)\n', (19762, 19776), False, 'import os\n'), ((14037, 14090), 'tensorflow.slice', 'tf.slice', (['images', '[0, 0, 0, 0, 0]', '[-1, 1, -1, -1, 3]'], {}), '(images, [0, 0, 0, 0, 0], [-1, 1, -1, -1, 3])\n', (14045, 14090), True, 'import tensorflow as tf\n')] |
import numpy
import theano
from nose.plugins.skip import SkipTest
from theano.tests.unittest_tools import verify_grad
try:
from pylearn2.sandbox.cuda_convnet.response_norm import (
CrossMapNorm,
CrossMapNormUndo
)
from theano.sandbox.cuda import CudaNdarrayType, CudaNdarray
from theano.sandbox.cuda import gpu_from_host
from theano.sandbox.cuda import ftensor4 as cuda_ftensor4
from theano.sandbox.cuda.basic_ops import gpu_contiguous
except ImportError:
raise SkipTest('cuda not available')
if theano.config.mode=='FAST_COMPILE':
mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
else:
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
def test_cross_map_norm_simple():
op = CrossMapNorm(16, 15. / 16., 1., True)
x = CudaNdarray(numpy.ones((16, 2, 2, 2), dtype='float32'))
x_ = theano.tensor.TensorVariable(CudaNdarrayType([False] * 4))
f = theano.function([x_], op(x_)[0])
numpy.testing.assert_allclose(f(x), 0.0625)
def test_cross_map_norm_grad_simple():
rng = numpy.random.RandomState([2013, 2, 10])
op = CrossMapNorm(16, 15/16., 1, True)
make_graph = lambda inp: op(gpu_from_host(inp))[0]
verify = lambda array: verify_grad(make_graph, [array])
inputs = [numpy.ones((16, 1, 1, 1), dtype='float32'),
rng.normal(size=(32, 5, 5, 10)).astype('float32')]
for arr in inputs:
yield verify, arr
def test_cross_map_norm_noncontiguous_grad():
# Check the case reported at https://groups.google.com/d/topic/pylearn-users/KxIYc3hczf4/discussion
x = cuda_ftensor4('x')
x_shuffled = x.dimshuffle(1, 2, 3, 0)
x_shuffled = gpu_contiguous(x_shuffled)
response_norm = CrossMapNorm(
size_f=16, add_scale=(15. / 16.), pow_scale=1, blocked=True)
output_shuffled = response_norm(x_shuffled)[0]
output = output_shuffled.dimshuffle(3, 0, 1, 2)
cost = output.sum()
cost.name = 'cost'
grad_x = theano.grad(cost, x)
f = theano.function([x], grad_x, mode=mode_with_gpu)
x_val = CudaNdarray(numpy.ones((2, 16, 2, 2), dtype='float32'))
f(x_val)
def test_optimization():
op = CrossMapNorm(16, 15./16., 1, True)
x_ = theano.tensor.TensorVariable(CudaNdarrayType([False] * 4))
f = theano.function([x_], theano.grad(op(x_)[0].sum(), x_))
nodes = [x for x in f.maker.fgraph.apply_nodes
if type(x.op) == CrossMapNormUndo]
assert len(nodes) == 1
assert nodes[0].op.inplace
| [
"theano.function",
"theano.sandbox.cuda.basic_ops.gpu_contiguous",
"numpy.ones",
"pylearn2.sandbox.cuda_convnet.response_norm.CrossMapNorm",
"theano.sandbox.cuda.CudaNdarrayType",
"theano.sandbox.cuda.ftensor4",
"theano.compile.mode.get_mode",
"theano.sandbox.cuda.gpu_from_host",
"theano.tests.unitt... | [((784, 824), 'pylearn2.sandbox.cuda_convnet.response_norm.CrossMapNorm', 'CrossMapNorm', (['(16)', '(15.0 / 16.0)', '(1.0)', '(True)'], {}), '(16, 15.0 / 16.0, 1.0, True)\n', (796, 824), False, 'from pylearn2.sandbox.cuda_convnet.response_norm import CrossMapNorm, CrossMapNormUndo\n'), ((1094, 1133), 'numpy.random.RandomState', 'numpy.random.RandomState', (['[2013, 2, 10]'], {}), '([2013, 2, 10])\n', (1118, 1133), False, 'import numpy\n'), ((1143, 1179), 'pylearn2.sandbox.cuda_convnet.response_norm.CrossMapNorm', 'CrossMapNorm', (['(16)', '(15 / 16.0)', '(1)', '(True)'], {}), '(16, 15 / 16.0, 1, True)\n', (1155, 1179), False, 'from pylearn2.sandbox.cuda_convnet.response_norm import CrossMapNorm, CrossMapNormUndo\n'), ((1624, 1642), 'theano.sandbox.cuda.ftensor4', 'cuda_ftensor4', (['"""x"""'], {}), "('x')\n", (1637, 1642), True, 'from theano.sandbox.cuda import ftensor4 as cuda_ftensor4\n'), ((1702, 1728), 'theano.sandbox.cuda.basic_ops.gpu_contiguous', 'gpu_contiguous', (['x_shuffled'], {}), '(x_shuffled)\n', (1716, 1728), False, 'from theano.sandbox.cuda.basic_ops import gpu_contiguous\n'), ((1749, 1822), 'pylearn2.sandbox.cuda_convnet.response_norm.CrossMapNorm', 'CrossMapNorm', ([], {'size_f': '(16)', 'add_scale': '(15.0 / 16.0)', 'pow_scale': '(1)', 'blocked': '(True)'}), '(size_f=16, add_scale=15.0 / 16.0, pow_scale=1, blocked=True)\n', (1761, 1822), False, 'from pylearn2.sandbox.cuda_convnet.response_norm import CrossMapNorm, CrossMapNormUndo\n'), ((1999, 2019), 'theano.grad', 'theano.grad', (['cost', 'x'], {}), '(cost, x)\n', (2010, 2019), False, 'import theano\n'), ((2028, 2076), 'theano.function', 'theano.function', (['[x]', 'grad_x'], {'mode': 'mode_with_gpu'}), '([x], grad_x, mode=mode_with_gpu)\n', (2043, 2076), False, 'import theano\n'), ((2194, 2232), 'pylearn2.sandbox.cuda_convnet.response_norm.CrossMapNorm', 'CrossMapNorm', (['(16)', '(15.0 / 16.0)', '(1)', '(True)'], {}), '(16, 15.0 / 16.0, 1, True)\n', (2206, 2232), 
False, 'from pylearn2.sandbox.cuda_convnet.response_norm import CrossMapNorm, CrossMapNormUndo\n'), ((507, 537), 'nose.plugins.skip.SkipTest', 'SkipTest', (['"""cuda not available"""'], {}), "('cuda not available')\n", (515, 537), False, 'from nose.plugins.skip import SkipTest\n'), ((842, 884), 'numpy.ones', 'numpy.ones', (['(16, 2, 2, 2)'], {'dtype': '"""float32"""'}), "((16, 2, 2, 2), dtype='float32')\n", (852, 884), False, 'import numpy\n'), ((924, 952), 'theano.sandbox.cuda.CudaNdarrayType', 'CudaNdarrayType', (['([False] * 4)'], {}), '([False] * 4)\n', (939, 952), False, 'from theano.sandbox.cuda import CudaNdarrayType, CudaNdarray\n'), ((1259, 1291), 'theano.tests.unittest_tools.verify_grad', 'verify_grad', (['make_graph', '[array]'], {}), '(make_graph, [array])\n', (1270, 1291), False, 'from theano.tests.unittest_tools import verify_grad\n'), ((1306, 1348), 'numpy.ones', 'numpy.ones', (['(16, 1, 1, 1)'], {'dtype': '"""float32"""'}), "((16, 1, 1, 1), dtype='float32')\n", (1316, 1348), False, 'import numpy\n'), ((2101, 2143), 'numpy.ones', 'numpy.ones', (['(2, 16, 2, 2)'], {'dtype': '"""float32"""'}), "((2, 16, 2, 2), dtype='float32')\n", (2111, 2143), False, 'import numpy\n'), ((2267, 2295), 'theano.sandbox.cuda.CudaNdarrayType', 'CudaNdarrayType', (['([False] * 4)'], {}), '([False] * 4)\n', (2282, 2295), False, 'from theano.sandbox.cuda import CudaNdarrayType, CudaNdarray\n'), ((599, 639), 'theano.compile.mode.get_mode', 'theano.compile.mode.get_mode', (['"""FAST_RUN"""'], {}), "('FAST_RUN')\n", (627, 639), False, 'import theano\n'), ((683, 721), 'theano.compile.mode.get_default_mode', 'theano.compile.mode.get_default_mode', ([], {}), '()\n', (719, 721), False, 'import theano\n'), ((1209, 1227), 'theano.sandbox.cuda.gpu_from_host', 'gpu_from_host', (['inp'], {}), '(inp)\n', (1222, 1227), False, 'from theano.sandbox.cuda import gpu_from_host\n')] |
def t89c(points, iopt=0, ps=0.0):
import numpy as np
from geopack.geopack import dip, recalc
from geopack import t89
ut = 100 # 1970-01-01/00:01:40 UT.
ps = recalc(ut)
print(ps)
B = np.zeros(points.shape)
for i in range(points.shape[0]):
r = np.linalg.norm(points[i,:])
if r < 1:
B[i,0] = np.nan
B[i,1] = np.nan
B[i,2] = np.nan
else:
b0xgsm,b0ygsm,b0zgsm = dip(points[i,0], points[i,1], points[i,2])
dbxgsm,dbygsm,dbzgsm = t89.t89(2, ps, points[i,0], points[i,1], points[i,2])
B[i,0] = b0xgsm + dbxgsm
B[i,1] = b0ygsm + dbygsm
B[i,2] = b0zgsm + dbzgsm
return B
import numpy as np
print(t89c(np.ones((2,3)))) | [
"numpy.ones",
"geopack.geopack.recalc",
"numpy.zeros",
"geopack.geopack.dip",
"geopack.t89.t89",
"numpy.linalg.norm"
] | [((183, 193), 'geopack.geopack.recalc', 'recalc', (['ut'], {}), '(ut)\n', (189, 193), False, 'from geopack.geopack import dip, recalc\n'), ((217, 239), 'numpy.zeros', 'np.zeros', (['points.shape'], {}), '(points.shape)\n', (225, 239), True, 'import numpy as np\n'), ((289, 317), 'numpy.linalg.norm', 'np.linalg.norm', (['points[i, :]'], {}), '(points[i, :])\n', (303, 317), True, 'import numpy as np\n'), ((756, 771), 'numpy.ones', 'np.ones', (['(2, 3)'], {}), '((2, 3))\n', (763, 771), True, 'import numpy as np\n'), ((468, 513), 'geopack.geopack.dip', 'dip', (['points[i, 0]', 'points[i, 1]', 'points[i, 2]'], {}), '(points[i, 0], points[i, 1], points[i, 2])\n', (471, 513), False, 'from geopack.geopack import dip, recalc\n'), ((546, 602), 'geopack.t89.t89', 't89.t89', (['(2)', 'ps', 'points[i, 0]', 'points[i, 1]', 'points[i, 2]'], {}), '(2, ps, points[i, 0], points[i, 1], points[i, 2])\n', (553, 602), False, 'from geopack import t89\n')] |
import cv2
import numpy as np
import nn_models
import data_loading.image_loading as il
import nn_models.Models as models
import data_loading.data_loaders as loaders
import numpy.random
import torch, random
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm as tqdm
import pickle
from datetime import datetime
import os
import string
import argparse
from random import randint
from datetime import datetime
import imutils.annotation_utils
from data_loading.image_loading import load_image
import torchvision.transforms as transforms
def main():
parser = argparse.ArgumentParser(description="Test AdmiralNet")
parser.add_argument("--model_file", type=str, required=True)
parser.add_argument("--annotation_file", type=str, required=True)
parser.add_argument("--write_images", action="store_true")
parser.add_argument("--plot", action="store_true")
args = parser.parse_args()
plot = args.plot
annotation_dir, annotation_file = os.path.split(args.annotation_file)
model_dir, model_file = os.path.split(args.model_file)
config_path = os.path.join(model_dir,'config.pkl')
config_file = open(config_path,'rb')
config = pickle.load(config_file)
print(config)
model_prefix, _ = model_file.split(".")
# return
gpu = int(config['gpu'])
optical_flow = bool(config.get('optical_flow',''))
use_float32 = bool(config['use_float32'])
label_scale = float(config['label_scale'])
prefix, _ = annotation_file.split(".")
prefix = prefix + config['file_prefix'] + 'commandant'
context_length = int(config['context_length'])
sequence_length = int(config['sequence_length'])
hidden_dim = int(config['hidden_dim'])
size = (125, 400)
network = models.CommandantNet(context_length = context_length, sequence_length=sequence_length, hidden_dim = hidden_dim, use_float32 = use_float32, gpu = gpu, optical_flow=optical_flow)
state_dict = torch.load(args.model_file)
network.load_state_dict(state_dict)
print(network)
if(label_scale == 1.0):
label_transformation = None
else:
label_transformation = transforms.Compose([transforms.Lambda(lambda inputs: inputs.mul(label_scale))])
if(use_float32):
network.float()
trainset = loaders.F1SequenceDataset(annotation_dir,annotation_file,size,context_length=context_length, sequence_length=sequence_length, use_float32=True, label_transformation = label_transformation, optical_flow=optical_flow)
else:
network.double()
trainset = loaders.F1SequenceDataset(annotation_dir, annotation_file,size,context_length=context_length, sequence_length=sequence_length, label_transformation = label_transformation, optical_flow=optical_flow)
if(gpu>=0):
network = network.cuda(gpu)
if optical_flow:
if((not os.path.isfile("./" + prefix+"_commandantopticalflows.pkl")) or (not os.path.isfile("./" + prefix+"_commandantopticalflowannotations.pkl"))):
trainset.read_files_flow()
trainset.write_pickles(prefix+"_commandantopticalflows.pkl",prefix+"_commandantopticalflowannotations.pkl")
else:
trainset.read_pickles(prefix+"_commandantopticalflows.pkl",prefix+"_commandantopticalflowannotations.pkl")
else:
if((not os.path.isfile("./" + prefix+"_images.pkl")) or (not os.path.isfile("./" + prefix+"_annotations.pkl"))):
trainset.read_files()
trainset.write_pickles(prefix+"_images.pkl",prefix+"_annotations.pkl")
else:
trainset.read_pickles(prefix+"_images.pkl",prefix+"_annotations.pkl")
''' '''
mean,stdev = trainset.statistics()
mean_ = torch.from_numpy(mean)
stdev_ = torch.from_numpy(stdev)
if use_float32:
mean_.float()
stdev_.float()
trainset.img_transformation = config['image_transformation']
if plot:
batch_size = 1
else:
batch_size = 32
loader = torch.utils.data.DataLoader(trainset, batch_size = batch_size, shuffle = False, num_workers = 0)
cum_diff = 0.0
t = tqdm(enumerate(loader))
network.eval()
predictions=[]
ground_truths=[]
losses=[]
criterion = nn.MSELoss()
if(gpu>=0):
criterion = criterion.cuda(gpu)
if args.write_images:
imdir = "admiralnet_prediction_images_" + model_prefix
os.mkdir(imdir)
annotation_file = open(args.annotation_file,'r')
annotations = annotation_file.readlines()
annotation_file.close()
im,_,_,_,_ = annotations[0].split(",")
background = cv2.imread(os.path.join(annotation_dir,'raw_images',im),cv2.IMREAD_UNCHANGED)
out_size = background.shape
fourcc = cv2.VideoWriter_fourcc(*'XVID')
videoout = cv2.VideoWriter(os.path.join(imdir,"video.avi") ,fourcc, 60.0, (out_size[1], out_size[0]),True)
wheel = cv2.imread('steering_wheel.png',cv2.IMREAD_UNCHANGED)
wheelrows = 150
wheelcols = 150
wheel = cv2.resize(wheel, (wheelcols,wheelrows), interpolation = cv2.INTER_CUBIC)
for idx,(inputs, previous_control, labels) in t:
if(gpu>=0):
previous_control = previous_control.cuda(gpu)
inputs = inputs.cuda(gpu)
labels = labels.cuda(gpu)
pred = torch.div(network(inputs,previous_control),label_scale)
if plot:
if pred.shape[1] == 1:
angle = pred.item()
ground_truth = labels.item()
else:
angle = pred.squeeze()[0].item()
ground_truth = labels.squeeze()[0].item()
predictions.append(angle)
ground_truths.append(ground_truth)
t.set_postfix(angle = angle, ground_truth = ground_truth)
loss = criterion(pred, labels)
losses.append(torch.mean(loss).item())
# print("Ground Truth: %f. Prediction: %f.\n" %(scaled_ground_truth, scaled_angle))
if args.write_images:
scaled_angle = 180.0*angle
M = cv2.getRotationMatrix2D((wheelrows/2,wheelcols/2),scaled_angle,1)
wheel_rotated = cv2.warpAffine(wheel,M,(wheelrows,wheelcols))
numpy_im = np.transpose(trainset.images[idx],(1,2,0)).astype(np.float32)
# print(numpy_im.shape)
im,_,_,_,_ = annotations[idx].split(",")
background = cv2.imread(os.path.join(annotation_dir,'raw_images',im),cv2.IMREAD_UNCHANGED)
out_size = background.shape
#print(background.shape)
overlayed = imutils.annotation_utils.overlay_image(background,wheel_rotated,int((out_size[1]-wheelcols)/2),int((out_size[0]-wheelcols)/2))
name = "ouput_image_" + str(idx) + ".png"
output_path = os.path.join(imdir,name)
cv2.imwrite(output_path,overlayed)
videoout.write(overlayed)
'''
'''
predictions_array = np.array(predictions)
ground_truths_array = np.array(ground_truths)
diffs = np.subtract(predictions_array,ground_truths_array)
rms = np.sqrt(np.mean(np.array(losses)))
print("RMS Error: ", rms)
if args.plot:
from scipy import stats
import matplotlib.pyplot as plt
t = np.linspace(0,len(loader)-1,len(loader))
plt.plot(t,predictions_array,'r')
plt.plot(t,ground_truths_array,'b')
#plt.plot(t,diffs)
plt.show()
if __name__ == '__main__':
main()
| [
"torch.from_numpy",
"torch.nn.MSELoss",
"numpy.array",
"nn_models.Models.CommandantNet",
"argparse.ArgumentParser",
"torch.mean",
"matplotlib.pyplot.plot",
"numpy.subtract",
"os.path.split",
"os.mkdir",
"cv2.VideoWriter_fourcc",
"cv2.warpAffine",
"data_loading.data_loaders.F1SequenceDataset"... | [((580, 634), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test AdmiralNet"""'}), "(description='Test AdmiralNet')\n", (603, 634), False, 'import argparse\n'), ((978, 1013), 'os.path.split', 'os.path.split', (['args.annotation_file'], {}), '(args.annotation_file)\n', (991, 1013), False, 'import os\n'), ((1042, 1072), 'os.path.split', 'os.path.split', (['args.model_file'], {}), '(args.model_file)\n', (1055, 1072), False, 'import os\n'), ((1091, 1128), 'os.path.join', 'os.path.join', (['model_dir', '"""config.pkl"""'], {}), "(model_dir, 'config.pkl')\n", (1103, 1128), False, 'import os\n'), ((1182, 1206), 'pickle.load', 'pickle.load', (['config_file'], {}), '(config_file)\n', (1193, 1206), False, 'import pickle\n'), ((1744, 1922), 'nn_models.Models.CommandantNet', 'models.CommandantNet', ([], {'context_length': 'context_length', 'sequence_length': 'sequence_length', 'hidden_dim': 'hidden_dim', 'use_float32': 'use_float32', 'gpu': 'gpu', 'optical_flow': 'optical_flow'}), '(context_length=context_length, sequence_length=\n sequence_length, hidden_dim=hidden_dim, use_float32=use_float32, gpu=\n gpu, optical_flow=optical_flow)\n', (1764, 1922), True, 'import nn_models.Models as models\n'), ((1938, 1965), 'torch.load', 'torch.load', (['args.model_file'], {}), '(args.model_file)\n', (1948, 1965), False, 'import torch, random\n'), ((3683, 3705), 'torch.from_numpy', 'torch.from_numpy', (['mean'], {}), '(mean)\n', (3699, 3705), False, 'import torch, random\n'), ((3719, 3742), 'torch.from_numpy', 'torch.from_numpy', (['stdev'], {}), '(stdev)\n', (3735, 3742), False, 'import torch, random\n'), ((3956, 4050), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(0)'}), '(trainset, batch_size=batch_size, shuffle=False,\n num_workers=0)\n', (3983, 4050), False, 'import torch, random\n'), ((4193, 4205), 
'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4203, 4205), True, 'import torch.nn as nn\n'), ((6903, 6924), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (6911, 6924), True, 'import numpy as np\n'), ((6951, 6974), 'numpy.array', 'np.array', (['ground_truths'], {}), '(ground_truths)\n', (6959, 6974), True, 'import numpy as np\n'), ((6987, 7038), 'numpy.subtract', 'np.subtract', (['predictions_array', 'ground_truths_array'], {}), '(predictions_array, ground_truths_array)\n', (6998, 7038), True, 'import numpy as np\n'), ((2274, 2502), 'data_loading.data_loaders.F1SequenceDataset', 'loaders.F1SequenceDataset', (['annotation_dir', 'annotation_file', 'size'], {'context_length': 'context_length', 'sequence_length': 'sequence_length', 'use_float32': '(True)', 'label_transformation': 'label_transformation', 'optical_flow': 'optical_flow'}), '(annotation_dir, annotation_file, size,\n context_length=context_length, sequence_length=sequence_length,\n use_float32=True, label_transformation=label_transformation,\n optical_flow=optical_flow)\n', (2299, 2502), True, 'import data_loading.data_loaders as loaders\n'), ((2544, 2750), 'data_loading.data_loaders.F1SequenceDataset', 'loaders.F1SequenceDataset', (['annotation_dir', 'annotation_file', 'size'], {'context_length': 'context_length', 'sequence_length': 'sequence_length', 'label_transformation': 'label_transformation', 'optical_flow': 'optical_flow'}), '(annotation_dir, annotation_file, size,\n context_length=context_length, sequence_length=sequence_length,\n label_transformation=label_transformation, optical_flow=optical_flow)\n', (2569, 2750), True, 'import data_loading.data_loaders as loaders\n'), ((4359, 4374), 'os.mkdir', 'os.mkdir', (['imdir'], {}), '(imdir)\n', (4367, 4374), False, 'import os\n'), ((4713, 4744), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (4735, 4744), False, 'import cv2\n'), ((4876, 4930), 'cv2.imread', 'cv2.imread', 
(['"""steering_wheel.png"""', 'cv2.IMREAD_UNCHANGED'], {}), "('steering_wheel.png', cv2.IMREAD_UNCHANGED)\n", (4886, 4930), False, 'import cv2\n'), ((4994, 5066), 'cv2.resize', 'cv2.resize', (['wheel', '(wheelcols, wheelrows)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(wheel, (wheelcols, wheelrows), interpolation=cv2.INTER_CUBIC)\n', (5004, 5066), False, 'import cv2\n'), ((7264, 7299), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'predictions_array', '"""r"""'], {}), "(t, predictions_array, 'r')\n", (7272, 7299), True, 'import matplotlib.pyplot as plt\n'), ((7306, 7343), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'ground_truths_array', '"""b"""'], {}), "(t, ground_truths_array, 'b')\n", (7314, 7343), True, 'import matplotlib.pyplot as plt\n'), ((7377, 7387), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7385, 7387), True, 'import matplotlib.pyplot as plt\n'), ((4593, 4639), 'os.path.join', 'os.path.join', (['annotation_dir', '"""raw_images"""', 'im'], {}), "(annotation_dir, 'raw_images', im)\n", (4605, 4639), False, 'import os\n'), ((4780, 4812), 'os.path.join', 'os.path.join', (['imdir', '"""video.avi"""'], {}), "(imdir, 'video.avi')\n", (4792, 4812), False, 'import os\n'), ((6021, 6093), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(wheelrows / 2, wheelcols / 2)', 'scaled_angle', '(1)'], {}), '((wheelrows / 2, wheelcols / 2), scaled_angle, 1)\n', (6044, 6093), False, 'import cv2\n'), ((6115, 6163), 'cv2.warpAffine', 'cv2.warpAffine', (['wheel', 'M', '(wheelrows, wheelcols)'], {}), '(wheel, M, (wheelrows, wheelcols))\n', (6129, 6163), False, 'import cv2\n'), ((6745, 6770), 'os.path.join', 'os.path.join', (['imdir', 'name'], {}), '(imdir, name)\n', (6757, 6770), False, 'import os\n'), ((6782, 6817), 'cv2.imwrite', 'cv2.imwrite', (['output_path', 'overlayed'], {}), '(output_path, overlayed)\n', (6793, 6817), False, 'import cv2\n'), ((7064, 7080), 'numpy.array', 'np.array', (['losses'], {}), '(losses)\n', (7072, 7080), True, 'import numpy as 
np\n'), ((2837, 2898), 'os.path.isfile', 'os.path.isfile', (["('./' + prefix + '_commandantopticalflows.pkl')"], {}), "('./' + prefix + '_commandantopticalflows.pkl')\n", (2851, 2898), False, 'import os\n'), ((2906, 2977), 'os.path.isfile', 'os.path.isfile', (["('./' + prefix + '_commandantopticalflowannotations.pkl')"], {}), "('./' + prefix + '_commandantopticalflowannotations.pkl')\n", (2920, 2977), False, 'import os\n'), ((3299, 3344), 'os.path.isfile', 'os.path.isfile', (["('./' + prefix + '_images.pkl')"], {}), "('./' + prefix + '_images.pkl')\n", (3313, 3344), False, 'import os\n'), ((3352, 3402), 'os.path.isfile', 'os.path.isfile', (["('./' + prefix + '_annotations.pkl')"], {}), "('./' + prefix + '_annotations.pkl')\n", (3366, 3402), False, 'import os\n'), ((6370, 6416), 'os.path.join', 'os.path.join', (['annotation_dir', '"""raw_images"""', 'im'], {}), "(annotation_dir, 'raw_images', im)\n", (6382, 6416), False, 'import os\n'), ((5820, 5836), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (5830, 5836), False, 'import torch, random\n'), ((6184, 6229), 'numpy.transpose', 'np.transpose', (['trainset.images[idx]', '(1, 2, 0)'], {}), '(trainset.images[idx], (1, 2, 0))\n', (6196, 6229), True, 'import numpy as np\n')] |
"""Create data for test_model::test_dipole1d."""
import numpy as np
from external import dipole1d
# # Comparison to DIPOLE1D for EE/ME
# Define model
freq = np.array([0.78])
depth = np.array([213.5, 500, 1000])
res = np.array([3.5, .1, 50, 13])
rec = [np.arange(1, 11)*1000, np.arange(-4, 6)*100, 350]
def collect_model(src, rec, freq, depth, res, srcpts=1):
model = {'src': src,
'rec': rec,
'depth': depth,
'res': res,
'freq': freq,
'srcpts': srcpts}
return model
# 1. x-directed dipole
src1 = [0, 0, 150, 0, 0]
out1 = dipole1d(src1, rec, depth, res, freq)
xdirdip = (collect_model(src1, rec, freq, depth, res), out1)
# 2. y-directed dipole
src2 = [0, 0, 150, 90, 0]
out2 = dipole1d(src2, rec, depth, res, freq)
ydirdip = (collect_model(src2, rec, freq, depth, res), out2)
# 3. z-directed dipole
src3 = [0, 0, 150, 0, 90]
out3 = dipole1d(src3, rec, depth, res, freq)
zdirdip = (collect_model(src3, rec, freq, depth, res), out3)
# 4. Dipole in xy-plane
src4 = [0, 0, 150, 23.5, 0]
out4 = dipole1d(src4, rec, depth, res, freq)
xydirdip = (collect_model(src4, rec, freq, depth, res), out4)
# 5. Dipole in xz-plane
src5 = [0, 0, 150, 0, 39.6]
out5 = dipole1d(src5, rec, depth, res, freq)
xzdirdip = (collect_model(src5, rec, freq, depth, res), out5)
# 6. Dipole in yz-plane
src6 = [0, 0, 150, 90, 69.6]
out6 = dipole1d(src6, rec, depth, res, freq)
yzdirdip = (collect_model(src6, rec, freq, depth, res), out6)
# 7. Arbitrary xyz-dipole
src7 = [0, 0, 150, 13, 76]
out7 = dipole1d(src7, rec, depth, res, freq)
xyzdirdip = (collect_model(src7, rec, freq, depth, res), out7)
# 8. x-directed bipole
src8 = [-50, 90, 0, 0, 150, 150]
out8 = dipole1d(src8, rec, depth, res, freq, 5)
xdirbip = (collect_model(src8, rec, freq, depth, res, 5), out8)
# 9. y-directed bipole
src9 = [0, 0, -50, 90, 150, 150]
out9 = dipole1d(src9, rec, depth, res, freq, 5)
ydirbip = (collect_model(src9, rec, freq, depth, res, 5), out9)
# 10. z-directed bipole
src10 = [0, 0, 0, 0, 140, 170]
out10 = dipole1d(src10, rec, depth, res, freq, 5)
zdirbip = (collect_model(src10, rec, freq, depth, res, 5), out10)
# 11. Bipole in xy-plane
src11 = [-50, 90, -20, 30, 150, 150]
out11 = dipole1d(src11, rec, depth, res, freq, 5)
xydirbip = (collect_model(src11, rec, freq, depth, res, 5), out11)
# 12. Bipole in xz-plane
src12 = [-30, 60, 0, 0, 150, 170]
out12 = dipole1d(src12, rec, depth, res, freq, 5)
xzdirbip = (collect_model(src12, rec, freq, depth, res, 5), out12)
# 13. Bipole in yz-plane
src13 = [0, 0, -30, 20, 140, 170]
out13 = dipole1d(src13, rec, depth, res, freq, 5)
yzdirbip = (collect_model(src13, rec, freq, depth, res, 5), out13)
# 14. Arbitrary xyz-bipole
src14 = [-30, 40, -20, 30, 140, 170]
out14 = dipole1d(src14, rec, depth, res, freq, 5)
xyzdirbip = (collect_model(src14, rec, freq, depth, res, 5), out14)
# # Store data # #
np.savez_compressed('../data/dipole1d.npz',
xdirdip=xdirdip, ydirdip=ydirdip, zdirdip=zdirdip,
xydirdip=xydirdip, xzdirdip=xzdirdip, yzdirdip=yzdirdip,
xyzdirdip=xyzdirdip,
xdirbip=xdirbip, ydirbip=ydirbip, zdirbip=zdirbip,
xydirbip=xydirbip, xzdirbip=xzdirbip, yzdirbip=yzdirbip,
xyzdirbip=xyzdirbip,
)
| [
"numpy.array",
"external.dipole1d",
"numpy.savez_compressed",
"numpy.arange"
] | [((159, 175), 'numpy.array', 'np.array', (['[0.78]'], {}), '([0.78])\n', (167, 175), True, 'import numpy as np\n'), ((184, 212), 'numpy.array', 'np.array', (['[213.5, 500, 1000]'], {}), '([213.5, 500, 1000])\n', (192, 212), True, 'import numpy as np\n'), ((219, 247), 'numpy.array', 'np.array', (['[3.5, 0.1, 50, 13]'], {}), '([3.5, 0.1, 50, 13])\n', (227, 247), True, 'import numpy as np\n'), ((599, 636), 'external.dipole1d', 'dipole1d', (['src1', 'rec', 'depth', 'res', 'freq'], {}), '(src1, rec, depth, res, freq)\n', (607, 636), False, 'from external import dipole1d\n'), ((755, 792), 'external.dipole1d', 'dipole1d', (['src2', 'rec', 'depth', 'res', 'freq'], {}), '(src2, rec, depth, res, freq)\n', (763, 792), False, 'from external import dipole1d\n'), ((911, 948), 'external.dipole1d', 'dipole1d', (['src3', 'rec', 'depth', 'res', 'freq'], {}), '(src3, rec, depth, res, freq)\n', (919, 948), False, 'from external import dipole1d\n'), ((1070, 1107), 'external.dipole1d', 'dipole1d', (['src4', 'rec', 'depth', 'res', 'freq'], {}), '(src4, rec, depth, res, freq)\n', (1078, 1107), False, 'from external import dipole1d\n'), ((1230, 1267), 'external.dipole1d', 'dipole1d', (['src5', 'rec', 'depth', 'res', 'freq'], {}), '(src5, rec, depth, res, freq)\n', (1238, 1267), False, 'from external import dipole1d\n'), ((1391, 1428), 'external.dipole1d', 'dipole1d', (['src6', 'rec', 'depth', 'res', 'freq'], {}), '(src6, rec, depth, res, freq)\n', (1399, 1428), False, 'from external import dipole1d\n'), ((1552, 1589), 'external.dipole1d', 'dipole1d', (['src7', 'rec', 'depth', 'res', 'freq'], {}), '(src7, rec, depth, res, freq)\n', (1560, 1589), False, 'from external import dipole1d\n'), ((1717, 1757), 'external.dipole1d', 'dipole1d', (['src8', 'rec', 'depth', 'res', 'freq', '(5)'], {}), '(src8, rec, depth, res, freq, 5)\n', (1725, 1757), False, 'from external import dipole1d\n'), ((1886, 1926), 'external.dipole1d', 'dipole1d', (['src9', 'rec', 'depth', 'res', 'freq', '(5)'], {}), 
'(src9, rec, depth, res, freq, 5)\n', (1894, 1926), False, 'from external import dipole1d\n'), ((2055, 2096), 'external.dipole1d', 'dipole1d', (['src10', 'rec', 'depth', 'res', 'freq', '(5)'], {}), '(src10, rec, depth, res, freq, 5)\n', (2063, 2096), False, 'from external import dipole1d\n'), ((2234, 2275), 'external.dipole1d', 'dipole1d', (['src11', 'rec', 'depth', 'res', 'freq', '(5)'], {}), '(src11, rec, depth, res, freq, 5)\n', (2242, 2275), False, 'from external import dipole1d\n'), ((2411, 2452), 'external.dipole1d', 'dipole1d', (['src12', 'rec', 'depth', 'res', 'freq', '(5)'], {}), '(src12, rec, depth, res, freq, 5)\n', (2419, 2452), False, 'from external import dipole1d\n'), ((2588, 2629), 'external.dipole1d', 'dipole1d', (['src13', 'rec', 'depth', 'res', 'freq', '(5)'], {}), '(src13, rec, depth, res, freq, 5)\n', (2596, 2629), False, 'from external import dipole1d\n'), ((2770, 2811), 'external.dipole1d', 'dipole1d', (['src14', 'rec', 'depth', 'res', 'freq', '(5)'], {}), '(src14, rec, depth, res, freq, 5)\n', (2778, 2811), False, 'from external import dipole1d\n'), ((2900, 3219), 'numpy.savez_compressed', 'np.savez_compressed', (['"""../data/dipole1d.npz"""'], {'xdirdip': 'xdirdip', 'ydirdip': 'ydirdip', 'zdirdip': 'zdirdip', 'xydirdip': 'xydirdip', 'xzdirdip': 'xzdirdip', 'yzdirdip': 'yzdirdip', 'xyzdirdip': 'xyzdirdip', 'xdirbip': 'xdirbip', 'ydirbip': 'ydirbip', 'zdirbip': 'zdirbip', 'xydirbip': 'xydirbip', 'xzdirbip': 'xzdirbip', 'yzdirbip': 'yzdirbip', 'xyzdirbip': 'xyzdirbip'}), "('../data/dipole1d.npz', xdirdip=xdirdip, ydirdip=\n ydirdip, zdirdip=zdirdip, xydirdip=xydirdip, xzdirdip=xzdirdip,\n yzdirdip=yzdirdip, xyzdirdip=xyzdirdip, xdirbip=xdirbip, ydirbip=\n ydirbip, zdirbip=zdirbip, xydirbip=xydirbip, xzdirbip=xzdirbip,\n yzdirbip=yzdirbip, xyzdirbip=xyzdirbip)\n", (2919, 3219), True, 'import numpy as np\n'), ((254, 270), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (263, 270), True, 'import numpy as np\n'), ((277, 293), 
'numpy.arange', 'np.arange', (['(-4)', '(6)'], {}), '(-4, 6)\n', (286, 293), True, 'import numpy as np\n')] |
from numpy import hstack
from numpy import sum
from numpy import zeros
from gwlfe.Input.LandUse.NLU import NLU
from gwlfe.Memoization import memoize
from gwlfe.MultiUse_Fxns.Erosion.ErosWashoff import ErosWashoff
from gwlfe.MultiUse_Fxns.Erosion.ErosWashoff import ErosWashoff_f
from gwlfe.MultiUse_Fxns.Erosion.SedDelivRatio import SedDelivRatio
from gwlfe.MultiUse_Fxns.Runoff.pRunoff import pRunoff
from gwlfe.MultiUse_Fxns.Runoff.pRunoff import pRunoff_f
from gwlfe.Output.Loading.LuLoad import LuLoad
from gwlfe.Output.Loading.LuLoad import LuLoad_f
@memoize
def LuTotPhos(NYrs, DaysMonth, InitSnow_0, Temp, Prec, AntMoist_0, NRur, NUrb, CN, Grow_0, Area, PhosConc, ManPhos,
ManuredAreas, FirstManureMonth, LastManureMonth, FirstManureMonth2, LastManureMonth2, SedDelivRatio_0, KF,
LS, C, P, CNP_0, Imper, ISRR, ISRA, Qretention, PctAreaInfil, Nqual, LoadRateImp, LoadRatePerv, Storm,
UrbBMPRed, FilterWidth, PctStrmBuf, Acoef, SedPhos, CNI_0):
result = zeros((NYrs, 16))
p_runoff = pRunoff(NYrs, DaysMonth, InitSnow_0, Temp, Prec, AntMoist_0, NRur, NUrb, CN, Grow_0, Area, PhosConc,
ManuredAreas, FirstManureMonth, LastManureMonth, ManPhos, FirstManureMonth2,
LastManureMonth2)
sed_deliv_ratio = SedDelivRatio(SedDelivRatio_0)
eros_washoff = ErosWashoff(NYrs, DaysMonth, InitSnow_0, Temp, Prec, NRur, NUrb, Acoef,
KF, LS, C, P, Area)
lu_load = LuLoad(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0,
AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, Qretention, PctAreaInfil, Nqual,
LoadRateImp, LoadRatePerv, Storm, UrbBMPRed, FilterWidth, PctStrmBuf)
nlu = NLU(NRur, NUrb)
for Y in range(NYrs):
for i in range(12):
# Add in the urban calucation for sediment
for l in range(NRur):
result[Y][l] += p_runoff[Y][i][l]
result[Y][l] += 0.001 * sed_deliv_ratio * eros_washoff[Y][l][i] * SedPhos
for l in range(NRur, nlu):
result[Y][l] += lu_load[Y][l][1] / NYrs / 2
return result
@memoize
def LuTotPhos_f(NYrs, DaysMonth, InitSnow_0, Temp, Prec, AntMoist_0, NRur, NUrb, CN, Grow_0, Area, PhosConc, ManPhos,
ManuredAreas, FirstManureMonth, LastManureMonth, FirstManureMonth2, LastManureMonth2, SedDelivRatio_0,
KF,
LS, C, P, CNP_0, Imper, ISRR, ISRA, Qretention, PctAreaInfil, Nqual, LoadRateImp, LoadRatePerv, Storm,
UrbBMPRed, FilterWidth, PctStrmBuf, Acoef, SedPhos, CNI_0):
p_runoff = sum(
pRunoff_f(NYrs, DaysMonth, InitSnow_0, Temp, Prec, AntMoist_0, NRur, NUrb, CN, Grow_0, Area, PhosConc,
ManuredAreas, FirstManureMonth, LastManureMonth, ManPhos, FirstManureMonth2,
LastManureMonth2), axis=1)
sed_deliv_ratio = SedDelivRatio(SedDelivRatio_0)
eros_washoff = sum(ErosWashoff_f(NYrs, DaysMonth, InitSnow_0, Temp, Prec, NRur, Acoef,
KF, LS, C, P, Area), axis=1)
lu_load = LuLoad_f(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0, AntMoist_0,
Grow_0, CNP_0, Imper, ISRR, ISRA, Qretention, PctAreaInfil, Nqual, LoadRateImp,
LoadRatePerv, Storm, UrbBMPRed, FilterWidth, PctStrmBuf)[:, :, 1]
# luLoad is not needed because it is only defined for NUrb land use, and the others are only defined for NRur
return hstack(
(p_runoff + 0.001 * sed_deliv_ratio * eros_washoff * SedPhos, 12. * lu_load / NYrs / 2)) # + lu_load / NYrs / 2
| [
"gwlfe.Output.Loading.LuLoad.LuLoad",
"gwlfe.MultiUse_Fxns.Runoff.pRunoff.pRunoff_f",
"gwlfe.MultiUse_Fxns.Erosion.ErosWashoff.ErosWashoff",
"numpy.hstack",
"gwlfe.Output.Loading.LuLoad.LuLoad_f",
"numpy.zeros",
"gwlfe.MultiUse_Fxns.Runoff.pRunoff.pRunoff",
"gwlfe.Input.LandUse.NLU.NLU",
"gwlfe.Mult... | [((1008, 1025), 'numpy.zeros', 'zeros', (['(NYrs, 16)'], {}), '((NYrs, 16))\n', (1013, 1025), False, 'from numpy import zeros\n'), ((1041, 1244), 'gwlfe.MultiUse_Fxns.Runoff.pRunoff.pRunoff', 'pRunoff', (['NYrs', 'DaysMonth', 'InitSnow_0', 'Temp', 'Prec', 'AntMoist_0', 'NRur', 'NUrb', 'CN', 'Grow_0', 'Area', 'PhosConc', 'ManuredAreas', 'FirstManureMonth', 'LastManureMonth', 'ManPhos', 'FirstManureMonth2', 'LastManureMonth2'], {}), '(NYrs, DaysMonth, InitSnow_0, Temp, Prec, AntMoist_0, NRur, NUrb, CN,\n Grow_0, Area, PhosConc, ManuredAreas, FirstManureMonth, LastManureMonth,\n ManPhos, FirstManureMonth2, LastManureMonth2)\n', (1048, 1244), False, 'from gwlfe.MultiUse_Fxns.Runoff.pRunoff import pRunoff\n'), ((1305, 1335), 'gwlfe.MultiUse_Fxns.Erosion.SedDelivRatio.SedDelivRatio', 'SedDelivRatio', (['SedDelivRatio_0'], {}), '(SedDelivRatio_0)\n', (1318, 1335), False, 'from gwlfe.MultiUse_Fxns.Erosion.SedDelivRatio import SedDelivRatio\n'), ((1355, 1450), 'gwlfe.MultiUse_Fxns.Erosion.ErosWashoff.ErosWashoff', 'ErosWashoff', (['NYrs', 'DaysMonth', 'InitSnow_0', 'Temp', 'Prec', 'NRur', 'NUrb', 'Acoef', 'KF', 'LS', 'C', 'P', 'Area'], {}), '(NYrs, DaysMonth, InitSnow_0, Temp, Prec, NRur, NUrb, Acoef, KF,\n LS, C, P, Area)\n', (1366, 1450), False, 'from gwlfe.MultiUse_Fxns.Erosion.ErosWashoff import ErosWashoff\n'), ((1492, 1726), 'gwlfe.Output.Loading.LuLoad.LuLoad', 'LuLoad', (['NYrs', 'DaysMonth', 'Temp', 'InitSnow_0', 'Prec', 'NRur', 'NUrb', 'Area', 'CNI_0', 'AntMoist_0', 'Grow_0', 'CNP_0', 'Imper', 'ISRR', 'ISRA', 'Qretention', 'PctAreaInfil', 'Nqual', 'LoadRateImp', 'LoadRatePerv', 'Storm', 'UrbBMPRed', 'FilterWidth', 'PctStrmBuf'], {}), '(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0,\n AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, Qretention, PctAreaInfil,\n Nqual, LoadRateImp, LoadRatePerv, Storm, UrbBMPRed, FilterWidth, PctStrmBuf\n )\n', (1498, 1726), False, 'from gwlfe.Output.Loading.LuLoad import LuLoad\n'), ((1766, 1781), 
'gwlfe.Input.LandUse.NLU.NLU', 'NLU', (['NRur', 'NUrb'], {}), '(NRur, NUrb)\n', (1769, 1781), False, 'from gwlfe.Input.LandUse.NLU import NLU\n'), ((2939, 2969), 'gwlfe.MultiUse_Fxns.Erosion.SedDelivRatio.SedDelivRatio', 'SedDelivRatio', (['SedDelivRatio_0'], {}), '(SedDelivRatio_0)\n', (2952, 2969), False, 'from gwlfe.MultiUse_Fxns.Erosion.SedDelivRatio import SedDelivRatio\n'), ((3547, 3647), 'numpy.hstack', 'hstack', (['(p_runoff + 0.001 * sed_deliv_ratio * eros_washoff * SedPhos, 12.0 *\n lu_load / NYrs / 2)'], {}), '((p_runoff + 0.001 * sed_deliv_ratio * eros_washoff * SedPhos, 12.0 *\n lu_load / NYrs / 2))\n', (3553, 3647), False, 'from numpy import hstack\n'), ((2674, 2879), 'gwlfe.MultiUse_Fxns.Runoff.pRunoff.pRunoff_f', 'pRunoff_f', (['NYrs', 'DaysMonth', 'InitSnow_0', 'Temp', 'Prec', 'AntMoist_0', 'NRur', 'NUrb', 'CN', 'Grow_0', 'Area', 'PhosConc', 'ManuredAreas', 'FirstManureMonth', 'LastManureMonth', 'ManPhos', 'FirstManureMonth2', 'LastManureMonth2'], {}), '(NYrs, DaysMonth, InitSnow_0, Temp, Prec, AntMoist_0, NRur, NUrb,\n CN, Grow_0, Area, PhosConc, ManuredAreas, FirstManureMonth,\n LastManureMonth, ManPhos, FirstManureMonth2, LastManureMonth2)\n', (2683, 2879), False, 'from gwlfe.MultiUse_Fxns.Runoff.pRunoff import pRunoff_f\n'), ((2994, 3085), 'gwlfe.MultiUse_Fxns.Erosion.ErosWashoff.ErosWashoff_f', 'ErosWashoff_f', (['NYrs', 'DaysMonth', 'InitSnow_0', 'Temp', 'Prec', 'NRur', 'Acoef', 'KF', 'LS', 'C', 'P', 'Area'], {}), '(NYrs, DaysMonth, InitSnow_0, Temp, Prec, NRur, Acoef, KF, LS,\n C, P, Area)\n', (3007, 3085), False, 'from gwlfe.MultiUse_Fxns.Erosion.ErosWashoff import ErosWashoff_f\n'), ((3143, 3379), 'gwlfe.Output.Loading.LuLoad.LuLoad_f', 'LuLoad_f', (['NYrs', 'DaysMonth', 'Temp', 'InitSnow_0', 'Prec', 'NRur', 'NUrb', 'Area', 'CNI_0', 'AntMoist_0', 'Grow_0', 'CNP_0', 'Imper', 'ISRR', 'ISRA', 'Qretention', 'PctAreaInfil', 'Nqual', 'LoadRateImp', 'LoadRatePerv', 'Storm', 'UrbBMPRed', 'FilterWidth', 'PctStrmBuf'], {}), '(NYrs, DaysMonth, Temp, 
InitSnow_0, Prec, NRur, NUrb, Area, CNI_0,\n AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, Qretention, PctAreaInfil,\n Nqual, LoadRateImp, LoadRatePerv, Storm, UrbBMPRed, FilterWidth, PctStrmBuf\n )\n', (3151, 3379), False, 'from gwlfe.Output.Loading.LuLoad import LuLoad_f\n')] |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: <NAME>, <NAME>, <NAME>
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
from collections import defaultdict
import numpy as np
from pymor.grids.interfaces import AffineGridInterface
from pymor.grids.referenceelements import triangle
class GmshParseError(Exception):
pass
def parse_gmsh_file(f):
allowed_sections = ['Nodes', 'Elements', 'PhysicalName', 'Periodic', 'NodeData',
'ElementData', 'ElementNodeData']
supported_sections = ['Nodes', 'Elements']
try:
l = next(f).strip()
if l != '$MeshFormat':
raise GmshParseError('expected $MeshFormat, got {}'.format(l))
l = next(f).strip()
header = l.split(' ')
if len(header) != 3:
raise GmshParseError('header {} has {} fields, expected 3'.format(l, len(header)))
if header[0] != '2.2':
raise GmshParseError('wrong file format version: got {}, expected 2.2'.format(header[0]))
try:
file_type = int(header[1])
except ValueError:
raise GmshParseError('malformed header: expected integer, got {}'.format(header[1]))
if file_type != 0:
raise GmshParseError('wrong file type: only ASCII gmsh files are supported')
try:
data_size = int(header[2]) # NOQA
except ValueError:
raise GmshParseError('malformed header: expected integer, got {}'.format(header[2]))
l = next(f).strip()
if l != '$EndMeshFormat':
raise GmshParseError('expected $EndMeshFormat, got {}'.format(l))
except StopIteration:
raise GmshParseError('unexcpected end of file')
in_section = False
sections = defaultdict(list)
for l in f:
l = l.strip()
if l == '':
continue
if not in_section:
if not l.startswith('$'):
raise GmshParseError('expected section naem, got {}'.format(l))
section = l[1:]
if section not in allowed_sections:
raise GmshParseError('unknown section type: {}'.format(section))
if section not in supported_sections:
raise GmshParseError('unsopported section type: {}'.format(section))
if section in sections:
raise GmshParseError('only one {} section allowed'.format(section))
in_section = True
elif l.startswith('$'):
if l != '$End' + section:
raise GmshParseError('expected $End{}, got {}'.format(section, l))
in_section = False
else:
sections[section].append(l)
if in_section:
raise GmshParseError('file ended while in section {}'.format(section))
# now we parse each section ...
def parse_nodes(nodes):
try:
num_nodes = int(nodes[0])
except ValueError:
raise GmshParseError('first line of nodes sections is not a number: {}'.format(nodes[0]))
if len(nodes) != num_nodes + 1:
raise GmshParseError('number-of-nodes field does not match number of lines in nodes section')
nodes = [n.split(' ') for n in nodes[1:]]
if not all(len(n) == 4 for n in nodes):
raise GmshParseError('malformed nodes section')
try:
nodes = [(int(a), (float(b), float(c), float(d))) for a, b, c, d in nodes]
except ValueError:
raise GmshParseError('malformed nodes section')
return nodes
def parse_elements(elements):
try:
num_elements = int(elements[0])
except ValueError:
raise GmshParseError('first line of elements sections is not a number: {}'.format(elements[0]))
if len(elements) != num_elements + 1:
raise GmshParseError('number-of-elements field does not match number of lines in elements section')
elements = [e.split(' ') for e in elements[1:]]
try:
elements = [tuple(int(f) for f in e) for e in elements]
except ValueError:
raise GmshParseError('malformed elements section')
element_types = {1: 'line', 2: 'triangle'}
element_nodes = {'line': 2, 'triangle': 3}
def parse_line(fields):
if fields[1] not in element_types:
raise GmshParseError('element type {} not supported'.format(fields[0]))
element_type = element_types[fields[1]]
num_nodes = element_nodes[element_type]
num_tags = fields[2]
if len(fields) != num_nodes + num_tags + 3:
raise GmshParseError('malformed elements section')
return element_type, (fields[0], tuple(fields[3:3 + num_tags]), fields[3 + num_tags:])
elements_by_type = defaultdict(list)
for e in elements:
t, l = parse_line(e)
elements_by_type[t].append(l)
return elements_by_type
parser_map = {'Nodes': parse_nodes, 'Elements': parse_elements}
for k, v in sections.iteritems():
sections[k] = parser_map[k](v)
return sections
class GmshGrid(AffineGridInterface):
dim = 2
dim_outer = 2
reference_element = triangle
def __init__(self, gmsh_file):
self.logger.info('Parsing gmsh file ...')
sections = parse_gmsh_file(gmsh_file)
self.logger.info('Checking is grid is a 2d triangular grid ...')
assert {'Nodes', 'Elements'} <= set(sections.keys())
assert set(sections['Elements'].keys()) <= {'line', 'triangle'}
assert 'triangle' in sections['Elements']
assert all(n[1][2] == 0 for n in sections['Nodes'])
self.logger.info('Creating entity maps ...')
node_ids = {}
for i, n in enumerate(sections['Nodes']):
node_ids[n[0]] = i
line_ids = {}
if 'line' in sections['Elements']:
for i, l in enumerate(sections['Elements']['line']):
line_ids[l[0]] = i
triangle_ids = {}
for i, t in enumerate(sections['Elements']['triangle']):
triangle_ids[t[0]] = i
self.logger.info('Building grid topology ...')
# the lines dict will hold the indices of lines defined by pairs of points
lines = {}
if 'line' in sections['Elements']:
for i, l in enumerate(sections['Elements']['line']):
lines[frozenset(l[2])] = i
codim1_subentities = np.empty((len(sections['Elements']['triangle']), 3), dtype=np.int32)
codim2_subentities = np.empty_like(codim1_subentities)
for i, t in enumerate(sections['Elements']['triangle']):
nodes = t[2]
codim2_subentities[i] = [node_ids[nodes[0]], node_ids[nodes[1]], node_ids[nodes[2]]]
edges = (frozenset(t[2][1:3]), frozenset((t[2][2], t[2][0])), frozenset((t[2][0:2])))
for e in edges:
if e not in lines:
lines[e] = len(lines)
codim1_subentities[i] = [lines[edges[0]], lines[edges[1]], lines[edges[2]]]
self.logger.info('Calculating embeddings ...')
codim2_centers = np.array([n[1][0:2] for n in sections['Nodes']])
SEC = codim2_centers[codim2_subentities]
SHIFTS = SEC[:, 0, :]
TRANS = SEC[:, 1:, :] - SHIFTS[:, np.newaxis, :]
TRANS = TRANS.swapaxes(1, 2)
self.__embeddings = (TRANS, SHIFTS)
self.__subentities = (np.arange(len(codim1_subentities), dtype=np.int32).reshape(-1, 1),
codim1_subentities, codim2_subentities)
self.__sizes = (len(codim1_subentities), len(lines), len(codim2_centers))
def __str__(self):
return 'GmshGrid with {} vertices, {} lines, {} triangles'.format(*self.__sizes)
def size(self, codim=0):
assert 0 <= codim <= 2, 'Invalid codimension'
return self.__sizes[codim]
def subentities(self, codim=0, subentity_codim=None):
assert 0 <= codim <= 2, 'Invalid codimension'
if subentity_codim is None:
subentity_codim = codim + 1
assert codim <= subentity_codim <= self.dim, 'Invalid subentity codimensoin'
if codim == 0:
return self.__subentities[subentity_codim]
else:
return super(GmshGrid, self).subentities(codim, subentity_codim)
def embeddings(self, codim=0):
if codim == 0:
return self.__embeddings
else:
return super(GmshGrid, self).embeddings(codim)
@staticmethod
def test_instances():
import os.path
return GmshGrid(open(os.path.join(os.path.dirname(__file__), '../../../../testdata/gmsh_1.msh'))),
| [
"numpy.empty_like",
"numpy.array",
"collections.defaultdict"
] | [((1885, 1902), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1896, 1902), False, 'from collections import defaultdict\n'), ((4939, 4956), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4950, 4956), False, 'from collections import defaultdict\n'), ((6702, 6735), 'numpy.empty_like', 'np.empty_like', (['codim1_subentities'], {}), '(codim1_subentities)\n', (6715, 6735), True, 'import numpy as np\n'), ((7296, 7344), 'numpy.array', 'np.array', (["[n[1][0:2] for n in sections['Nodes']]"], {}), "([n[1][0:2] for n in sections['Nodes']])\n", (7304, 7344), True, 'import numpy as np\n')] |
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def problem():
"""
Rotate Matrix: Given an image represented by an NxN matrix, where each pixel in the image is 4
bytes, write a method to rotate the image by 90 degrees. Can you do this in place?
"""
# According to my calculations, in order to perform the rotation in place, we need to do a specific cell value rotation
# If we want to move a single pixel, we would have to move all the corresponding pixels untill we find the one that goes to the current rotated pixel's place
# We need to do some sort of up-right-down-left motion
# This function works in O(r * c) where r are the rows and c are the columns
# In reality it is faster by a factor of 16
def rotateMatrix(matrix, rows, columns):
"""
This function rotates an image/matrix by 90 degrees clockwise
Solution Steps:
Step 1: Go through half the diagonal (bottom left to upper right)
Step 2: Rotate four cells on each iteration
"""
offset = 0
for row_bottom_offset in range(rows - 1, rows // 2, -1):
col_left_offset = 1
for row in range(row_bottom_offset, offset, -1):
current = matrix[row][offset]
matrix[row][offset] = matrix[row_bottom_offset][-offset - col_left_offset]
matrix[row_bottom_offset][-offset - col_left_offset] = matrix[rows - row - 1][-offset - 1]
matrix[rows - row - 1][-offset - 1] = matrix[rows - row_bottom_offset - 1][offset + col_left_offset - 1]
matrix[rows - row_bottom_offset - 1 ][offset + col_left_offset - 1] = current
col_left_offset += 1
offset += 1
def rotateImage(image):
image_matrix = mpimg.imread(image)
image_matrix_copy = image_matrix.copy()
image_matrix_copy = image_matrix_copy[0:300, 0:300, :1].reshape(300, 300)
rotateMatrix(image_matrix_copy, image_matrix.shape[0], image_matrix.shape[1] - 32)
plt.imshow(image_matrix_copy, cmap='gray')
plt.show()
if __name__ == '__main__':
args = sys.argv[1:]
if len(args) >= 3:
rotateImage(args[2])
exit()
#matrix = [[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3 ,4 ,5 ,6 ,7 ,8], [1, 2, 3 ,4 ,5 ,6 ,7 ,8], [1, 2, 3 ,4 ,5 ,6 ,7 ,8], [1, 2, 3 ,4 ,5 ,6 ,7 , 8], [1, 2, 3 ,4 ,5 ,6 ,7 , 8], [1, 2, 3 ,4 ,5 ,6 ,7 , 8], [1, 2, 3 ,4 ,5 ,6 ,7 ,8]]
shape = (int(args[0]), int(args[1]))
matrix = np.random.randint(255, size=shape)
#shape = (8, 8)
#for r in matrix:
# print(r)
#print("\n")
rotateMatrix(matrix, shape[0], shape[1])
#for r in matrix:
# print(r)
| [
"matplotlib.pyplot.imshow",
"numpy.random.randint",
"matplotlib.image.imread",
"matplotlib.pyplot.show"
] | [((1758, 1777), 'matplotlib.image.imread', 'mpimg.imread', (['image'], {}), '(image)\n', (1770, 1777), True, 'import matplotlib.image as mpimg\n'), ((1992, 2034), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_matrix_copy'], {'cmap': '"""gray"""'}), "(image_matrix_copy, cmap='gray')\n", (2002, 2034), True, 'import matplotlib.pyplot as plt\n'), ((2039, 2049), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2047, 2049), True, 'import matplotlib.pyplot as plt\n'), ((2470, 2504), 'numpy.random.randint', 'np.random.randint', (['(255)'], {'size': 'shape'}), '(255, size=shape)\n', (2487, 2504), True, 'import numpy as np\n')] |
import rinobot_plugin as bot
import re
import os
import numpy as np
_end_tags = dict(grid=':HEADER_END:', scan='SCANIT_END', spec='[DATA]')
class NanonisFile(object):
"""
Base class for Nanonis data files (grid, scan, point spectroscopy).
Handles methods and parsing tasks common to all Nanonis files.
Parameters
----------
fname : str
Name of Nanonis file.
Attributes
----------
datadir : str
Directory path for Nanonis file.
basename : str
Just the filename, no path.
fname : str
Full path of Nanonis file.
filetype : str
filetype corresponding to filename extension.
byte_offset : int
Size of header in bytes.
header_raw : str
Unproccessed header information.
"""
def __init__(self, fname):
self.datadir, self.basename = os.path.split(fname)
self.fname = fname
self.filetype = self._determine_filetype()
self.byte_offset = self.start_byte()
self.header_raw = self.read_raw_header(self.byte_offset)
def _determine_filetype(self):
"""
Check last three characters for appropriate file extension,
raise error if not.
Returns
-------
str
Filetype name associated with extension.
Raises
------
UnhandledFileError
If last three characters of filename are not one of '3ds',
'sxm', or 'dat'.
"""
if self.fname[-3:] == '3ds':
return 'grid'
elif self.fname[-3:] == 'sxm':
return 'scan'
elif self.fname[-3:] == 'dat':
return 'spec'
else:
raise UnhandledFileError('{} is not a supported filetype or does not exist'.format(self.basename))
def read_raw_header(self, byte_offset):
"""
Return header as a raw string.
Everything before the end tag is considered to be part of the header.
the parsing will be done later by subclass methods.
Parameters
----------
byte_offset : int
Size of header in bytes. Read up to this point in file.
Returns
-------
str
Contents of filename up to byte_offset as a decoded binary
string.
"""
with open(self.fname, 'rb') as f:
return f.read(byte_offset).decode()
def start_byte(self):
"""
Find first byte after end tag signalling end of header info.
Caveat, I believe this is the first byte after the end of the
line that the end tag is found on, not strictly the first byte
directly after the end tag is found. For example in Scan
__init__, byte_offset is incremented by 4 to account for a
'start' byte that is not actual data.
Returns
-------
int
Size of header in bytes.
"""
with open(self.fname, 'rb') as f:
byte_offset = -1
tag = _end_tags[self.filetype]
line = f.readline()
while(line != ''):
if tag in line.strip().decode():
byte_offset = f.tell()
break
line = f.readline()
if byte_offset == -1:
raise FileHeaderNotFoundError(
'Could not find the {} end tag in {}'.format(tag, self.basename)
)
return byte_offset
class Scan(NanonisFile):
"""
Nanonis scan file class.
Contains data loading methods specific to Nanonis sxm files. The
header is terminated by a 'SCANIT_END' tag followed by the \1A\04
code. The NanonisFile header parse method doesn't account for this
so the Scan __init__ method just adds 4 bytes to the byte_offset
attribute so as to not include this as a datapoint.
Data is structured a little differently from grid files, obviously.
For each pixel in the scan, each channel is recorded forwards and
backwards one after the other.
Currently cannot take scans that do not have both directions
recorded for each channel, nor incomplete scans.
Parameters
----------
fname : str
Filename for scan file.
Attributes
----------
header : dict
Parsed sxm header. Some fields are converted to float,
otherwise most are string values.
signals : dict
Dict keys correspond to channel name, values correspond to
another dict whose keys are simply forward and backward arrays
for the scan image.
Raises
------
UnhandledFileError
If fname does not have a '.sxm' extension.
"""
def __init__(self, fname):
_is_valid_file(fname, ext='sxm')
super(self.__class__, self).__init__(fname)
self.header = _parse_sxm_header(self.header_raw)
# data begins with 4 byte code, add 4 bytes to offset instead
self.byte_offset += 4
# load data
self.signals = self._load_data()
def _load_data(self):
"""
Read binary data for Nanonis sxm file.
Returns
-------
dict
Channel name keyed dict of each channel array.
"""
channs = list(self.header['data_info']['Name'])
nchanns = len(channs)
nx, ny = self.header['scan_pixels']
# assume both directions for now
ndir = 2
data_dict = dict()
# open and seek to start of data
f = open(self.fname, 'rb')
f.seek(self.byte_offset)
data_format = '>f4'
scandata = np.fromfile(f, dtype=data_format)
f.close()
# reshape
scandata_shaped = scandata.reshape(nchanns, ndir, nx, ny)
# extract data for each channel
for i, chann in enumerate(channs):
chann_dict = dict(forward=scandata_shaped[i, 0, :, :],
backward=scandata_shaped[i, 1, :, :])
data_dict[chann] = chann_dict
return data_dict
class UnhandledFileError(Exception):
"""
To be raised when unknown file extension is passed.
"""
pass
class FileHeaderNotFoundError(Exception):
"""
To be raised when no header information could be determined.
"""
pass
def _parse_sxm_header(header_raw):
"""
Parse raw header string.
Empirically done based on Nanonis header structure. See Scan
docstring or Nanonis help documentation for more details.
Parameters
----------
header_raw : str
Raw header string from read_raw_header() method.
Returns
-------
dict
Channel name keyed dict of each channel array.
"""
header_entries = header_raw.split('\n')
header_entries = header_entries[:-3]
header_dict = dict()
entries_to_be_split = ['scan_offset',
'scan_pixels',
'scan_range',
'scan_time']
entries_to_be_floated = ['scan_offset',
'scan_range',
'scan_time',
'bias',
'acq_time']
entries_to_be_inted = ['scan_pixels']
for i, entry in enumerate(header_entries):
if entry == ':DATA_INFO:' or entry == ':Z-CONTROLLER:':
count = 1
for j in range(i+1, len(header_entries)):
if header_entries[j].startswith(':'):
break
if header_entries[j][0] == '\t':
count += 1
header_dict[entry.strip(':').lower()] = _parse_scan_header_table(header_entries[i+1:i+count])
continue
if entry.startswith(':'):
header_dict[entry.strip(':').lower()] = header_entries[i+1].strip()
for key in entries_to_be_split:
header_dict[key] = header_dict[key].split()
for key in entries_to_be_floated:
if isinstance(header_dict[key], list):
header_dict[key] = np.asarray(header_dict[key], dtype=np.float)
else:
header_dict[key] = np.float(header_dict[key])
for key in entries_to_be_inted:
header_dict[key] = np.asarray(header_dict[key], dtype=np.int)
return header_dict
def _parse_scan_header_table(table_list):
"""
Parse scan file header entries whose values are tab-separated
tables.
"""
table_processed = []
for row in table_list:
# strip leading \t, split by \t
table_processed.append(row.strip('\t').split('\t'))
# column names are first row
keys = table_processed[0]
values = table_processed[1:]
zip_vals = list(zip(*values))
return dict(list(zip(keys, zip_vals)))
def _is_valid_file(fname, ext):
"""
Detect if invalid file is being initialized by class.
"""
if fname[-3:] != ext:
raise UnhandledFileError('{} is not a {} file'.format(fname, ext))
def print_to_asc(index, original_path, header):
template = """:NANONIS_VERSION:
%s
:SCANIT_TYPE:
FLOAT MSBFIRST
:REC_DATE:
%s
:REC_TIME:
%s
:REC_TEMP:
%s
:ACQ_TIME:
%s
:SCAN_PIXELS:
%s %s
:SCAN_FILE:
%s
:SCAN_TIME:
%s %s
:SCAN_RANGE:
%s %s
:SCAN_OFFSET:
%s %s
:SCAN_ANGLE:
%s
:SCAN_DIR:
%s
:BIAS:
%s
:DATA_INFO:
Channel Name Unit Direction Calibration Offset
%s %s %s %s %s %s
:SCANIT_END:
"""
printable = template % (
header["nanonis_version"],
header["rec_date"],
header["rec_time"],
header["rec_temp"],
header["acq_time"],
header["scan_pixels"][0],
header["scan_pixels"][1],
header["scan_file"],
header["scan_time"][0],
header["scan_time"][1],
header["scan_range"][0],
header["scan_range"][1],
header["scan_offset"][0],
header["scan_offset"][1],
header["scan_angle"],
header["scan_dir"],
header["bias"],
header["data_info"]["Channel"][index],
header["data_info"]["Name"][index],
header["data_info"]["Unit"][index],
header["data_info"]["Direction"][index],
header["data_info"]["Calibration"][index],
header["data_info"]["Offset"][index]
)
return printable
def main():
filepath = bot.filepath()
p = re.compile(r'.*.sxm')
if re.search(p, filepath):
nf = Scan(filepath)
for i, element in enumerate(nf.signals):
printable = print_to_asc(i, filepath, nf.header)
channel = list(nf.signals.keys())[i]
shape = nf.signals[channel]['forward'].shape
for direction in ['forward', 'backward']:
if direction == 'forward':
outname = bot.no_extension() + '[%s_fwd].txt' % channel
outpath = bot.output_filepath(outname)
with open(outpath, 'wt') as fp:
fp.write(printable)
with open(outpath, 'ab') as fp:
data_formatted = np.flipud(nf.signals[channel]['forward'].reshape((shape[1], shape[0])))
np.savetxt(fp, data_formatted)
if direction == 'backward':
outname = bot.no_extension() + '[%s_bwd].txt' % channel
outpath = bot.output_filepath(outname)
with open(outpath, 'wt') as fp:
fp.write(printable)
with open(outpath, 'ab') as fp:
data_formatted = np.flipud(nf.signals[channel]['backward'].reshape((shape[1], shape[0])))
np.savetxt(fp, data_formatted)
if __name__ == "__main__":
main()
| [
"numpy.fromfile",
"numpy.float",
"re.compile",
"numpy.asarray",
"os.path.split",
"rinobot_plugin.filepath",
"rinobot_plugin.no_extension",
"numpy.savetxt",
"rinobot_plugin.output_filepath",
"re.search"
] | [((10398, 10412), 'rinobot_plugin.filepath', 'bot.filepath', ([], {}), '()\n', (10410, 10412), True, 'import rinobot_plugin as bot\n'), ((10421, 10441), 're.compile', 're.compile', (['""".*.sxm"""'], {}), "('.*.sxm')\n", (10431, 10441), False, 'import re\n'), ((10451, 10473), 're.search', 're.search', (['p', 'filepath'], {}), '(p, filepath)\n', (10460, 10473), False, 'import re\n'), ((858, 878), 'os.path.split', 'os.path.split', (['fname'], {}), '(fname)\n', (871, 878), False, 'import os\n'), ((5627, 5660), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'data_format'}), '(f, dtype=data_format)\n', (5638, 5660), True, 'import numpy as np\n'), ((8218, 8260), 'numpy.asarray', 'np.asarray', (['header_dict[key]'], {'dtype': 'np.int'}), '(header_dict[key], dtype=np.int)\n', (8228, 8260), True, 'import numpy as np\n'), ((8038, 8082), 'numpy.asarray', 'np.asarray', (['header_dict[key]'], {'dtype': 'np.float'}), '(header_dict[key], dtype=np.float)\n', (8048, 8082), True, 'import numpy as np\n'), ((8128, 8154), 'numpy.float', 'np.float', (['header_dict[key]'], {}), '(header_dict[key])\n', (8136, 8154), True, 'import numpy as np\n'), ((10926, 10954), 'rinobot_plugin.output_filepath', 'bot.output_filepath', (['outname'], {}), '(outname)\n', (10945, 10954), True, 'import rinobot_plugin as bot\n'), ((11423, 11451), 'rinobot_plugin.output_filepath', 'bot.output_filepath', (['outname'], {}), '(outname)\n', (11442, 11451), True, 'import rinobot_plugin as bot\n'), ((10850, 10868), 'rinobot_plugin.no_extension', 'bot.no_extension', ([], {}), '()\n', (10866, 10868), True, 'import rinobot_plugin as bot\n'), ((11241, 11271), 'numpy.savetxt', 'np.savetxt', (['fp', 'data_formatted'], {}), '(fp, data_formatted)\n', (11251, 11271), True, 'import numpy as np\n'), ((11347, 11365), 'rinobot_plugin.no_extension', 'bot.no_extension', ([], {}), '()\n', (11363, 11365), True, 'import rinobot_plugin as bot\n'), ((11739, 11769), 'numpy.savetxt', 'np.savetxt', (['fp', 'data_formatted'], {}), 
'(fp, data_formatted)\n', (11749, 11769), True, 'import numpy as np\n')] |
import functools
import itertools
import warnings
import imghdr
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from numpy.testing import assert_array_equal
from seaborn._core.plot import Plot
from seaborn._core.scales import Nominal, Continuous
from seaborn._core.rules import categorical_order
from seaborn._core.moves import Move
from seaborn._marks.base import Mark
from seaborn._stats.base import Stat
from seaborn.external.version import Version
assert_vector_equal = functools.partial(
# TODO do we care about int/float dtype consistency?
# Eventually most variables become floats ... but does it matter when?
# (Or rather, does it matter if it happens too early?)
assert_series_equal, check_names=False, check_dtype=False,
)
def assert_gridspec_shape(ax, nrows=1, ncols=1):
gs = ax.get_gridspec()
if Version(mpl.__version__) < Version("3.2"):
assert gs._nrows == nrows
assert gs._ncols == ncols
else:
assert gs.nrows == nrows
assert gs.ncols == ncols
class MockMark(Mark):
_grouping_props = ["color"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.passed_keys = []
self.passed_data = []
self.passed_axes = []
self.passed_scales = None
self.passed_orient = None
self.n_splits = 0
def _plot(self, split_gen, scales, orient):
for keys, data, ax in split_gen():
self.n_splits += 1
self.passed_keys.append(keys)
self.passed_data.append(data)
self.passed_axes.append(ax)
self.passed_scales = scales
self.passed_orient = orient
def _legend_artist(self, variables, value, scales):
a = mpl.lines.Line2D([], [])
a.variables = variables
a.value = value
return a
class TestInit:
def test_empty(self):
p = Plot()
assert p._data.source_data is None
assert p._data.source_vars == {}
def test_data_only(self, long_df):
p = Plot(long_df)
assert p._data.source_data is long_df
assert p._data.source_vars == {}
def test_df_and_named_variables(self, long_df):
variables = {"x": "a", "y": "z"}
p = Plot(long_df, **variables)
for var, col in variables.items():
assert_vector_equal(p._data.frame[var], long_df[col])
assert p._data.source_data is long_df
assert p._data.source_vars.keys() == variables.keys()
def test_df_and_mixed_variables(self, long_df):
variables = {"x": "a", "y": long_df["z"]}
p = Plot(long_df, **variables)
for var, col in variables.items():
if isinstance(col, str):
assert_vector_equal(p._data.frame[var], long_df[col])
else:
assert_vector_equal(p._data.frame[var], col)
assert p._data.source_data is long_df
assert p._data.source_vars.keys() == variables.keys()
def test_vector_variables_only(self, long_df):
variables = {"x": long_df["a"], "y": long_df["z"]}
p = Plot(**variables)
for var, col in variables.items():
assert_vector_equal(p._data.frame[var], col)
assert p._data.source_data is None
assert p._data.source_vars.keys() == variables.keys()
def test_vector_variables_no_index(self, long_df):
variables = {"x": long_df["a"].to_numpy(), "y": long_df["z"].to_list()}
p = Plot(**variables)
for var, col in variables.items():
assert_vector_equal(p._data.frame[var], pd.Series(col))
assert p._data.names[var] is None
assert p._data.source_data is None
assert p._data.source_vars.keys() == variables.keys()
def test_data_only_named(self, long_df):
p = Plot(data=long_df)
assert p._data.source_data is long_df
assert p._data.source_vars == {}
def test_positional_and_named_data(self, long_df):
err = "`data` given by both name and position"
with pytest.raises(TypeError, match=err):
Plot(long_df, data=long_df)
@pytest.mark.parametrize("var", ["x", "y"])
def test_positional_and_named_xy(self, long_df, var):
err = f"`{var}` given by both name and position"
with pytest.raises(TypeError, match=err):
Plot(long_df, "a", "b", **{var: "c"})
def test_positional_data_x_y(self, long_df):
    """Three positional args are interpreted as (data, x, y)."""
    p = Plot(long_df, "a", "b")
    assert p._data.source_data is long_df
    assert list(p._data.source_vars) == ["x", "y"]
def test_positional_x_y(self, long_df):
    """Two positional vectors are interpreted as (x, y), with no data."""
    p = Plot(long_df["a"], long_df["b"])
    assert p._data.source_data is None
    assert list(p._data.source_vars) == ["x", "y"]
def test_positional_data_x(self, long_df):
    """Two positional args where the first is a frame become (data, x)."""
    p = Plot(long_df, "a")
    assert p._data.source_data is long_df
    assert list(p._data.source_vars) == ["x"]
def test_positional_x(self, long_df):
    """A single positional vector is interpreted as x."""
    p = Plot(long_df["a"])
    assert p._data.source_data is None
    assert list(p._data.source_vars) == ["x"]
def test_positional_too_many(self, long_df):
    """More than three positional arguments is an error."""
    err = r"Plot\(\) accepts no more than 3 positional arguments \(data, x, y\)"
    with pytest.raises(TypeError, match=err):
        Plot(long_df, "x", "y", "z")
def test_unknown_keywords(self, long_df):
    """Unrecognized keyword arguments are rejected with a clear message."""
    err = r"Plot\(\) got unexpected keyword argument\(s\): bad"
    with pytest.raises(TypeError, match=err):
        Plot(long_df, bad="x")
class TestLayerAddition:
    """Tests for Plot.add(): how layers inherit, extend, and override data."""

    def test_without_data(self, long_df):
        """A layer with no own data inherits the Plot-level frame."""
        p = Plot(long_df, x="x", y="y").add(MockMark()).plot()
        layer, = p._layers
        assert_frame_equal(p._data.frame, layer["data"].frame, check_dtype=False)

    def test_with_new_variable_by_name(self, long_df):
        """A layer can add a variable by column name."""
        p = Plot(long_df, x="x").add(MockMark(), y="y").plot()
        layer, = p._layers
        assert layer["data"].frame.columns.to_list() == ["x", "y"]
        for var in "xy":
            assert_vector_equal(layer["data"].frame[var], long_df[var])

    def test_with_new_variable_by_vector(self, long_df):
        """A layer can add a variable by passing a vector directly."""
        p = Plot(long_df, x="x").add(MockMark(), y=long_df["y"]).plot()
        layer, = p._layers
        assert layer["data"].frame.columns.to_list() == ["x", "y"]
        for var in "xy":
            assert_vector_equal(layer["data"].frame[var], long_df[var])

    def test_with_late_data_definition(self, long_df):
        """Data and variables may be supplied entirely at add() time."""
        p = Plot().add(MockMark(), data=long_df, x="x", y="y").plot()
        layer, = p._layers
        assert layer["data"].frame.columns.to_list() == ["x", "y"]
        for var in "xy":
            assert_vector_equal(layer["data"].frame[var], long_df[var])

    def test_with_new_data_definition(self, long_df):
        """Layer-level data replaces Plot-level data, aligned on index."""
        long_df_sub = long_df.sample(frac=.5)
        p = Plot(long_df, x="x", y="y").add(MockMark(), data=long_df_sub).plot()
        layer, = p._layers
        assert layer["data"].frame.columns.to_list() == ["x", "y"]
        for var in "xy":
            assert_vector_equal(
                layer["data"].frame[var], long_df_sub[var].reindex(long_df.index)
            )

    def test_drop_variable(self, long_df):
        """Passing None at add() removes an inherited variable."""
        p = Plot(long_df, x="x", y="y").add(MockMark(), y=None).plot()
        layer, = p._layers
        assert layer["data"].frame.columns.to_list() == ["x"]
        assert_vector_equal(layer["data"].frame["x"], long_df["x"], check_dtype=False)

    @pytest.mark.xfail(reason="Need decision on default stat")
    def test_stat_default(self):
        """A mark's default_stat should populate the layer's stat."""
        class MarkWithDefaultStat(Mark):
            default_stat = Stat
        p = Plot().add(MarkWithDefaultStat())
        layer, = p._layers
        assert layer["stat"].__class__ is Stat

    def test_stat_nondefault(self):
        """An explicit stat at add() overrides the mark's default_stat."""
        class MarkWithDefaultStat(Mark):
            default_stat = Stat
        class OtherMockStat(Stat):
            pass
        p = Plot().add(MarkWithDefaultStat(), OtherMockStat())
        layer, = p._layers
        assert layer["stat"].__class__ is OtherMockStat

    @pytest.mark.parametrize(
        "arg,expected",
        [("x", "x"), ("y", "y"), ("v", "x"), ("h", "y")],
    )
    def test_orient(self, arg, expected):
        """orient= is normalized (v->x, h->y) and passed to stat and move."""
        class MockStatTrackOrient(Stat):
            def __call__(self, data, groupby, orient, scales):
                self.orient_at_call = orient
                return data
        class MockMoveTrackOrient(Move):
            def __call__(self, data, groupby, orient):
                self.orient_at_call = orient
                return data
        s = MockStatTrackOrient()
        m = MockMoveTrackOrient()
        Plot(x=[1, 2, 3], y=[1, 2, 3]).add(MockMark(), s, m, orient=arg).plot()
        assert s.orient_at_call == expected
        assert m.orient_at_call == expected

    def test_variable_list(self, long_df):
        """_variables tracks declaration order across Plot, add, and pair."""
        p = Plot(long_df, x="x", y="y")
        assert p._variables == ["x", "y"]
        p = Plot(long_df).add(MockMark(), x="x", y="y")
        assert p._variables == ["x", "y"]
        p = Plot(long_df, y="x", color="a").add(MockMark(), x="y")
        assert p._variables == ["y", "color", "x"]
        # Dropping a variable in a layer does not remove it from the list
        p = Plot(long_df, x="x", y="y", color="a").add(MockMark(), color=None)
        assert p._variables == ["x", "y", "color"]
        p = (
            Plot(long_df, x="x", y="y")
            .add(MockMark(), color="a")
            .add(MockMark(), alpha="s")
        )
        assert p._variables == ["x", "y", "color", "alpha"]
        p = Plot(long_df, y="x").pair(x=["a", "b"])
        assert p._variables == ["y", "x0", "x1"]

    def test_type_checks(self):
        """add() rejects Mark/Stat classes passed instead of instances."""
        p = Plot()
        with pytest.raises(TypeError, match="mark must be a Mark instance"):
            p.add(MockMark)
        class MockStat(Stat):
            pass
        with pytest.raises(TypeError, match="stat must be a Stat instance"):
            p.add(MockMark(), MockStat)
class TestScaling:
    """Tests for scale inference, explicit scales, and scale transforms.

    Fix: removed a leftover debug ``print(m.passed_data)`` from
    ``test_pair_categories_shared`` that polluted test output.
    """

    def test_inference(self, long_df):
        """Scale type is inferred from the column dtype (z/a/t columns)."""
        for col, scale_type in zip("zat", ["continuous", "nominal", "temporal"]):
            p = Plot(long_df, x=col, y=col).add(MockMark()).plot()
            for var in "xy":
                assert p._scales[var].scale_type == scale_type

    def test_inference_from_layer_data(self):
        """Inference also works from layer-level (add-time) data."""
        p = Plot().add(MockMark(), x=["a", "b", "c"]).plot()
        assert p._scales["x"]("b") == 1

    def test_inference_joins(self):
        """Data from multiple layers is combined before inferring a scale."""
        p = (
            Plot(y=pd.Series([1, 2, 3, 4]))
            .add(MockMark(), x=pd.Series([1, 2]))
            .add(MockMark(), x=pd.Series(["a", "b"], index=[2, 3]))
            .plot()
        )
        assert p._scales["x"]("a") == 2

    def test_inferred_categorical_converter(self):
        """The mpl axis gets a categorical unit converter when inferred."""
        p = Plot(x=["b", "c", "a"]).add(MockMark()).plot()
        ax = p._figure.axes[0]
        assert ax.xaxis.convert_units("c") == 1

    def test_explicit_categorical_converter(self):
        """An explicit Nominal scale installs a categorical converter."""
        p = Plot(y=[2, 1, 3]).scale(y=Nominal()).add(MockMark()).plot()
        ax = p._figure.axes[0]
        assert ax.yaxis.convert_units("3") == 2

    @pytest.mark.xfail(reason="Temporal auto-conversion not implemented")
    def test_categorical_as_datetime(self):
        dates = ["1970-01-03", "1970-01-02", "1970-01-04"]
        p = Plot(x=dates).scale(...).add(MockMark()).plot()
        p  # TODO
        ...

    def test_faceted_log_scale(self):
        """A log scale applies to every facet's axis."""
        p = Plot(y=[1, 10]).facet(col=["a", "b"]).scale(y="log").plot()
        for ax in p._figure.axes:
            xfm = ax.yaxis.get_transform().transform
            assert_array_equal(xfm([1, 10, 100]), [0, 1, 2])

    def test_paired_single_log_scale(self):
        """x1="log" affects only the second paired subplot."""
        x0, x1 = [1, 2, 3], [1, 10, 100]
        p = Plot().pair(x=[x0, x1]).scale(x1="log").plot()
        ax_lin, ax_log = p._figure.axes
        xfm_lin = ax_lin.xaxis.get_transform().transform
        assert_array_equal(xfm_lin([1, 10, 100]), [1, 10, 100])
        xfm_log = ax_log.xaxis.get_transform().transform
        assert_array_equal(xfm_log([1, 10, 100]), [0, 1, 2])

    @pytest.mark.xfail(reason="Custom log scale needs log name for consistency")
    def test_log_scale_name(self):
        p = Plot().scale(x="log").plot()
        ax = p._figure.axes[0]
        assert ax.get_xscale() == "log"
        assert ax.get_yscale() == "linear"

    def test_mark_data_log_transform_is_inverted(self, long_df):
        """The mark receives data in the original (inverted) space."""
        col = "z"
        m = MockMark()
        Plot(long_df, x=col).scale(x="log").add(m).plot()
        assert_vector_equal(m.passed_data[0]["x"], long_df[col])

    # NOTE(review): "transfrom" typo kept in the name to preserve the test id.
    def test_mark_data_log_transfrom_with_stat(self, long_df):
        """Stats run in transformed space: mean of logs, then inverted."""
        class Mean(Stat):
            group_by_orient = True
            def __call__(self, data, groupby, orient, scales):
                other = {"x": "y", "y": "x"}[orient]
                return groupby.agg(data, {other: "mean"})
        col = "z"
        grouper = "a"
        m = MockMark()
        s = Mean()
        Plot(long_df, x=grouper, y=col).scale(y="log").add(m, s).plot()
        expected = (
            long_df[col]
            .pipe(np.log)
            .groupby(long_df[grouper], sort=False)
            .mean()
            .pipe(np.exp)
            .reset_index(drop=True)
        )
        assert_vector_equal(m.passed_data[0]["y"], expected)

    def test_mark_data_from_categorical(self, long_df):
        """Categorical values reach the mark as float level positions."""
        col = "a"
        m = MockMark()
        Plot(long_df, x=col).add(m).plot()
        levels = categorical_order(long_df[col])
        level_map = {x: float(i) for i, x in enumerate(levels)}
        assert_vector_equal(m.passed_data[0]["x"], long_df[col].map(level_map))

    def test_mark_data_from_datetime(self, long_df):
        """Datetime values reach the mark as matplotlib date numbers."""
        col = "t"
        m = MockMark()
        Plot(long_df, x=col).add(m).plot()
        expected = long_df[col].map(mpl.dates.date2num)
        if Version(mpl.__version__) < Version("3.3"):
            # Older matplotlib used a different date epoch
            expected = expected + mpl.dates.date2num(np.datetime64('0000-12-31'))
        assert_vector_equal(m.passed_data[0]["x"], expected)

    def test_facet_categories(self):
        """Shared facets use the union of categorical levels."""
        m = MockMark()
        p = Plot(x=["a", "b", "a", "c"]).facet(col=["x", "x", "y", "y"]).add(m).plot()
        ax1, ax2 = p._figure.axes
        assert len(ax1.get_xticks()) == 3
        assert len(ax2.get_xticks()) == 3
        assert_vector_equal(m.passed_data[0]["x"], pd.Series([0., 1.], [0, 1]))
        assert_vector_equal(m.passed_data[1]["x"], pd.Series([0., 2.], [2, 3]))

    def test_facet_categories_unshared(self):
        """With sharex=False, each facet gets its own level positions."""
        m = MockMark()
        p = (
            Plot(x=["a", "b", "a", "c"])
            .facet(col=["x", "x", "y", "y"])
            .configure(sharex=False)
            .add(m)
            .plot()
        )
        ax1, ax2 = p._figure.axes
        assert len(ax1.get_xticks()) == 2
        assert len(ax2.get_xticks()) == 2
        assert_vector_equal(m.passed_data[0]["x"], pd.Series([0., 1.], [0, 1]))
        assert_vector_equal(m.passed_data[1]["x"], pd.Series([0., 1.], [2, 3]))

    def test_facet_categories_single_dim_shared(self):
        """sharex="row" pools categorical levels within each facet row."""
        data = [
            ("a", 1, 1), ("b", 1, 1),
            ("a", 1, 2), ("c", 1, 2),
            ("b", 2, 1), ("d", 2, 1),
            ("e", 2, 2), ("e", 2, 1),
        ]
        df = pd.DataFrame(data, columns=["x", "row", "col"]).assign(y=1)
        m = MockMark()
        p = (
            Plot(df, x="x")
            .facet(row="row", col="col")
            .add(m)
            .configure(sharex="row")
            .plot()
        )
        axs = p._figure.axes
        for ax in axs:
            assert ax.get_xticks() == [0, 1, 2]
        assert_vector_equal(m.passed_data[0]["x"], pd.Series([0., 1.], [0, 1]))
        assert_vector_equal(m.passed_data[1]["x"], pd.Series([0., 2.], [2, 3]))
        assert_vector_equal(m.passed_data[2]["x"], pd.Series([0., 1., 2.], [4, 5, 7]))
        assert_vector_equal(m.passed_data[3]["x"], pd.Series([2.], [6]))

    def test_pair_categories(self):
        """Unshared paired axes scale their categories independently."""
        data = [("a", "a"), ("b", "c")]
        df = pd.DataFrame(data, columns=["x1", "x2"]).assign(y=1)
        m = MockMark()
        p = Plot(df, y="y").pair(x=["x1", "x2"]).add(m).plot()
        ax1, ax2 = p._figure.axes
        assert ax1.get_xticks() == [0, 1]
        assert ax2.get_xticks() == [0, 1]
        assert_vector_equal(m.passed_data[0]["x"], pd.Series([0., 1.], [0, 1]))
        assert_vector_equal(m.passed_data[1]["x"], pd.Series([0., 1.], [0, 1]))

    @pytest.mark.xfail(
        Version(mpl.__version__) < Version("3.4.0"),
        reason="Sharing paired categorical axes requires matplotlib>3.4.0"
    )
    def test_pair_categories_shared(self):
        """sharex=True pools categorical levels across paired subplots."""
        data = [("a", "a"), ("b", "c")]
        df = pd.DataFrame(data, columns=["x1", "x2"]).assign(y=1)
        m = MockMark()
        p = Plot(df, y="y").pair(x=["x1", "x2"]).add(m).configure(sharex=True).plot()
        for ax in p._figure.axes:
            assert ax.get_xticks() == [0, 1, 2]
        assert_vector_equal(m.passed_data[0]["x"], pd.Series([0., 1.], [0, 1]))
        assert_vector_equal(m.passed_data[1]["x"], pd.Series([0., 2.], [0, 1]))

    def test_identity_mapping_linewidth(self):
        """scale(linewidth=None) passes values through unmapped."""
        m = MockMark()
        x = y = [1, 2, 3, 4, 5]
        lw = pd.Series([.5, .1, .1, .9, 3])
        Plot(x=x, y=y, linewidth=lw).scale(linewidth=None).add(m).plot()
        assert_vector_equal(m.passed_scales["linewidth"](lw), lw)

    def test_pair_single_coordinate_stat_orient(self, long_df):
        """With only x paired, the stat's orient defaults to x."""
        class MockStat(Stat):
            def __call__(self, data, groupby, orient, scales):
                self.orient = orient
                return data
        s = MockStat()
        Plot(long_df).pair(x=["x", "y"]).add(MockMark(), s).plot()
        assert s.orient == "x"

    def test_inferred_nominal_passed_to_stat(self):
        """The stat sees the inferred nominal scale for string data."""
        class MockStat(Stat):
            def __call__(self, data, groupby, orient, scales):
                self.scales = scales
                return data
        s = MockStat()
        y = ["a", "a", "b", "c"]
        Plot(y=y).add(MockMark(), s).plot()
        assert s.scales["y"].scale_type == "nominal"

    # TODO where should RGB consistency be enforced?
    @pytest.mark.xfail(
        reason="Correct output representation for color with identity scale undefined"
    )
    def test_identity_mapping_color_strings(self):
        m = MockMark()
        x = y = [1, 2, 3]
        c = ["C0", "C2", "C1"]
        Plot(x=x, y=y, color=c).scale(color=None).add(m).plot()
        expected = mpl.colors.to_rgba_array(c)[:, :3]
        assert_array_equal(m.passed_scales["color"](c), expected)

    def test_identity_mapping_color_tuples(self):
        """RGB tuples pass through an identity color scale unchanged."""
        m = MockMark()
        x = y = [1, 2, 3]
        c = [(1, 0, 0), (0, 1, 0), (1, 0, 0)]
        Plot(x=x, y=y, color=c).scale(color=None).add(m).plot()
        expected = mpl.colors.to_rgba_array(c)[:, :3]
        assert_array_equal(m.passed_scales["color"](c), expected)

    @pytest.mark.xfail(
        reason="Need decision on what to do with scale defined for unused variable"
    )
    def test_undefined_variable_raises(self):
        p = Plot(x=[1, 2, 3], color=["a", "b", "c"]).scale(y=Continuous())
        err = r"No data found for variable\(s\) with explicit scale: {'y'}"
        with pytest.raises(RuntimeError, match=err):
            p.plot()
class TestPlotting:
    """Tests for Plot.plot(): figure creation, splitting, and output."""

    def test_matplotlib_object_creation(self):
        """plot() produces a Figure and an Axes per subplot."""
        p = Plot().plot()
        assert isinstance(p._figure, mpl.figure.Figure)
        for sub in p._subplots:
            assert isinstance(sub["ax"], mpl.axes.Axes)

    def test_empty(self):
        """A plot with no layers never touches the (un-added) mark."""
        m = MockMark()
        Plot().plot()
        assert m.n_splits == 0

    def test_single_split_single_layer(self, long_df):
        """With no grouping variables the mark is called exactly once."""
        m = MockMark()
        p = Plot(long_df, x="f", y="z").add(m).plot()
        assert m.n_splits == 1
        assert m.passed_keys[0] == {}
        assert m.passed_axes == [sub["ax"] for sub in p._subplots]
        for col in p._data.frame:
            assert_series_equal(m.passed_data[0][col], p._data.frame[col])

    def test_single_split_multi_layer(self, long_df):
        """Each layer receives its own variable assignments."""
        vs = [{"color": "a", "linewidth": "z"}, {"color": "b", "pattern": "c"}]
        class NoGroupingMark(MockMark):
            _grouping_props = []
        ms = [NoGroupingMark(), NoGroupingMark()]
        Plot(long_df).add(ms[0], **vs[0]).add(ms[1], **vs[1]).plot()
        for m, v in zip(ms, vs):
            for var, col in v.items():
                assert_vector_equal(m.passed_data[0][var], long_df[col])

    def check_splits_single_var(
        self, data, mark, data_vars, split_var, split_col, split_keys
    ):
        """Shared checks: one mark call per level of a single grouping var."""
        assert mark.n_splits == len(split_keys)
        assert mark.passed_keys == [{split_var: key} for key in split_keys]
        for i, key in enumerate(split_keys):
            split_data = data[data[split_col] == key]
            for var, col in data_vars.items():
                assert_array_equal(mark.passed_data[i][var], split_data[col])

    def check_splits_multi_vars(
        self, data, mark, data_vars, split_vars, split_cols, split_keys
    ):
        """Shared checks: one mark call per level combination of several vars."""
        assert mark.n_splits == np.prod([len(ks) for ks in split_keys])
        expected_keys = [
            dict(zip(split_vars, level_keys))
            for level_keys in itertools.product(*split_keys)
        ]
        assert mark.passed_keys == expected_keys
        for i, keys in enumerate(itertools.product(*split_keys)):
            use_rows = pd.Series(True, data.index)
            for var, col, key in zip(split_vars, split_cols, keys):
                use_rows &= data[col] == key
            split_data = data[use_rows]
            for var, col in data_vars.items():
                assert_array_equal(mark.passed_data[i][var], split_data[col])

    @pytest.mark.parametrize(
        "split_var", [
            "color",  # explicitly declared on the Mark
            "group",  # implicitly used for all Mark classes
        ])
    def test_one_grouping_variable(self, long_df, split_var):
        """A single grouping variable splits the data per level."""
        split_col = "a"
        data_vars = {"x": "f", "y": "z", split_var: split_col}
        m = MockMark()
        p = Plot(long_df, **data_vars).add(m).plot()
        split_keys = categorical_order(long_df[split_col])
        sub, *_ = p._subplots
        assert m.passed_axes == [sub["ax"] for _ in split_keys]
        self.check_splits_single_var(
            long_df, m, data_vars, split_var, split_col, split_keys
        )

    def test_two_grouping_variables(self, long_df):
        """Two grouping variables split on the product of their levels."""
        split_vars = ["color", "group"]
        split_cols = ["a", "b"]
        data_vars = {"y": "z", **{var: col for var, col in zip(split_vars, split_cols)}}
        m = MockMark()
        p = Plot(long_df, **data_vars).add(m).plot()
        split_keys = [categorical_order(long_df[col]) for col in split_cols]
        sub, *_ = p._subplots
        assert m.passed_axes == [
            sub["ax"] for _ in itertools.product(*split_keys)
        ]
        self.check_splits_multi_vars(
            long_df, m, data_vars, split_vars, split_cols, split_keys
        )

    def test_facets_no_subgroups(self, long_df):
        """Faceting alone splits the data, one mark call per facet axes."""
        split_var = "col"
        split_col = "b"
        data_vars = {"x": "f", "y": "z"}
        m = MockMark()
        p = Plot(long_df, **data_vars).facet(**{split_var: split_col}).add(m).plot()
        split_keys = categorical_order(long_df[split_col])
        assert m.passed_axes == list(p._figure.axes)
        self.check_splits_single_var(
            long_df, m, data_vars, split_var, split_col, split_keys
        )

    def test_facets_one_subgroup(self, long_df):
        """Faceting combines with a grouping variable: facet x group splits."""
        facet_var, facet_col = fx = "col", "a"
        group_var, group_col = gx = "group", "b"
        split_vars, split_cols = zip(*[fx, gx])
        data_vars = {"x": "f", "y": "z", group_var: group_col}
        m = MockMark()
        p = (
            Plot(long_df, **data_vars)
            .facet(**{facet_var: facet_col})
            .add(m)
            .plot()
        )
        split_keys = [categorical_order(long_df[col]) for col in [facet_col, group_col]]
        assert m.passed_axes == [
            ax
            for ax in list(p._figure.axes)
            for _ in categorical_order(long_df[group_col])
        ]
        self.check_splits_multi_vars(
            long_df, m, data_vars, split_vars, split_cols, split_keys
        )

    def test_layer_specific_facet_disabling(self, long_df):
        """A layer can opt out of faceting with row=None, seeing full data."""
        axis_vars = {"x": "y", "y": "z"}
        row_var = "a"
        m = MockMark()
        p = Plot(long_df, **axis_vars).facet(row=row_var).add(m, row=None).plot()
        col_levels = categorical_order(long_df[row_var])
        assert len(p._figure.axes) == len(col_levels)
        for data in m.passed_data:
            for var, col in axis_vars.items():
                assert_vector_equal(data[var], long_df[col])

    def test_paired_variables(self, long_df):
        """pair(x, y) iterates the cross product of x and y variables."""
        x = ["x", "y"]
        y = ["f", "z"]
        m = MockMark()
        Plot(long_df).pair(x, y).add(m).plot()
        var_product = itertools.product(x, y)
        for data, (x_i, y_i) in zip(m.passed_data, var_product):
            assert_vector_equal(data["x"], long_df[x_i].astype(float))
            assert_vector_equal(data["y"], long_df[y_i].astype(float))

    def test_paired_one_dimension(self, long_df):
        """Pairing only x yields one mark call per x variable."""
        x = ["y", "z"]
        m = MockMark()
        Plot(long_df).pair(x).add(m).plot()
        for data, x_i in zip(m.passed_data, x):
            assert_vector_equal(data["x"], long_df[x_i].astype(float))

    def test_paired_variables_one_subset(self, long_df):
        """Pairing composes with a grouping variable."""
        x = ["x", "y"]
        y = ["f", "z"]
        group = "a"
        long_df["x"] = long_df["x"].astype(float)  # simplify vector comparison
        m = MockMark()
        Plot(long_df, group=group).pair(x, y).add(m).plot()
        groups = categorical_order(long_df[group])
        var_product = itertools.product(x, y, groups)
        for data, (x_i, y_i, g_i) in zip(m.passed_data, var_product):
            rows = long_df[group] == g_i
            assert_vector_equal(data["x"], long_df.loc[rows, x_i])
            assert_vector_equal(data["y"], long_df.loc[rows, y_i])

    def test_paired_and_faceted(self, long_df):
        """Pairing composes with faceting: one split per (x var, facet)."""
        x = ["y", "z"]
        y = "f"
        row = "c"
        m = MockMark()
        Plot(long_df, y=y).facet(row=row).pair(x).add(m).plot()
        facets = categorical_order(long_df[row])
        var_product = itertools.product(x, facets)
        for data, (x_i, f_i) in zip(m.passed_data, var_product):
            rows = long_df[row] == f_i
            assert_vector_equal(data["x"], long_df.loc[rows, x_i])
            assert_vector_equal(data["y"], long_df.loc[rows, y])

    def test_movement(self, long_df):
        """A Move transforms the data the mark sees, not the source data."""
        orig_df = long_df.copy(deep=True)
        class MockMove(Move):
            def __call__(self, data, groupby, orient):
                return data.assign(x=data["x"] + 1)
        m = MockMark()
        Plot(long_df, x="z", y="z").add(m, move=MockMove()).plot()
        assert_vector_equal(m.passed_data[0]["x"], long_df["z"] + 1)
        assert_vector_equal(m.passed_data[0]["y"], long_df["z"])
        assert_frame_equal(long_df, orig_df)  # Test data was not mutated

    def test_movement_log_scale(self, long_df):
        """Moves operate in transformed space (x - 1 on log10 => /10)."""
        class MockMove(Move):
            def __call__(self, data, groupby, orient):
                return data.assign(x=data["x"] - 1)
        m = MockMark()
        Plot(
            long_df, x="z", y="z"
        ).scale(x="log").add(m, move=MockMove()).plot()
        assert_vector_equal(m.passed_data[0]["x"], long_df["z"] / 10)

    def test_methods_clone(self, long_df):
        """Builder methods return a new Plot; the original is untouched."""
        p1 = Plot(long_df, "x", "y")
        p2 = p1.add(MockMark()).facet("a")
        assert p1 is not p2
        assert not p1._layers
        assert not p1._facet_spec

    def test_default_is_no_pyplot(self):
        """plot() does not register a figure with pyplot by default."""
        p = Plot().plot()
        assert not plt.get_fignums()
        assert isinstance(p._figure, mpl.figure.Figure)

    def test_with_pyplot(self):
        """plot(pyplot=True) registers the figure with pyplot."""
        p = Plot().plot(pyplot=True)
        assert len(plt.get_fignums()) == 1
        fig = plt.gcf()
        assert p._figure is fig

    def test_show(self):
        """show() returns None and does not attach _figure to the Plot."""
        p = Plot()
        with warnings.catch_warnings(record=True) as msg:
            out = p.show(block=False)
        assert out is None
        assert not hasattr(p, "_figure")
        assert len(plt.get_fignums()) == 1
        fig = plt.gcf()
        gui_backend = (
            # From https://github.com/matplotlib/matplotlib/issues/20281
            fig.canvas.manager.show != mpl.backend_bases.FigureManagerBase.show
        )
        if not gui_backend:
            # Non-GUI backends warn that show() cannot display the figure
            assert msg

    def test_png_representation(self):
        """_repr_png_ yields PNG bytes plus width/height metadata."""
        p = Plot()
        data, metadata = p._repr_png_()
        assert not hasattr(p, "_figure")
        assert isinstance(data, bytes)
        assert imghdr.what("", data) == "png"
        assert sorted(metadata) == ["height", "width"]
        # TODO test retina scaling

    @pytest.mark.xfail(reason="Plot.save not yet implemented")
    def test_save(self):
        Plot().save()

    def test_on_axes(self):
        """on(ax) draws into an existing Axes and adopts its figure."""
        ax = mpl.figure.Figure().subplots()
        m = MockMark()
        p = Plot().on(ax).add(m).plot()
        assert m.passed_axes == [ax]
        assert p._figure is ax.figure

    @pytest.mark.parametrize("facet", [True, False])
    def test_on_figure(self, facet):
        """on(figure) draws into an existing Figure (with/without facets)."""
        f = mpl.figure.Figure()
        m = MockMark()
        p = Plot().on(f).add(m)
        if facet:
            p = p.facet(["a", "b"])
        p = p.plot()
        assert m.passed_axes == f.axes
        assert p._figure is f

    @pytest.mark.skipif(
        Version(mpl.__version__) < Version("3.4"),
        reason="mpl<3.4 does not have SubFigure",
    )
    @pytest.mark.parametrize("facet", [True, False])
    def test_on_subfigure(self, facet):
        """on(subfigure) draws only into that subfigure's axes."""
        sf1, sf2 = mpl.figure.Figure().subfigures(2)
        sf1.subplots()
        m = MockMark()
        p = Plot().on(sf2).add(m)
        if facet:
            p = p.facet(["a", "b"])
        p = p.plot()
        assert m.passed_axes == sf2.figure.axes[1:]
        assert p._figure is sf2.figure

    def test_on_type_check(self):
        """on() rejects targets that are not mpl Axes/Figure/SubFigure."""
        p = Plot()
        with pytest.raises(TypeError, match="The `Plot.on`.+<class 'list'>"):
            p.on([])

    def test_on_axes_with_subplots_error(self):
        """on(ax) is incompatible with faceting/pairing (needs >1 subplot)."""
        ax = mpl.figure.Figure().subplots()
        p1 = Plot().facet(["a", "b"]).on(ax)
        with pytest.raises(RuntimeError, match="Cannot create multiple subplots"):
            p1.plot()
        p2 = Plot().pair([["a", "b"], ["x", "y"]]).on(ax)
        with pytest.raises(RuntimeError, match="Cannot create multiple subplots"):
            p2.plot()

    def test_axis_labels_from_constructor(self, long_df):
        """Axis labels come from names; anonymous vectors give no label."""
        ax, = Plot(long_df, x="a", y="b").plot()._figure.axes
        assert ax.get_xlabel() == "a"
        assert ax.get_ylabel() == "b"
        ax, = Plot(x=long_df["a"], y=long_df["b"].to_numpy()).plot()._figure.axes
        assert ax.get_xlabel() == "a"
        assert ax.get_ylabel() == ""

    def test_axis_labels_from_layer(self, long_df):
        """Layer-level variables also contribute axis labels."""
        m = MockMark()
        ax, = Plot(long_df).add(m, x="a", y="b").plot()._figure.axes
        assert ax.get_xlabel() == "a"
        assert ax.get_ylabel() == "b"
        p = Plot().add(m, x=long_df["a"], y=long_df["b"].to_list())
        ax, = p.plot()._figure.axes
        assert ax.get_xlabel() == "a"
        assert ax.get_ylabel() == ""

    def test_axis_labels_are_first_name(self, long_df):
        """When several names map to one axis, the first named one wins."""
        m = MockMark()
        p = (
            Plot(long_df, x=long_df["z"].to_list(), y="b")
            .add(m, x="a")
            .add(m, x="x", y="y")
        )
        ax, = p.plot()._figure.axes
        assert ax.get_xlabel() == "a"
        assert ax.get_ylabel() == "b"
class TestFacetInterface:
    """Tests for Plot.facet(): subplot layout, titles, ordering, sharing.

    Fix: ``check_facet_results_2d`` previously built ``levels`` as an
    ``itertools.product`` generator and exhausted it with ``len(list(levels))``,
    so the subsequent ``zip`` loop never executed and none of the per-subplot
    assertions ran. That also masked two further bugs inside the dead loop:
    ``subplot["axes"]`` (wrong key; elsewhere it is ``subplot["ax"]``) and
    ``len(levels["row"])`` (subscripting the generator instead of ``order``).
    """

    @pytest.fixture(scope="class", params=["row", "col"])
    def dim(self, request):
        """Facet dimension under test."""
        return request.param

    @pytest.fixture(scope="class", params=["reverse", "subset", "expand"])
    def reorder(self, request):
        """Callable producing a modified level order from the default one."""
        return {
            "reverse": lambda x: x[::-1],
            "subset": lambda x: x[:-1],
            "expand": lambda x: x + ["z"],
        }[request.param]

    def check_facet_results_1d(self, p, df, dim, key, order=None):
        """Shared checks for a plot faceted on a single dimension."""
        p = p.plot()
        order = categorical_order(df[key], order)
        assert len(p._figure.axes) == len(order)
        other_dim = {"row": "col", "col": "row"}[dim]
        for subplot, level in zip(p._subplots, order):
            assert subplot[dim] == level
            assert subplot[other_dim] is None
            assert subplot["ax"].get_title() == f"{key} = {level}"
            assert_gridspec_shape(subplot["ax"], **{f"n{dim}s": len(order)})

    def test_1d(self, long_df, dim):
        key = "a"
        p = Plot(long_df).facet(**{dim: key})
        self.check_facet_results_1d(p, long_df, dim, key)

    def test_1d_as_vector(self, long_df, dim):
        """Faceting also accepts a vector rather than a column name."""
        key = "a"
        p = Plot(long_df).facet(**{dim: long_df[key]})
        self.check_facet_results_1d(p, long_df, dim, key)

    def test_1d_with_order(self, long_df, dim, reorder):
        key = "a"
        order = reorder(categorical_order(long_df[key]))
        p = Plot(long_df).facet(**{dim: key, "order": order})
        self.check_facet_results_1d(p, long_df, dim, key, order)

    def check_facet_results_2d(self, p, df, variables, order=None):
        """Shared checks for a plot faceted on both row and col."""
        p = p.plot()
        if order is None:
            order = {dim: categorical_order(df[key]) for dim, key in variables.items()}
        # Materialize so the levels survive the len() check and the zip below
        levels = list(itertools.product(*[order[dim] for dim in ["row", "col"]]))
        assert len(p._subplots) == len(levels)
        for subplot, (row_level, col_level) in zip(p._subplots, levels):
            assert subplot["row"] == row_level
            assert subplot["col"] == col_level
            assert subplot["ax"].get_title() == (
                f"{variables['row']} = {row_level} | {variables['col']} = {col_level}"
            )
            assert_gridspec_shape(
                subplot["ax"], len(order["row"]), len(order["col"])
            )

    def test_2d(self, long_df):
        variables = {"row": "a", "col": "c"}
        p = Plot(long_df).facet(**variables)
        self.check_facet_results_2d(p, long_df, variables)

    def test_2d_with_order(self, long_df, reorder):
        variables = {"row": "a", "col": "c"}
        order = {
            dim: reorder(categorical_order(long_df[key]))
            for dim, key in variables.items()
        }
        p = Plot(long_df).facet(**variables, order=order)
        self.check_facet_results_2d(p, long_df, variables, order)

    def test_axis_sharing(self, long_df):
        """Axes are shared by default; sharex/sharey accept False or row/col."""
        variables = {"row": "a", "col": "c"}
        p = Plot(long_df).facet(**variables)
        p1 = p.plot()
        root, *other = p1._figure.axes
        for axis in "xy":
            shareset = getattr(root, f"get_shared_{axis}_axes")()
            assert all(shareset.joined(root, ax) for ax in other)
        p2 = p.configure(sharex=False, sharey=False).plot()
        root, *other = p2._figure.axes
        for axis in "xy":
            shareset = getattr(root, f"get_shared_{axis}_axes")()
            assert not any(shareset.joined(root, ax) for ax in other)
        p3 = p.configure(sharex="col", sharey="row").plot()
        shape = (
            len(categorical_order(long_df[variables["row"]])),
            len(categorical_order(long_df[variables["col"]])),
        )
        axes_matrix = np.reshape(p3._figure.axes, shape)
        for (shared, unshared), vectors in zip(
            ["yx", "xy"], [axes_matrix, axes_matrix.T]
        ):
            for root, *other in vectors:
                shareset = {
                    axis: getattr(root, f"get_shared_{axis}_axes")() for axis in "xy"
                }
                assert all(shareset[shared].joined(root, ax) for ax in other)
                assert not any(shareset[unshared].joined(root, ax) for ax in other)

    def test_col_wrapping(self):
        """wrap= folds a 1D column facet into a grid."""
        cols = list("abcd")
        wrap = 3
        p = Plot().facet(col=cols, wrap=wrap).plot()
        assert len(p._figure.axes) == 4
        assert_gridspec_shape(p._figure.axes[0], len(cols) // wrap + 1, wrap)

        # TODO test axis labels and titles

    def test_row_wrapping(self):
        """wrap= folds a 1D row facet into a grid."""
        rows = list("abcd")
        wrap = 3
        p = Plot().facet(row=rows, wrap=wrap).plot()
        assert_gridspec_shape(p._figure.axes[0], wrap, len(rows) // wrap + 1)
        assert len(p._figure.axes) == 4

        # TODO test axis labels and titles
class TestPairInterface:
def check_pair_grid(self, p, x, y):
xys = itertools.product(y, x)
for (y_i, x_j), subplot in zip(xys, p._subplots):
ax = subplot["ax"]
assert ax.get_xlabel() == "" if x_j is None else x_j
assert ax.get_ylabel() == "" if y_i is None else y_i
assert_gridspec_shape(subplot["ax"], len(y), len(x))
@pytest.mark.parametrize(
"vector_type", [list, np.array, pd.Series, pd.Index]
)
def test_all_numeric(self, long_df, vector_type):
x, y = ["x", "y", "z"], ["s", "f"]
p = Plot(long_df).pair(vector_type(x), vector_type(y)).plot()
self.check_pair_grid(p, x, y)
def test_single_variable_key_raises(self, long_df):
p = Plot(long_df)
err = "You must pass a sequence of variable keys to `y`"
with pytest.raises(TypeError, match=err):
p.pair(x=["x", "y"], y="z")
@pytest.mark.parametrize("dim", ["x", "y"])
def test_single_dimension(self, long_df, dim):
variables = {"x": None, "y": None}
variables[dim] = ["x", "y", "z"]
p = Plot(long_df).pair(**variables).plot()
variables = {k: [v] if v is None else v for k, v in variables.items()}
self.check_pair_grid(p, **variables)
def test_non_cross(self, long_df):
x = ["x", "y"]
y = ["f", "z"]
p = Plot(long_df).pair(x, y, cross=False).plot()
for i, subplot in enumerate(p._subplots):
ax = subplot["ax"]
assert ax.get_xlabel() == x[i]
assert ax.get_ylabel() == y[i]
assert_gridspec_shape(ax, 1, len(x))
root, *other = p._figure.axes
for axis in "xy":
shareset = getattr(root, f"get_shared_{axis}_axes")()
assert not any(shareset.joined(root, ax) for ax in other)
def test_with_no_variables(self, long_df):
all_cols = long_df.columns
p1 = Plot(long_df).pair()
for axis in "xy":
actual = [
v for k, v in p1._pair_spec["variables"].items() if k.startswith(axis)
]
assert actual == all_cols.to_list()
p2 = Plot(long_df, y="y").pair()
x_vars = [
v for k, v in p2._pair_spec["variables"].items() if k.startswith("x")
]
assert all_cols.difference(x_vars).item() == "y"
assert "y" not in p2._pair_spec
p3 = Plot(long_df, color="a").pair()
for axis in "xy":
x_vars = [
v for k, v in p3._pair_spec["variables"].items() if k.startswith("x")
]
assert all_cols.difference(x_vars).item() == "a"
with pytest.raises(RuntimeError, match="You must pass `data`"):
Plot().pair()
def test_with_facets(self, long_df):
x = "x"
y = ["y", "z"]
col = "a"
p = Plot(long_df, x=x).facet(col).pair(y=y).plot()
facet_levels = categorical_order(long_df[col])
dims = itertools.product(y, facet_levels)
for (y_i, col_i), subplot in zip(dims, p._subplots):
ax = subplot["ax"]
assert ax.get_xlabel() == x
assert ax.get_ylabel() == y_i
assert ax.get_title() == f"{col} = {col_i}"
assert_gridspec_shape(ax, len(y), len(facet_levels))
@pytest.mark.parametrize("variables", [("rows", "y"), ("columns", "x")])
def test_error_on_facet_overlap(self, long_df, variables):
facet_dim, pair_axis = variables
p = Plot(long_df).facet(**{facet_dim[:3]: "a"}).pair(**{pair_axis: ["x", "y"]})
expected = f"Cannot facet the {facet_dim} while pairing on `{pair_axis}`."
with pytest.raises(RuntimeError, match=expected):
p.plot()
@pytest.mark.parametrize("variables", [("columns", "y"), ("rows", "x")])
def test_error_on_wrap_overlap(self, long_df, variables):
facet_dim, pair_axis = variables
p = (
Plot(long_df)
.facet(wrap=2, **{facet_dim[:3]: "a"})
.pair(**{pair_axis: ["x", "y"]})
)
expected = f"Cannot wrap the {facet_dim} while pairing on `{pair_axis}``."
with pytest.raises(RuntimeError, match=expected):
p.plot()
def test_axis_sharing(self, long_df):
p = Plot(long_df).pair(x=["a", "b"], y=["y", "z"])
shape = 2, 2
p1 = p.plot()
axes_matrix = np.reshape(p1._figure.axes, shape)
for root, *other in axes_matrix: # Test row-wise sharing
x_shareset = getattr(root, "get_shared_x_axes")()
assert not any(x_shareset.joined(root, ax) for ax in other)
y_shareset = getattr(root, "get_shared_y_axes")()
assert all(y_shareset.joined(root, ax) for ax in other)
for root, *other in axes_matrix.T: # Test col-wise sharing
x_shareset = getattr(root, "get_shared_x_axes")()
assert all(x_shareset.joined(root, ax) for ax in other)
y_shareset = getattr(root, "get_shared_y_axes")()
assert not any(y_shareset.joined(root, ax) for ax in other)
p2 = p.configure(sharex=False, sharey=False).plot()
root, *other = p2._figure.axes
for axis in "xy":
shareset = getattr(root, f"get_shared_{axis}_axes")()
assert not any(shareset.joined(root, ax) for ax in other)
def test_axis_sharing_with_facets(self, long_df):
p = Plot(long_df, y="y").pair(x=["a", "b"]).facet(row="c").plot()
shape = 2, 2
axes_matrix = np.reshape(p._figure.axes, shape)
for root, *other in axes_matrix: # Test row-wise sharing
x_shareset = getattr(root, "get_shared_x_axes")()
assert not any(x_shareset.joined(root, ax) for ax in other)
y_shareset = getattr(root, "get_shared_y_axes")()
assert all(y_shareset.joined(root, ax) for ax in other)
for root, *other in axes_matrix.T: # Test col-wise sharing
x_shareset = getattr(root, "get_shared_x_axes")()
assert all(x_shareset.joined(root, ax) for ax in other)
y_shareset = getattr(root, "get_shared_y_axes")()
assert all(y_shareset.joined(root, ax) for ax in other)
def test_x_wrapping(self, long_df):
x_vars = ["f", "x", "y", "z"]
wrap = 3
p = Plot(long_df, y="y").pair(x=x_vars, wrap=wrap).plot()
assert_gridspec_shape(p._figure.axes[0], len(x_vars) // wrap + 1, wrap)
assert len(p._figure.axes) == len(x_vars)
# TODO test axis labels and visibility
def test_y_wrapping(self, long_df):
y_vars = ["f", "x", "y", "z"]
wrap = 3
p = Plot(long_df, x="x").pair(y=y_vars, wrap=wrap).plot()
assert_gridspec_shape(p._figure.axes[0], wrap, len(y_vars) // wrap + 1)
assert len(p._figure.axes) == len(y_vars)
# TODO test axis labels and visibility
def test_non_cross_wrapping(self, long_df):
x_vars = ["a", "b", "c", "t"]
y_vars = ["f", "x", "y", "z"]
wrap = 3
p = (
Plot(long_df, x="x")
.pair(x=x_vars, y=y_vars, wrap=wrap, cross=False)
.plot()
)
assert_gridspec_shape(p._figure.axes[0], len(x_vars) // wrap + 1, wrap)
assert len(p._figure.axes) == len(x_vars)
def test_orient_inference(self, long_df):
orient_list = []
class CaptureMoveOrient(Move):
def __call__(self, data, groupby, orient):
orient_list.append(orient)
return data
(
Plot(long_df, x="x")
.pair(y=["b", "z"])
.add(MockMark(), move=CaptureMoveOrient())
.plot()
)
assert orient_list == ["y", "x"]
def test_two_variables_single_order_error(self, long_df):
    """A flat `order` list is rejected when faceting on both col and row."""
    expected = "When faceting on both col= and row=, passing `order`"
    with pytest.raises(RuntimeError, match=expected):
        Plot(long_df).facet(col="a", row="b", order=["a", "b", "c"])
class TestLabelVisibility:
    """Axis labels and tick labels should be visible only where informative.

    Interior subplots that share an axis with an outer subplot should have
    that axis's label and tick labels hidden.
    """

    def test_single_subplot(self, long_df):
        """A lone subplot shows both axis labels and all tick labels."""
        x, y = "a", "z"
        p = Plot(long_df, x=x, y=y).plot()
        subplot, *_ = p._subplots
        ax = subplot["ax"]
        assert ax.xaxis.get_label().get_visible()
        assert ax.yaxis.get_label().get_visible()
        assert all(t.get_visible() for t in ax.get_xticklabels())
        assert all(t.get_visible() for t in ax.get_yticklabels())

    @pytest.mark.parametrize(
        "facet_kws,pair_kws", [({"col": "b"}, {}), ({}, {"x": ["x", "y", "f"]})]
    )
    def test_1d_column(self, long_df, facet_kws, pair_kws):
        """In a single row of subplots, only the first shows y information."""
        x = None if "x" in pair_kws else "a"
        y = "z"
        # BUG FIX: facet_kws/pair_kws were never applied, so the plot had a
        # single subplot and the loop over `other` below was vacuous,
        # defeating the parametrization.
        p = Plot(long_df, x=x, y=y).facet(**facet_kws).pair(**pair_kws).plot()
        first, *other = p._subplots
        ax = first["ax"]
        assert ax.xaxis.get_label().get_visible()
        assert ax.yaxis.get_label().get_visible()
        assert all(t.get_visible() for t in ax.get_xticklabels())
        assert all(t.get_visible() for t in ax.get_yticklabels())
        for s in other:
            ax = s["ax"]
            assert ax.xaxis.get_label().get_visible()
            assert not ax.yaxis.get_label().get_visible()
            assert all(t.get_visible() for t in ax.get_xticklabels())
            assert not any(t.get_visible() for t in ax.get_yticklabels())

    @pytest.mark.parametrize(
        "facet_kws,pair_kws", [({"row": "b"}, {}), ({}, {"y": ["x", "y", "f"]})]
    )
    def test_1d_row(self, long_df, facet_kws, pair_kws):
        """In a single column of subplots, only the first shows x information."""
        x = "z"
        y = None if "y" in pair_kws else "z"
        # BUG FIX: apply the parametrized facet/pair specs (see test_1d_column).
        p = Plot(long_df, x=x, y=y).facet(**facet_kws).pair(**pair_kws).plot()
        first, *other = p._subplots
        ax = first["ax"]
        assert ax.xaxis.get_label().get_visible()
        assert all(t.get_visible() for t in ax.get_xticklabels())
        assert ax.yaxis.get_label().get_visible()
        assert all(t.get_visible() for t in ax.get_yticklabels())
        for s in other:
            ax = s["ax"]
            assert not ax.xaxis.get_label().get_visible()
            assert ax.yaxis.get_label().get_visible()
            assert not any(t.get_visible() for t in ax.get_xticklabels())
            assert all(t.get_visible() for t in ax.get_yticklabels())

    def test_1d_column_wrapped(self):
        """Wrapped column facets show labels only on grid-edge subplots."""
        p = Plot().facet(col=["a", "b", "c", "d"], wrap=3).plot()
        subplots = list(p._subplots)
        # First subplot of each row shows y information.
        for s in [subplots[0], subplots[-1]]:
            ax = s["ax"]
            assert ax.yaxis.get_label().get_visible()
            assert all(t.get_visible() for t in ax.get_yticklabels())
        # Bottom-row subplots show x information.
        for s in subplots[1:]:
            ax = s["ax"]
            assert ax.xaxis.get_label().get_visible()
            assert all(t.get_visible() for t in ax.get_xticklabels())
        # Interior subplots hide y information.
        for s in subplots[1:-1]:
            ax = s["ax"]
            assert not ax.yaxis.get_label().get_visible()
            assert not any(t.get_visible() for t in ax.get_yticklabels())
        # The top-left subplot sits above another, so its x info is hidden.
        ax = subplots[0]["ax"]
        assert not ax.xaxis.get_label().get_visible()
        assert not any(t.get_visible() for t in ax.get_xticklabels())

    def test_1d_row_wrapped(self):
        """Wrapped row facets show labels only on grid-edge subplots."""
        p = Plot().facet(row=["a", "b", "c", "d"], wrap=3).plot()
        subplots = list(p._subplots)
        # First-column subplots show y information.
        for s in subplots[:-1]:
            ax = s["ax"]
            assert ax.yaxis.get_label().get_visible()
            assert all(t.get_visible() for t in ax.get_yticklabels())
        # Bottom-row subplots show x information.
        for s in subplots[-2:]:
            ax = s["ax"]
            assert ax.xaxis.get_label().get_visible()
            assert all(t.get_visible() for t in ax.get_xticklabels())
        # Subplots above another hide x information.
        for s in subplots[:-2]:
            ax = s["ax"]
            assert not ax.xaxis.get_label().get_visible()
            assert not any(t.get_visible() for t in ax.get_xticklabels())
        # The last subplot is in the second column, so its y info is hidden.
        ax = subplots[-1]["ax"]
        assert not ax.yaxis.get_label().get_visible()
        assert not any(t.get_visible() for t in ax.get_yticklabels())

    def test_1d_column_wrapped_non_cross(self, long_df):
        """Non-cross pairing gives each subplot distinct variables, so all labels show."""
        p = (
            Plot(long_df)
            .pair(x=["a", "b", "c"], y=["x", "y", "z"], wrap=2, cross=False)
            .plot()
        )
        for s in p._subplots:
            ax = s["ax"]
            assert ax.xaxis.get_label().get_visible()
            assert all(t.get_visible() for t in ax.get_xticklabels())
            assert ax.yaxis.get_label().get_visible()
            assert all(t.get_visible() for t in ax.get_yticklabels())

    def test_2d(self):
        """In a 2x2 shared grid, labels appear only on the left column / bottom row."""
        p = Plot().facet(col=["a", "b"], row=["x", "y"]).plot()
        subplots = list(p._subplots)
        for s in subplots[:2]:  # top row: x info hidden
            ax = s["ax"]
            assert not ax.xaxis.get_label().get_visible()
            assert not any(t.get_visible() for t in ax.get_xticklabels())
        for s in subplots[2:]:  # bottom row: x info shown
            ax = s["ax"]
            assert ax.xaxis.get_label().get_visible()
            assert all(t.get_visible() for t in ax.get_xticklabels())
        for s in [subplots[0], subplots[2]]:  # left column: y info shown
            ax = s["ax"]
            assert ax.yaxis.get_label().get_visible()
            assert all(t.get_visible() for t in ax.get_yticklabels())
        for s in [subplots[1], subplots[3]]:  # right column: y info hidden
            ax = s["ax"]
            assert not ax.yaxis.get_label().get_visible()
            assert not any(t.get_visible() for t in ax.get_yticklabels())

    def test_2d_unshared(self):
        """Without axis sharing, tick labels stay visible everywhere;
        axis *labels* still appear only on the outer edges."""
        p = (
            Plot()
            .facet(col=["a", "b"], row=["x", "y"])
            .configure(sharex=False, sharey=False)
            .plot()
        )
        subplots = list(p._subplots)
        for s in subplots[:2]:
            ax = s["ax"]
            assert not ax.xaxis.get_label().get_visible()
            assert all(t.get_visible() for t in ax.get_xticklabels())
        for s in subplots[2:]:
            ax = s["ax"]
            assert ax.xaxis.get_label().get_visible()
            assert all(t.get_visible() for t in ax.get_xticklabels())
        for s in [subplots[0], subplots[2]]:
            ax = s["ax"]
            assert ax.yaxis.get_label().get_visible()
            assert all(t.get_visible() for t in ax.get_yticklabels())
        for s in [subplots[1], subplots[3]]:
            ax = s["ax"]
            assert not ax.yaxis.get_label().get_visible()
            assert all(t.get_visible() for t in ax.get_yticklabels())
class TestLegend:
    """Tests for how legend contents are assembled from layers and scales."""

    @pytest.fixture
    def xy(self):
        # Minimal positional data shared by every test in this class.
        return dict(x=[1, 2, 3, 4], y=[1, 2, 3, 4])

    def _check_artists(self, entry, labels, variables):
        # Shared assertions on the artist list of one legend entry.
        artists = entry[1]
        assert len(artists) == len(labels)
        for artist, label in zip(artists, labels):
            assert isinstance(artist, mpl.artist.Artist)
            assert artist.value == label
            assert artist.variables == variables

    def test_single_layer_single_variable(self, xy):
        s = pd.Series(["a", "b", "a", "c"], name="s")
        p = Plot(**xy).add(MockMark(), color=s).plot()
        entry, = p._legend_contents
        labels = categorical_order(s)
        assert entry[0] == (s.name, s.name)
        assert entry[-1] == labels
        self._check_artists(entry, labels, ["color"])

    def test_single_layer_common_variable(self, xy):
        s = pd.Series(["a", "b", "a", "c"], name="s")
        sem = dict(color=s, marker=s)
        p = Plot(**xy).add(MockMark(), **sem).plot()
        entry, = p._legend_contents
        labels = categorical_order(s)
        assert entry[0] == (s.name, s.name)
        assert entry[-1] == labels
        self._check_artists(entry, labels, list(sem))

    def test_single_layer_common_unnamed_variable(self, xy):
        s = np.array(["a", "b", "a", "c"])
        sem = dict(color=s, marker=s)
        p = Plot(**xy).add(MockMark(), **sem).plot()
        entry, = p._legend_contents
        labels = list(np.unique(s))  # assumes sorted order
        # An unnamed array is keyed by its id rather than a name.
        assert entry[0] == (None, id(s))
        assert entry[-1] == labels
        self._check_artists(entry, labels, list(sem))

    def test_single_layer_multi_variable(self, xy):
        s1 = pd.Series(["a", "b", "a", "c"], name="s1")
        s2 = pd.Series(["m", "m", "p", "m"], name="s2")
        sem = dict(color=s1, marker=s2)
        p = Plot(**xy).add(MockMark(), **sem).plot()
        e1, e2 = p._legend_contents
        variables = {v.name: k for k, v in sem.items()}
        for entry, s in zip([e1, e2], [s1, s2]):
            assert entry[0] == (s.name, s.name)
            labels = categorical_order(s)
            assert entry[-1] == labels
            self._check_artists(entry, labels, [variables[s.name]])

    def test_multi_layer_single_variable(self, xy):
        s = pd.Series(["a", "b", "a", "c"], name="s")
        p = Plot(**xy, color=s).add(MockMark()).add(MockMark()).plot()
        e1, e2 = p._legend_contents
        labels = categorical_order(s)
        for entry in [e1, e2]:
            assert entry[0] == (s.name, s.name)
            labels = categorical_order(s)
            assert entry[-1] == labels
            self._check_artists(entry, labels, ["color"])

    def test_multi_layer_multi_variable(self, xy):
        s1 = pd.Series(["a", "b", "a", "c"], name="s1")
        s2 = pd.Series(["m", "m", "p", "m"], name="s2")
        sem = dict(color=s1), dict(marker=s2)
        variables = {"s1": "color", "s2": "marker"}
        p = Plot(**xy).add(MockMark(), **sem[0]).add(MockMark(), **sem[1]).plot()
        e1, e2 = p._legend_contents
        for entry, s in zip([e1, e2], [s1, s2]):
            assert entry[0] == (s.name, s.name)
            labels = categorical_order(s)
            assert entry[-1] == labels
            self._check_artists(entry, labels, [variables[s.name]])

    def test_multi_layer_different_artists(self, xy):

        class LineMark(MockMark):
            def _legend_artist(self, variables, value, scales):
                return mpl.lines.Line2D([], [])

        class PatchMark(MockMark):
            def _legend_artist(self, variables, value, scales):
                return mpl.patches.Patch()

        s = pd.Series(["a", "b", "a", "c"], name="s")
        p = Plot(**xy, color=s).add(LineMark()).add(PatchMark()).plot()

        legend, = p._figure.legends
        names = categorical_order(s)
        labels = [t.get_text() for t in legend.get_texts()]
        assert labels == names

        # Older matplotlib lacks the legend internals this inspects.
        if Version(mpl.__version__) >= Version("3.2"):
            contents = legend.get_children()[0]
            assert len(contents.findobj(mpl.lines.Line2D)) == len(names)
            assert len(contents.findobj(mpl.patches.Patch)) == len(names)

    def test_identity_scale_ignored(self, xy):
        # A variable mapped with an identity scale contributes no legend entry.
        s = pd.Series(["r", "g", "b", "g"])
        p = Plot(**xy).add(MockMark(), color=s).scale(color=None).plot()
        assert not p._legend_contents
| [
"seaborn.external.version.Version",
"matplotlib.colors.to_rgba_array",
"numpy.array",
"pytest.fixture",
"pandas.testing.assert_frame_equal",
"matplotlib.lines.Line2D",
"numpy.reshape",
"pytest.mark.xfail",
"itertools.product",
"seaborn._core.plot.Plot",
"numpy.datetime64",
"pandas.DataFrame",
... | [((607, 683), 'functools.partial', 'functools.partial', (['assert_series_equal'], {'check_names': '(False)', 'check_dtype': '(False)'}), '(assert_series_equal, check_names=False, check_dtype=False)\n', (624, 683), False, 'import functools\n'), ((4253, 4295), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""var"""', "['x', 'y']"], {}), "('var', ['x', 'y'])\n", (4276, 4295), False, 'import pytest\n'), ((7585, 7642), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Need decision on default stat"""'}), "(reason='Need decision on default stat')\n", (7602, 7642), False, 'import pytest\n'), ((8188, 8281), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""arg,expected"""', "[('x', 'x'), ('y', 'y'), ('v', 'x'), ('h', 'y')]"], {}), "('arg,expected', [('x', 'x'), ('y', 'y'), ('v', 'x'),\n ('h', 'y')])\n", (8211, 8281), False, 'import pytest\n'), ((11173, 11241), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Temporal auto-conversion not implemented"""'}), "(reason='Temporal auto-conversion not implemented')\n", (11190, 11241), False, 'import pytest\n'), ((12127, 12202), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Custom log scale needs log name for consistency"""'}), "(reason='Custom log scale needs log name for consistency')\n", (12144, 12202), False, 'import pytest\n'), ((18233, 18335), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Correct output representation for color with identity scale undefined"""'}), "(reason=\n 'Correct output representation for color with identity scale undefined')\n", (18250, 18335), False, 'import pytest\n'), ((18998, 19097), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Need decision on what to do with scale defined for unused variable"""'}), "(reason=\n 'Need decision on what to do with scale defined for unused variable')\n", (19015, 19097), False, 'import pytest\n'), ((21824, 21880), 'pytest.mark.parametrize', 'pytest.mark.parametrize', 
(['"""split_var"""', "['color', 'group']"], {}), "('split_var', ['color', 'group'])\n", (21847, 21880), False, 'import pytest\n'), ((29044, 29101), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Plot.save not yet implemented"""'}), "(reason='Plot.save not yet implemented')\n", (29061, 29101), False, 'import pytest\n'), ((29368, 29415), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""facet"""', '[True, False]'], {}), "('facet', [True, False])\n", (29391, 29415), False, 'import pytest\n'), ((29823, 29870), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""facet"""', '[True, False]'], {}), "('facet', [True, False])\n", (29846, 29870), False, 'import pytest\n'), ((31904, 31956), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""', 'params': "['row', 'col']"}), "(scope='class', params=['row', 'col'])\n", (31918, 31956), False, 'import pytest\n'), ((32020, 32089), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""', 'params': "['reverse', 'subset', 'expand']"}), "(scope='class', params=['reverse', 'subset', 'expand'])\n", (32034, 32089), False, 'import pytest\n'), ((37071, 37148), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""vector_type"""', '[list, np.array, pd.Series, pd.Index]'], {}), "('vector_type', [list, np.array, pd.Series, pd.Index])\n", (37094, 37148), False, 'import pytest\n'), ((37614, 37656), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dim"""', "['x', 'y']"], {}), "('dim', ['x', 'y'])\n", (37637, 37656), False, 'import pytest\n'), ((40022, 40093), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""variables"""', "[('rows', 'y'), ('columns', 'x')]"], {}), "('variables', [('rows', 'y'), ('columns', 'x')])\n", (40045, 40093), False, 'import pytest\n'), ((40455, 40526), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""variables"""', "[('columns', 'y'), ('rows', 'x')]"], {}), "('variables', [('columns', 'y'), ('rows', 'x')])\n", (40478, 40526), False, 
'import pytest\n'), ((45191, 45293), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""facet_kws,pair_kws"""', "[({'col': 'b'}, {}), ({}, {'x': ['x', 'y', 'f']})]"], {}), "('facet_kws,pair_kws', [({'col': 'b'}, {}), ({}, {\n 'x': ['x', 'y', 'f']})])\n", (45214, 45293), False, 'import pytest\n'), ((46074, 46176), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""facet_kws,pair_kws"""', "[({'row': 'b'}, {}), ({}, {'y': ['x', 'y', 'f']})]"], {}), "('facet_kws,pair_kws', [({'row': 'b'}, {}), ({}, {\n 'y': ['x', 'y', 'f']})])\n", (46097, 46176), False, 'import pytest\n'), ((968, 992), 'seaborn.external.version.Version', 'Version', (['mpl.__version__'], {}), '(mpl.__version__)\n', (975, 992), False, 'from seaborn.external.version import Version\n'), ((995, 1009), 'seaborn.external.version.Version', 'Version', (['"""3.2"""'], {}), "('3.2')\n", (1002, 1009), False, 'from seaborn.external.version import Version\n'), ((1872, 1896), 'matplotlib.lines.Line2D', 'mpl.lines.Line2D', (['[]', '[]'], {}), '([], [])\n', (1888, 1896), True, 'import matplotlib as mpl\n'), ((2028, 2034), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (2032, 2034), False, 'from seaborn._core.plot import Plot\n'), ((2172, 2185), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (2176, 2185), False, 'from seaborn._core.plot import Plot\n'), ((2380, 2406), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df, **variables)\n', (2384, 2406), False, 'from seaborn._core.plot import Plot\n'), ((2740, 2766), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df, **variables)\n', (2744, 2766), False, 'from seaborn._core.plot import Plot\n'), ((3228, 3245), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '(**variables)\n', (3232, 3245), False, 'from seaborn._core.plot import Plot\n'), ((3600, 3617), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '(**variables)\n', (3604, 3617), False, 'from seaborn._core.plot import Plot\n'), ((3939, 3957), 
'seaborn._core.plot.Plot', 'Plot', ([], {'data': 'long_df'}), '(data=long_df)\n', (3943, 3957), False, 'from seaborn._core.plot import Plot\n'), ((4575, 4598), 'seaborn._core.plot.Plot', 'Plot', (['long_df', '"""a"""', '"""b"""'], {}), "(long_df, 'a', 'b')\n", (4579, 4598), False, 'from seaborn._core.plot import Plot\n'), ((4758, 4790), 'seaborn._core.plot.Plot', 'Plot', (["long_df['a']", "long_df['b']"], {}), "(long_df['a'], long_df['b'])\n", (4762, 4790), False, 'from seaborn._core.plot import Plot\n'), ((4950, 4968), 'seaborn._core.plot.Plot', 'Plot', (['long_df', '"""a"""'], {}), "(long_df, 'a')\n", (4954, 4968), False, 'from seaborn._core.plot import Plot\n'), ((5121, 5139), 'seaborn._core.plot.Plot', 'Plot', (["long_df['a']"], {}), "(long_df['a'])\n", (5125, 5139), False, 'from seaborn._core.plot import Plot\n'), ((5830, 5903), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['p._data.frame', "layer['data'].frame"], {'check_dtype': '(False)'}), "(p._data.frame, layer['data'].frame, check_dtype=False)\n", (5848, 5903), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((8986, 9013), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': '"""x"""', 'y': '"""y"""'}), "(long_df, x='x', y='y')\n", (8990, 9013), False, 'from seaborn._core.plot import Plot\n'), ((9758, 9764), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (9762, 9764), False, 'from seaborn._core.plot import Plot\n'), ((13519, 13550), 'seaborn._core.rules.categorical_order', 'categorical_order', (['long_df[col]'], {}), '(long_df[col])\n', (13536, 13550), False, 'from seaborn._core.rules import categorical_order\n'), ((17292, 17326), 'pandas.Series', 'pd.Series', (['[0.5, 0.1, 0.1, 0.9, 3]'], {}), '([0.5, 0.1, 0.1, 0.9, 3])\n', (17301, 17326), True, 'import pandas as pd\n'), ((22249, 22286), 'seaborn._core.rules.categorical_order', 'categorical_order', (['long_df[split_col]'], {}), '(long_df[split_col])\n', (22266, 22286), False, 'from 
seaborn._core.rules import categorical_order\n'), ((23394, 23431), 'seaborn._core.rules.categorical_order', 'categorical_order', (['long_df[split_col]'], {}), '(long_df[split_col])\n', (23411, 23431), False, 'from seaborn._core.rules import categorical_order\n'), ((24653, 24688), 'seaborn._core.rules.categorical_order', 'categorical_order', (['long_df[row_var]'], {}), '(long_df[row_var])\n', (24670, 24688), False, 'from seaborn._core.rules import categorical_order\n'), ((25075, 25098), 'itertools.product', 'itertools.product', (['x', 'y'], {}), '(x, y)\n', (25092, 25098), False, 'import itertools\n'), ((25878, 25911), 'seaborn._core.rules.categorical_order', 'categorical_order', (['long_df[group]'], {}), '(long_df[group])\n', (25895, 25911), False, 'from seaborn._core.rules import categorical_order\n'), ((25934, 25965), 'itertools.product', 'itertools.product', (['x', 'y', 'groups'], {}), '(x, y, groups)\n', (25951, 25965), False, 'import itertools\n'), ((26425, 26456), 'seaborn._core.rules.categorical_order', 'categorical_order', (['long_df[row]'], {}), '(long_df[row])\n', (26442, 26456), False, 'from seaborn._core.rules import categorical_order\n'), ((26479, 26507), 'itertools.product', 'itertools.product', (['x', 'facets'], {}), '(x, facets)\n', (26496, 26507), False, 'import itertools\n'), ((27199, 27235), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['long_df', 'orig_df'], {}), '(long_df, orig_df)\n', (27217, 27235), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((27709, 27732), 'seaborn._core.plot.Plot', 'Plot', (['long_df', '"""x"""', '"""y"""'], {}), "(long_df, 'x', 'y')\n", (27713, 27732), False, 'from seaborn._core.plot import Plot\n'), ((28161, 28170), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (28168, 28170), True, 'import matplotlib.pyplot as plt\n'), ((28242, 28248), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (28246, 28248), False, 'from seaborn._core.plot import Plot\n'), ((28472, 
28481), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (28479, 28481), True, 'import matplotlib.pyplot as plt\n'), ((28774, 28780), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (28778, 28780), False, 'from seaborn._core.plot import Plot\n'), ((29466, 29485), 'matplotlib.figure.Figure', 'mpl.figure.Figure', ([], {}), '()\n', (29483, 29485), True, 'import matplotlib as mpl\n'), ((30259, 30265), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (30263, 30265), False, 'from seaborn._core.plot import Plot\n'), ((32396, 32429), 'seaborn._core.rules.categorical_order', 'categorical_order', (['df[key]', 'order'], {}), '(df[key], order)\n', (32413, 32429), False, 'from seaborn._core.rules import categorical_order\n'), ((33647, 33705), 'itertools.product', 'itertools.product', (["*[order[dim] for dim in ['row', 'col']]"], {}), "(*[order[dim] for dim in ['row', 'col']])\n", (33664, 33705), False, 'import itertools\n'), ((35594, 35628), 'numpy.reshape', 'np.reshape', (['p3._figure.axes', 'shape'], {}), '(p3._figure.axes, shape)\n', (35604, 35628), True, 'import numpy as np\n'), ((36755, 36778), 'itertools.product', 'itertools.product', (['y', 'x'], {}), '(y, x)\n', (36772, 36778), False, 'import itertools\n'), ((37439, 37452), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (37443, 37452), False, 'from seaborn._core.plot import Plot\n'), ((39637, 39668), 'seaborn._core.rules.categorical_order', 'categorical_order', (['long_df[col]'], {}), '(long_df[col])\n', (39654, 39668), False, 'from seaborn._core.rules import categorical_order\n'), ((39684, 39718), 'itertools.product', 'itertools.product', (['y', 'facet_levels'], {}), '(y, facet_levels)\n', (39701, 39718), False, 'import itertools\n'), ((41108, 41142), 'numpy.reshape', 'np.reshape', (['p1._figure.axes', 'shape'], {}), '(p1._figure.axes, shape)\n', (41118, 41142), True, 'import numpy as np\n'), ((42243, 42276), 'numpy.reshape', 'np.reshape', (['p._figure.axes', 'shape'], {}), 
'(p._figure.axes, shape)\n', (42253, 42276), True, 'import numpy as np\n'), ((44553, 44566), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (44557, 44566), False, 'from seaborn._core.plot import Plot\n'), ((51243, 51284), 'pandas.Series', 'pd.Series', (["['a', 'b', 'a', 'c']"], {'name': '"""s"""'}), "(['a', 'b', 'a', 'c'], name='s')\n", (51252, 51284), True, 'import pandas as pd\n'), ((51390, 51410), 'seaborn._core.rules.categorical_order', 'categorical_order', (['s'], {}), '(s)\n', (51407, 51410), False, 'from seaborn._core.rules import categorical_order\n'), ((51795, 51836), 'pandas.Series', 'pd.Series', (["['a', 'b', 'a', 'c']"], {'name': '"""s"""'}), "(['a', 'b', 'a', 'c'], name='s')\n", (51804, 51836), True, 'import pandas as pd\n'), ((51978, 51998), 'seaborn._core.rules.categorical_order', 'categorical_order', (['s'], {}), '(s)\n', (51995, 51998), False, 'from seaborn._core.rules import categorical_order\n'), ((52391, 52421), 'numpy.array', 'np.array', (["['a', 'b', 'a', 'c']"], {}), "(['a', 'b', 'a', 'c'])\n", (52399, 52421), True, 'import numpy as np\n'), ((52988, 53030), 'pandas.Series', 'pd.Series', (["['a', 'b', 'a', 'c']"], {'name': '"""s1"""'}), "(['a', 'b', 'a', 'c'], name='s1')\n", (52997, 53030), True, 'import pandas as pd\n'), ((53044, 53086), 'pandas.Series', 'pd.Series', (["['m', 'm', 'p', 'm']"], {'name': '"""s2"""'}), "(['m', 'm', 'p', 'm'], name='s2')\n", (53053, 53086), True, 'import pandas as pd\n'), ((53786, 53827), 'pandas.Series', 'pd.Series', (["['a', 'b', 'a', 'c']"], {'name': '"""s"""'}), "(['a', 'b', 'a', 'c'], name='s')\n", (53795, 53827), True, 'import pandas as pd\n'), ((53953, 53973), 'seaborn._core.rules.categorical_order', 'categorical_order', (['s'], {}), '(s)\n', (53970, 53973), False, 'from seaborn._core.rules import categorical_order\n'), ((54459, 54501), 'pandas.Series', 'pd.Series', (["['a', 'b', 'a', 'c']"], {'name': '"""s1"""'}), "(['a', 'b', 'a', 'c'], name='s1')\n", (54468, 54501), True, 'import 
pandas as pd\n'), ((54515, 54557), 'pandas.Series', 'pd.Series', (["['m', 'm', 'p', 'm']"], {'name': '"""s2"""'}), "(['m', 'm', 'p', 'm'], name='s2')\n", (54524, 54557), True, 'import pandas as pd\n'), ((55580, 55621), 'pandas.Series', 'pd.Series', (["['a', 'b', 'a', 'c']"], {'name': '"""s"""'}), "(['a', 'b', 'a', 'c'], name='s')\n", (55589, 55621), True, 'import pandas as pd\n'), ((55749, 55769), 'seaborn._core.rules.categorical_order', 'categorical_order', (['s'], {}), '(s)\n', (55766, 55769), False, 'from seaborn._core.rules import categorical_order\n'), ((56173, 56204), 'pandas.Series', 'pd.Series', (["['r', 'g', 'b', 'g']"], {}), "(['r', 'g', 'b', 'g'])\n", (56182, 56204), True, 'import pandas as pd\n'), ((4170, 4205), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'err'}), '(TypeError, match=err)\n', (4183, 4205), False, 'import pytest\n'), ((4219, 4246), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'data': 'long_df'}), '(long_df, data=long_df)\n', (4223, 4246), False, 'from seaborn._core.plot import Plot\n'), ((4425, 4460), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'err'}), '(TypeError, match=err)\n', (4438, 4460), False, 'import pytest\n'), ((4474, 4511), 'seaborn._core.plot.Plot', 'Plot', (['long_df', '"""a"""', '"""b"""'], {}), "(long_df, 'a', 'b', **{var: 'c'})\n", (4478, 4511), False, 'from seaborn._core.plot import Plot\n'), ((5382, 5417), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'err'}), '(TypeError, match=err)\n', (5395, 5417), False, 'import pytest\n'), ((5431, 5459), 'seaborn._core.plot.Plot', 'Plot', (['long_df', '"""x"""', '"""y"""', '"""z"""'], {}), "(long_df, 'x', 'y', 'z')\n", (5435, 5459), False, 'from seaborn._core.plot import Plot\n'), ((5589, 5624), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'err'}), '(TypeError, match=err)\n', (5602, 5624), False, 'import pytest\n'), ((5638, 5660), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'bad': '"""x"""'}), "(long_df, 
bad='x')\n", (5642, 5660), False, 'from seaborn._core.plot import Plot\n'), ((9778, 9840), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""mark must be a Mark instance"""'}), "(TypeError, match='mark must be a Mark instance')\n", (9791, 9840), False, 'import pytest\n'), ((9932, 9994), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""stat must be a Stat instance"""'}), "(TypeError, match='stat must be a Stat instance')\n", (9945, 9994), False, 'import pytest\n'), ((13902, 13926), 'seaborn.external.version.Version', 'Version', (['mpl.__version__'], {}), '(mpl.__version__)\n', (13909, 13926), False, 'from seaborn.external.version import Version\n'), ((13929, 13943), 'seaborn.external.version.Version', 'Version', (['"""3.3"""'], {}), "('3.3')\n", (13936, 13943), False, 'from seaborn.external.version import Version\n'), ((14407, 14436), 'pandas.Series', 'pd.Series', (['[0.0, 1.0]', '[0, 1]'], {}), '([0.0, 1.0], [0, 1])\n', (14416, 14436), True, 'import pandas as pd\n'), ((14487, 14516), 'pandas.Series', 'pd.Series', (['[0.0, 2.0]', '[2, 3]'], {}), '([0.0, 2.0], [2, 3])\n', (14496, 14516), True, 'import pandas as pd\n'), ((14943, 14972), 'pandas.Series', 'pd.Series', (['[0.0, 1.0]', '[0, 1]'], {}), '([0.0, 1.0], [0, 1])\n', (14952, 14972), True, 'import pandas as pd\n'), ((15023, 15052), 'pandas.Series', 'pd.Series', (['[0.0, 1.0]', '[2, 3]'], {}), '([0.0, 1.0], [2, 3])\n', (15032, 15052), True, 'import pandas as pd\n'), ((15707, 15736), 'pandas.Series', 'pd.Series', (['[0.0, 1.0]', '[0, 1]'], {}), '([0.0, 1.0], [0, 1])\n', (15716, 15736), True, 'import pandas as pd\n'), ((15787, 15816), 'pandas.Series', 'pd.Series', (['[0.0, 2.0]', '[2, 3]'], {}), '([0.0, 2.0], [2, 3])\n', (15796, 15816), True, 'import pandas as pd\n'), ((15867, 15904), 'pandas.Series', 'pd.Series', (['[0.0, 1.0, 2.0]', '[4, 5, 7]'], {}), '([0.0, 1.0, 2.0], [4, 5, 7])\n', (15876, 15904), True, 'import pandas as pd\n'), ((15954, 15975), 'pandas.Series', 'pd.Series', 
(['[2.0]', '[6]'], {}), '([2.0], [6])\n', (15963, 15975), True, 'import pandas as pd\n'), ((16376, 16405), 'pandas.Series', 'pd.Series', (['[0.0, 1.0]', '[0, 1]'], {}), '([0.0, 1.0], [0, 1])\n', (16385, 16405), True, 'import pandas as pd\n'), ((16456, 16485), 'pandas.Series', 'pd.Series', (['[0.0, 1.0]', '[0, 1]'], {}), '([0.0, 1.0], [0, 1])\n', (16465, 16485), True, 'import pandas as pd\n'), ((17066, 17095), 'pandas.Series', 'pd.Series', (['[0.0, 1.0]', '[0, 1]'], {}), '([0.0, 1.0], [0, 1])\n', (17075, 17095), True, 'import pandas as pd\n'), ((17146, 17175), 'pandas.Series', 'pd.Series', (['[0.0, 2.0]', '[0, 1]'], {}), '([0.0, 2.0], [0, 1])\n', (17155, 17175), True, 'import pandas as pd\n'), ((16518, 16542), 'seaborn.external.version.Version', 'Version', (['mpl.__version__'], {}), '(mpl.__version__)\n', (16525, 16542), False, 'from seaborn.external.version import Version\n'), ((16545, 16561), 'seaborn.external.version.Version', 'Version', (['"""3.4.0"""'], {}), "('3.4.0')\n", (16552, 16561), False, 'from seaborn.external.version import Version\n'), ((18560, 18587), 'matplotlib.colors.to_rgba_array', 'mpl.colors.to_rgba_array', (['c'], {}), '(c)\n', (18584, 18587), True, 'import matplotlib as mpl\n'), ((18891, 18918), 'matplotlib.colors.to_rgba_array', 'mpl.colors.to_rgba_array', (['c'], {}), '(c)\n', (18915, 18918), True, 'import matplotlib as mpl\n'), ((19318, 19356), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': 'err'}), '(RuntimeError, match=err)\n', (19331, 19356), False, 'import pytest\n'), ((20041, 20103), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['m.passed_data[0][col]', 'p._data.frame[col]'], {}), '(m.passed_data[0][col], p._data.frame[col])\n', (20060, 20103), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((21455, 21485), 'itertools.product', 'itertools.product', (['*split_keys'], {}), '(*split_keys)\n', (21472, 21485), False, 'import itertools\n'), ((21512, 21539), 'pandas.Series', 
'pd.Series', (['(True)', 'data.index'], {}), '(True, data.index)\n', (21521, 21539), True, 'import pandas as pd\n'), ((22812, 22843), 'seaborn._core.rules.categorical_order', 'categorical_order', (['long_df[col]'], {}), '(long_df[col])\n', (22829, 22843), False, 'from seaborn._core.rules import categorical_order\n'), ((24054, 24085), 'seaborn._core.rules.categorical_order', 'categorical_order', (['long_df[col]'], {}), '(long_df[col])\n', (24071, 24085), False, 'from seaborn._core.rules import categorical_order\n'), ((27958, 27975), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (27973, 27975), True, 'import matplotlib.pyplot as plt\n'), ((28263, 28299), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (28286, 28299), False, 'import warnings\n'), ((28917, 28938), 'imghdr.what', 'imghdr.what', (['""""""', 'data'], {}), "('', data)\n", (28928, 28938), False, 'import imghdr\n'), ((29719, 29743), 'seaborn.external.version.Version', 'Version', (['mpl.__version__'], {}), '(mpl.__version__)\n', (29726, 29743), False, 'from seaborn.external.version import Version\n'), ((29746, 29760), 'seaborn.external.version.Version', 'Version', (['"""3.4"""'], {}), "('3.4')\n", (29753, 29760), False, 'from seaborn.external.version import Version\n'), ((30279, 30342), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""The `Plot.on`.+<class \'list\'>"""'}), '(TypeError, match="The `Plot.on`.+<class \'list\'>")\n', (30292, 30342), False, 'import pytest\n'), ((30518, 30586), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Cannot create multiple subplots"""'}), "(RuntimeError, match='Cannot create multiple subplots')\n", (30531, 30586), False, 'import pytest\n'), ((30682, 30750), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Cannot create multiple subplots"""'}), "(RuntimeError, match='Cannot create multiple subplots')\n", (30695, 30750), False, 'import pytest\n'), 
((33263, 33294), 'seaborn._core.rules.categorical_order', 'categorical_order', (['long_df[key]'], {}), '(long_df[key])\n', (33280, 33294), False, 'from seaborn._core.rules import categorical_order\n'), ((37531, 37566), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'err'}), '(TypeError, match=err)\n', (37544, 37566), False, 'import pytest\n'), ((39368, 39425), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""You must pass `data`"""'}), "(RuntimeError, match='You must pass `data`')\n", (39381, 39425), False, 'import pytest\n'), ((40383, 40426), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': 'expected'}), '(RuntimeError, match=expected)\n', (40396, 40426), False, 'import pytest\n'), ((40873, 40916), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': 'expected'}), '(RuntimeError, match=expected)\n', (40886, 40916), False, 'import pytest\n'), ((44649, 44687), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': 'err'}), '(RuntimeError, match=err)\n', (44662, 44687), False, 'import pytest\n'), ((52569, 52581), 'numpy.unique', 'np.unique', (['s'], {}), '(s)\n', (52578, 52581), True, 'import numpy as np\n'), ((53385, 53405), 'seaborn._core.rules.categorical_order', 'categorical_order', (['s'], {}), '(s)\n', (53402, 53405), False, 'from seaborn._core.rules import categorical_order\n'), ((54068, 54088), 'seaborn._core.rules.categorical_order', 'categorical_order', (['s'], {}), '(s)\n', (54085, 54088), False, 'from seaborn._core.rules import categorical_order\n'), ((54886, 54906), 'seaborn._core.rules.categorical_order', 'categorical_order', (['s'], {}), '(s)\n', (54903, 54906), False, 'from seaborn._core.rules import categorical_order\n'), ((55873, 55897), 'seaborn.external.version.Version', 'Version', (['mpl.__version__'], {}), '(mpl.__version__)\n', (55880, 55897), False, 'from seaborn.external.version import Version\n'), ((55901, 55915), 'seaborn.external.version.Version', 'Version', (['"""3.2"""'], 
{}), "('3.2')\n", (55908, 55915), False, 'from seaborn.external.version import Version\n'), ((3713, 3727), 'pandas.Series', 'pd.Series', (['col'], {}), '(col)\n', (3722, 3727), True, 'import pandas as pd\n'), ((7763, 7769), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (7767, 7769), False, 'from seaborn._core.plot import Plot\n'), ((8048, 8054), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (8052, 8054), False, 'from seaborn._core.plot import Plot\n'), ((9069, 9082), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (9073, 9082), False, 'from seaborn._core.plot import Plot\n'), ((9168, 9199), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'y': '"""x"""', 'color': '"""a"""'}), "(long_df, y='x', color='a')\n", (9172, 9199), False, 'from seaborn._core.plot import Plot\n'), ((9287, 9325), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': '"""x"""', 'y': '"""y"""', 'color': '"""a"""'}), "(long_df, x='x', y='y', color='a')\n", (9291, 9325), False, 'from seaborn._core.plot import Plot\n'), ((9623, 9643), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'y': '"""x"""'}), "(long_df, y='x')\n", (9627, 9643), False, 'from seaborn._core.plot import Plot\n'), ((15301, 15348), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['x', 'row', 'col']"}), "(data, columns=['x', 'row', 'col'])\n", (15313, 15348), True, 'import pandas as pd\n'), ((16067, 16107), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['x1', 'x2']"}), "(data, columns=['x1', 'x2'])\n", (16079, 16107), True, 'import pandas as pd\n'), ((16741, 16781), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['x1', 'x2']"}), "(data, columns=['x1', 'x2'])\n", (16753, 16781), True, 'import pandas as pd\n'), ((19166, 19206), 'seaborn._core.plot.Plot', 'Plot', ([], {'x': '[1, 2, 3]', 'color': "['a', 'b', 'c']"}), "(x=[1, 2, 3], color=['a', 'b', 'c'])\n", (19170, 19206), False, 'from seaborn._core.plot import Plot\n'), ((19215, 19227), 
'seaborn._core.scales.Continuous', 'Continuous', ([], {}), '()\n', (19225, 19227), False, 'from seaborn._core.scales import Nominal, Continuous\n'), ((19462, 19468), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (19466, 19468), False, 'from seaborn._core.plot import Plot\n'), ((19679, 19685), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (19683, 19685), False, 'from seaborn._core.plot import Plot\n'), ((20980, 21041), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['mark.passed_data[i][var]', 'split_data[col]'], {}), '(mark.passed_data[i][var], split_data[col])\n', (20998, 21041), False, 'from numpy.testing import assert_array_equal\n'), ((21331, 21361), 'itertools.product', 'itertools.product', (['*split_keys'], {}), '(*split_keys)\n', (21348, 21361), False, 'import itertools\n'), ((21756, 21817), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['mark.passed_data[i][var]', 'split_data[col]'], {}), '(mark.passed_data[i][var], split_data[col])\n', (21774, 21817), False, 'from numpy.testing import assert_array_equal\n'), ((27924, 27930), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (27928, 27930), False, 'from seaborn._core.plot import Plot\n'), ((28078, 28084), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (28082, 28084), False, 'from seaborn._core.plot import Plot\n'), ((28123, 28140), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (28138, 28140), True, 'import matplotlib.pyplot as plt\n'), ((28434, 28451), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (28449, 28451), True, 'import matplotlib.pyplot as plt\n'), ((29136, 29142), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (29140, 29142), False, 'from seaborn._core.plot import Plot\n'), ((29193, 29212), 'matplotlib.figure.Figure', 'mpl.figure.Figure', ([], {}), '()\n', (29210, 29212), True, 'import matplotlib as mpl\n'), ((29931, 29950), 'matplotlib.figure.Figure', 'mpl.figure.Figure', ([], {}), '()\n', (29948, 
29950), True, 'import matplotlib as mpl\n'), ((30428, 30447), 'matplotlib.figure.Figure', 'mpl.figure.Figure', ([], {}), '()\n', (30445, 30447), True, 'import matplotlib as mpl\n'), ((31366, 31372), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (31370, 31372), False, 'from seaborn._core.plot import Plot\n'), ((32890, 32903), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (32894, 32903), False, 'from seaborn._core.plot import Plot\n'), ((33061, 33074), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (33065, 33074), False, 'from seaborn._core.plot import Plot\n'), ((33308, 33321), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (33312, 33321), False, 'from seaborn._core.plot import Plot\n'), ((33567, 33593), 'seaborn._core.rules.categorical_order', 'categorical_order', (['df[key]'], {}), '(df[key])\n', (33584, 33593), False, 'from seaborn._core.rules import categorical_order\n'), ((34292, 34305), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (34296, 34305), False, 'from seaborn._core.plot import Plot\n'), ((34526, 34557), 'seaborn._core.rules.categorical_order', 'categorical_order', (['long_df[key]'], {}), '(long_df[key])\n', (34543, 34557), False, 'from seaborn._core.rules import categorical_order\n'), ((34628, 34641), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (34632, 34641), False, 'from seaborn._core.plot import Plot\n'), ((34842, 34855), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (34846, 34855), False, 'from seaborn._core.plot import Plot\n'), ((35452, 35496), 'seaborn._core.rules.categorical_order', 'categorical_order', (["long_df[variables['row']]"], {}), "(long_df[variables['row']])\n", (35469, 35496), False, 'from seaborn._core.rules import categorical_order\n'), ((35515, 35559), 'seaborn._core.rules.categorical_order', 'categorical_order', (["long_df[variables['col']]"], {}), "(long_df[variables['col']])\n", 
(35532, 35559), False, 'from seaborn._core.rules import categorical_order\n'), ((38629, 38642), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (38633, 38642), False, 'from seaborn._core.plot import Plot\n'), ((38862, 38882), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'y': '"""y"""'}), "(long_df, y='y')\n", (38866, 38882), False, 'from seaborn._core.plot import Plot\n'), ((39112, 39136), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'color': '"""a"""'}), "(long_df, color='a')\n", (39116, 39136), False, 'from seaborn._core.plot import Plot\n'), ((40995, 41008), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (40999, 41008), False, 'from seaborn._core.plot import Plot\n'), ((44861, 44884), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': 'x', 'y': 'y'}), '(long_df, x=x, y=y)\n', (44865, 44884), False, 'from seaborn._core.plot import Plot\n'), ((45437, 45460), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': 'x', 'y': 'y'}), '(long_df, x=x, y=y)\n', (45441, 45460), False, 'from seaborn._core.plot import Plot\n'), ((46317, 46340), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': 'x', 'y': 'y'}), '(long_df, x=x, y=y)\n', (46321, 46340), False, 'from seaborn._core.plot import Plot\n'), ((55399, 55423), 'matplotlib.lines.Line2D', 'mpl.lines.Line2D', (['[]', '[]'], {}), '([], [])\n', (55415, 55423), True, 'import matplotlib as mpl\n'), ((55547, 55566), 'matplotlib.patches.Patch', 'mpl.patches.Patch', ([], {}), '()\n', (55564, 55566), True, 'import matplotlib as mpl\n'), ((13998, 14025), 'numpy.datetime64', 'np.datetime64', (['"""0000-12-31"""'], {}), "('0000-12-31')\n", (14011, 14025), True, 'import numpy as np\n'), ((22962, 22992), 'itertools.product', 'itertools.product', (['*split_keys'], {}), '(*split_keys)\n', (22979, 22992), False, 'import itertools\n'), ((24234, 24271), 'seaborn._core.rules.categorical_order', 'categorical_order', (['long_df[group_col]'], {}), '(long_df[group_col])\n', (24251, 
24271), False, 'from seaborn._core.rules import categorical_order\n'), ((39439, 39445), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (39443, 39445), False, 'from seaborn._core.plot import Plot\n'), ((5744, 5771), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': '"""x"""', 'y': '"""y"""'}), "(long_df, x='x', y='y')\n", (5748, 5771), False, 'from seaborn._core.plot import Plot\n'), ((5973, 5993), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': '"""x"""'}), "(long_df, x='x')\n", (5977, 5993), False, 'from seaborn._core.plot import Plot\n'), ((6286, 6306), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': '"""x"""'}), "(long_df, x='x')\n", (6290, 6306), False, 'from seaborn._core.plot import Plot\n'), ((6606, 6612), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (6610, 6612), False, 'from seaborn._core.plot import Plot\n'), ((6970, 6997), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': '"""x"""', 'y': '"""y"""'}), "(long_df, x='x', y='y')\n", (6974, 6997), False, 'from seaborn._core.plot import Plot\n'), ((7344, 7371), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': '"""x"""', 'y': '"""y"""'}), "(long_df, x='x', y='y')\n", (7348, 7371), False, 'from seaborn._core.plot import Plot\n'), ((8768, 8798), 'seaborn._core.plot.Plot', 'Plot', ([], {'x': '[1, 2, 3]', 'y': '[1, 2, 3]'}), '(x=[1, 2, 3], y=[1, 2, 3])\n', (8772, 8798), False, 'from seaborn._core.plot import Plot\n'), ((9432, 9459), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': '"""x"""', 'y': '"""y"""'}), "(long_df, x='x', y='y')\n", (9436, 9459), False, 'from seaborn._core.plot import Plot\n'), ((10399, 10405), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (10403, 10405), False, 'from seaborn._core.plot import Plot\n'), ((10665, 10700), 'pandas.Series', 'pd.Series', (["['a', 'b']"], {'index': '[2, 3]'}), "(['a', 'b'], index=[2, 3])\n", (10674, 10700), True, 'import pandas as pd\n'), ((10837, 10860), 'seaborn._core.plot.Plot', 'Plot', ([], {'x': "['b', 
'c', 'a']"}), "(x=['b', 'c', 'a'])\n", (10841, 10860), False, 'from seaborn._core.plot import Plot\n'), ((12251, 12257), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (12255, 12257), False, 'from seaborn._core.plot import Plot\n'), ((13466, 13486), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': 'col'}), '(long_df, x=col)\n', (13470, 13486), False, 'from seaborn._core.plot import Plot\n'), ((13799, 13819), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': 'col'}), '(long_df, x=col)\n', (13803, 13819), False, 'from seaborn._core.plot import Plot\n'), ((18085, 18094), 'seaborn._core.plot.Plot', 'Plot', ([], {'y': 'y'}), '(y=y)\n', (18089, 18094), False, 'from seaborn._core.plot import Plot\n'), ((19816, 19843), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': '"""f"""', 'y': '"""z"""'}), "(long_df, x='f', y='z')\n", (19820, 19843), False, 'from seaborn._core.plot import Plot\n'), ((22186, 22212), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df, **data_vars)\n', (22190, 22212), False, 'from seaborn._core.plot import Plot\n'), ((22748, 22774), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df, **data_vars)\n', (22752, 22774), False, 'from seaborn._core.plot import Plot\n'), ((26997, 27024), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': '"""z"""', 'y': '"""z"""'}), "(long_df, x='z', y='z')\n", (27001, 27024), False, 'from seaborn._core.plot import Plot\n'), ((29521, 29527), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (29525, 29527), False, 'from seaborn._core.plot import Plot\n'), ((30023, 30029), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (30027, 30029), False, 'from seaborn._core.plot import Plot\n'), ((30473, 30479), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (30477, 30479), False, 'from seaborn._core.plot import Plot\n'), ((30624, 30630), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (30628, 30630), False, 'from seaborn._core.plot import Plot\n'), ((30848, 30875), 
'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': '"""a"""', 'y': '"""b"""'}), "(long_df, x='a', y='b')\n", (30852, 30875), False, 'from seaborn._core.plot import Plot\n'), ((36172, 36178), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (36176, 36178), False, 'from seaborn._core.plot import Plot\n'), ((36468, 36474), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (36472, 36474), False, 'from seaborn._core.plot import Plot\n'), ((37273, 37286), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (37277, 37286), False, 'from seaborn._core.plot import Plot\n'), ((37805, 37818), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (37809, 37818), False, 'from seaborn._core.plot import Plot\n'), ((38068, 38081), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (38072, 38081), False, 'from seaborn._core.plot import Plot\n'), ((40211, 40224), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (40215, 40224), False, 'from seaborn._core.plot import Plot\n'), ((40657, 40670), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (40661, 40670), False, 'from seaborn._core.plot import Plot\n'), ((43046, 43066), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'y': '"""y"""'}), "(long_df, y='y')\n", (43050, 43066), False, 'from seaborn._core.plot import Plot\n'), ((43388, 43408), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': '"""x"""'}), "(long_df, x='x')\n", (43392, 43408), False, 'from seaborn._core.plot import Plot\n'), ((43791, 43811), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': '"""x"""'}), "(long_df, x='x')\n", (43795, 43811), False, 'from seaborn._core.plot import Plot\n'), ((47000, 47006), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (47004, 47006), False, 'from seaborn._core.plot import Plot\n'), ((47864, 47870), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (47868, 47870), False, 'from seaborn._core.plot import Plot\n'), ((48751, 
48764), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (48755, 48764), False, 'from seaborn._core.plot import Plot\n'), ((49212, 49218), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (49216, 49218), False, 'from seaborn._core.plot import Plot\n'), ((51297, 51307), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '(**xy)\n', (51301, 51307), False, 'from seaborn._core.plot import Plot\n'), ((51887, 51897), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '(**xy)\n', (51891, 51897), False, 'from seaborn._core.plot import Plot\n'), ((52472, 52482), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '(**xy)\n', (52476, 52482), False, 'from seaborn._core.plot import Plot\n'), ((53139, 53149), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '(**xy)\n', (53143, 53149), False, 'from seaborn._core.plot import Plot\n'), ((10196, 10223), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': 'col', 'y': 'col'}), '(long_df, x=col, y=col)\n', (10200, 10223), False, 'from seaborn._core.plot import Plot\n'), ((10615, 10632), 'pandas.Series', 'pd.Series', (['[1, 2]'], {}), '([1, 2])\n', (10624, 10632), True, 'import pandas as pd\n'), ((11028, 11045), 'seaborn._core.plot.Plot', 'Plot', ([], {'y': '[2, 1, 3]'}), '(y=[2, 1, 3])\n', (11032, 11045), False, 'from seaborn._core.plot import Plot\n'), ((11054, 11063), 'seaborn._core.scales.Nominal', 'Nominal', ([], {}), '()\n', (11061, 11063), False, 'from seaborn._core.scales import Nominal, Continuous\n'), ((11358, 11371), 'seaborn._core.plot.Plot', 'Plot', ([], {'x': 'dates'}), '(x=dates)\n', (11362, 11371), False, 'from seaborn._core.plot import Plot\n'), ((11488, 11503), 'seaborn._core.plot.Plot', 'Plot', ([], {'y': '[1, 10]'}), '(y=[1, 10])\n', (11492, 11503), False, 'from seaborn._core.plot import Plot\n'), ((11795, 11801), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (11799, 11801), False, 'from seaborn._core.plot import Plot\n'), ((12510, 12530), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': 'col'}), 
'(long_df, x=col)\n', (12514, 12530), False, 'from seaborn._core.plot import Plot\n'), ((13018, 13049), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': 'grouper', 'y': 'col'}), '(long_df, x=grouper, y=col)\n', (13022, 13049), False, 'from seaborn._core.plot import Plot\n'), ((14163, 14191), 'seaborn._core.plot.Plot', 'Plot', ([], {'x': "['a', 'b', 'a', 'c']"}), "(x=['a', 'b', 'a', 'c'])\n", (14167, 14191), False, 'from seaborn._core.plot import Plot\n'), ((16155, 16170), 'seaborn._core.plot.Plot', 'Plot', (['df'], {'y': '"""y"""'}), "(df, y='y')\n", (16159, 16170), False, 'from seaborn._core.plot import Plot\n'), ((17331, 17359), 'seaborn._core.plot.Plot', 'Plot', ([], {'x': 'x', 'y': 'y', 'linewidth': 'lw'}), '(x=x, y=y, linewidth=lw)\n', (17335, 17359), False, 'from seaborn._core.plot import Plot\n'), ((17718, 17731), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (17722, 17731), False, 'from seaborn._core.plot import Plot\n'), ((18485, 18508), 'seaborn._core.plot.Plot', 'Plot', ([], {'x': 'x', 'y': 'y', 'color': 'c'}), '(x=x, y=y, color=c)\n', (18489, 18508), False, 'from seaborn._core.plot import Plot\n'), ((18816, 18839), 'seaborn._core.plot.Plot', 'Plot', ([], {'x': 'x', 'y': 'y', 'color': 'c'}), '(x=x, y=y, color=c)\n', (18820, 18839), False, 'from seaborn._core.plot import Plot\n'), ((20373, 20386), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (20377, 20386), False, 'from seaborn._core.plot import Plot\n'), ((23299, 23325), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df, **data_vars)\n', (23303, 23325), False, 'from seaborn._core.plot import Plot\n'), ((23909, 23935), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df, **data_vars)\n', (23913, 23935), False, 'from seaborn._core.plot import Plot\n'), ((24561, 24587), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df, **axis_vars)\n', (24565, 24587), False, 'from seaborn._core.plot import Plot\n'), ((25013, 
25026), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (25017, 25026), False, 'from seaborn._core.plot import Plot\n'), ((25414, 25427), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (25418, 25427), False, 'from seaborn._core.plot import Plot\n'), ((25808, 25834), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'group': 'group'}), '(long_df, group=group)\n', (25812, 25834), False, 'from seaborn._core.plot import Plot\n'), ((27485, 27512), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': '"""z"""', 'y': '"""z"""'}), "(long_df, x='z', y='z')\n", (27489, 27512), False, 'from seaborn._core.plot import Plot\n'), ((29259, 29265), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (29263, 29265), False, 'from seaborn._core.plot import Plot\n'), ((31222, 31235), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {}), '(long_df)\n', (31226, 31235), False, 'from seaborn._core.plot import Plot\n'), ((39566, 39584), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': 'x'}), '(long_df, x=x)\n', (39570, 39584), False, 'from seaborn._core.plot import Plot\n'), ((42137, 42157), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'y': '"""y"""'}), "(long_df, y='y')\n", (42141, 42157), False, 'from seaborn._core.plot import Plot\n'), ((44297, 44317), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'x': '"""x"""'}), "(long_df, x='x')\n", (44301, 44317), False, 'from seaborn._core.plot import Plot\n'), ((50129, 50135), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '()\n', (50133, 50135), False, 'from seaborn._core.plot import Plot\n'), ((53840, 53859), 'seaborn._core.plot.Plot', 'Plot', ([], {'color': 's'}), '(**xy, color=s)\n', (53844, 53859), False, 'from seaborn._core.plot import Plot\n'), ((54668, 54678), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '(**xy)\n', (54672, 54678), False, 'from seaborn._core.plot import Plot\n'), ((55634, 55653), 'seaborn._core.plot.Plot', 'Plot', ([], {'color': 's'}), '(**xy, color=s)\n', (55638, 
55653), False, 'from seaborn._core.plot import Plot\n'), ((56217, 56227), 'seaborn._core.plot.Plot', 'Plot', ([], {}), '(**xy)\n', (56221, 56227), False, 'from seaborn._core.plot import Plot\n'), ((10559, 10582), 'pandas.Series', 'pd.Series', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (10568, 10582), True, 'import pandas as pd\n'), ((14613, 14641), 'seaborn._core.plot.Plot', 'Plot', ([], {'x': "['a', 'b', 'a', 'c']"}), "(x=['a', 'b', 'a', 'c'])\n", (14617, 14641), False, 'from seaborn._core.plot import Plot\n'), ((15410, 15425), 'seaborn._core.plot.Plot', 'Plot', (['df'], {'x': '"""x"""'}), "(df, x='x')\n", (15414, 15425), False, 'from seaborn._core.plot import Plot\n'), ((16829, 16844), 'seaborn._core.plot.Plot', 'Plot', (['df'], {'y': '"""y"""'}), "(df, y='y')\n", (16833, 16844), False, 'from seaborn._core.plot import Plot\n'), ((26351, 26369), 'seaborn._core.plot.Plot', 'Plot', (['long_df'], {'y': 'y'}), '(long_df, y=y)\n', (26355, 26369), False, 'from seaborn._core.plot import Plot\n')] |
import logging
import math
import pathlib
import typing as t
import numpy as np
import pandas as pd
from shapely.geometry import Point, LineString
from svglib.svglib import svg2rlg
from reportlab.graphics.shapes import Group, Rect, Path
from reportlab.lib.colors import Color
from .geometry import cartesian, polar, closest_intersection
# Module-level logger; emits debug statistics from simulate_flight().
_LOG = logging.getLogger(__name__)
def extract_group_shapes_recursive(group):
    """Recursively flatten a reportlab Group into its Rect and Path shapes.

    :param group: a ``reportlab.graphics.shapes.Group`` instance.
    :return: flat list of every ``Rect`` and ``Path`` found at any depth.
    :raises NotImplementedError: on any other shape type.
    """
    shapes = []
    for elem in group.contents:
        if isinstance(elem, Group):
            # Nested group: descend and splice its shapes into the result.
            shapes.extend(extract_group_shapes_recursive(elem))
        elif isinstance(elem, (Rect, Path)):
            shapes.append(elem)
        else:
            raise NotImplementedError(f'Handling of {type(elem)} is not implemented yet!')
    return shapes
def extract_vectors(path: pathlib.Path):
    """Parse an SVG drawing into wall segments and ordered flight waypoints.

    Shapes stroked in opaque black are treated as walls; every other stroke
    colour is interpreted as part of the flight path.

    :param path: location of the SVG file.
    :return: tuple ``(walls, flight_points)`` — ``walls`` is a list of
        ``LineString`` segments, ``flight_points`` the ordered ``Point``
        waypoints of the flight.
    """
    drawing = svg2rlg(str(path))
    elems = extract_group_shapes_recursive(drawing)
    walls = []
    flight = []
    for elem in elems:
        if isinstance(elem, Rect):
            # A rectangle contributes its four edges as separate segments.
            elem_lines = [
                LineString([(elem.x, elem.y), (elem.x + elem.width, elem.y)]),
                LineString([(elem.x, elem.y), (elem.x, elem.y + elem.height)]),
                LineString([(elem.x, elem.y + elem.height),
                            (elem.x + elem.width, elem.y + elem.height)]),
                LineString([(elem.x + elem.width, elem.y),
                            (elem.x + elem.width, elem.y + elem.height)]),
            ]
        elif isinstance(elem, Path):
            # elem.points is a flat [x0, y0, x1, y1, ...] list; build one
            # segment per consecutive pair of points.
            elem_lines = [
                LineString([(x1, y1), (x2, y2)])
                for x1, y1, x2, y2 in zip(
                    elem.points[:-2:2], elem.points[1:-2:2],
                    elem.points[2::2], elem.points[3::2])]
        else:
            raise NotImplementedError(f'Handling of {type(elem)} is not implemented yet!')
        if elem.strokeColor == Color(0, 0, 0, 1):
            # Opaque black stroke -> wall segment.
            walls += elem_lines
        else:
            # Any other colour -> flight-path segment.
            flight += elem_lines
    assert flight
    # Waypoints: the start of every flight segment plus the final endpoint.
    # NOTE(review): assumes the flight segments appear in path order in the
    # SVG — confirm against the drawing convention.
    flight_points = [Point(line.coords[0]) for line in flight]
    flight_points.append(Point(flight[-1].coords[1]))
    return walls, flight_points
def postprocess_flight_data(flight_data: pd.DataFrame) -> t.List[Point]:
    """Convert a DataFrame with 'x'/'y' columns into Points, negating y."""
    points = []
    for _, row in flight_data.iterrows():
        points.append(Point(row['x'], -row['y']))
    return points
def simulate_flight(walls: t.List[LineString], sweep_locations: t.List[Point],
                    skip_flight_data: bool = False):
    """Simulate GPS and LIDAR measurements along a flight path.

    For every sweep location a 360-degree LIDAR scan is simulated by casting
    500 rays against the wall segments and recording the distance to the
    closest intersection.

    :param walls: wall segments the LIDAR rays can hit.
    :param sweep_locations: positions at which scans are taken.
    :param skip_flight_data: if True, only the LIDAR data is returned.
    :return: ``(flight_data, lidar_data)`` or just ``lidar_data`` when
        ``skip_flight_data`` is True; ``lidar_data`` maps sweep index to a
        dict of ``{angle_in_degrees: distance}``.
    """
    if not skip_flight_data:
        # GPS-like table of the sweep positions; y is negated here, the
        # inverse of the flip applied in postprocess_flight_data().
        flight_data = pd.DataFrame(
            data=[[point.x, -point.y] for point in sweep_locations],
            columns=['x', 'y'], index=list(range(len(sweep_locations))))
        _LOG.debug('GPS points: %i', len(flight_data))
    # 500 evenly spaced ray directions over the full circle.
    angles = np.linspace(-np.pi, np.pi, 500)
    # Precompute ray endpoint offsets at a range of 1000 (scene units).
    cartesians = [cartesian(1000, radians) for radians in angles]
    lidar_data: t.Dict[int, t.Dict[float, float]] = {}
    for i, point in enumerate(sweep_locations):
        lidar_data[i] = {}
        for x, y in cartesians:
            ray = LineString([(point.x, point.y), (point.x + x, point.y + y)])
            intersection = closest_intersection(ray, walls)
            if intersection is None:
                # Ray escaped the scene without hitting a wall: no reading.
                continue
            distance, radians = polar(intersection.x - point.x, intersection.y - point.y)
            # Key by the measured bearing in degrees; two rays resolving to
            # the same angle would overwrite each other.
            lidar_data[i][math.degrees(radians)] = distance
        assert len(lidar_data[i]) <= 500
    _LOG.debug('LIDAR points: %i', len(lidar_data))
    if not skip_flight_data:
        assert len(flight_data) == len(lidar_data)
        return flight_data, lidar_data
    return lidar_data
| [
"logging.getLogger",
"reportlab.lib.colors.Color",
"math.degrees",
"shapely.geometry.Point",
"numpy.linspace",
"shapely.geometry.LineString"
] | [((347, 374), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (364, 374), False, 'import logging\n'), ((2708, 2739), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', '(500)'], {}), '(-np.pi, np.pi, 500)\n', (2719, 2739), True, 'import numpy as np\n'), ((2013, 2034), 'shapely.geometry.Point', 'Point', (['line.coords[0]'], {}), '(line.coords[0])\n', (2018, 2034), False, 'from shapely.geometry import Point, LineString\n'), ((2080, 2107), 'shapely.geometry.Point', 'Point', (['flight[-1].coords[1]'], {}), '(flight[-1].coords[1])\n', (2085, 2107), False, 'from shapely.geometry import Point, LineString\n'), ((2228, 2254), 'shapely.geometry.Point', 'Point', (["row['x']", "(-row['y'])"], {}), "(row['x'], -row['y'])\n", (2233, 2254), False, 'from shapely.geometry import Point, LineString\n'), ((1874, 1891), 'reportlab.lib.colors.Color', 'Color', (['(0)', '(0)', '(0)', '(1)'], {}), '(0, 0, 0, 1)\n', (1879, 1891), False, 'from reportlab.lib.colors import Color\n'), ((2988, 3048), 'shapely.geometry.LineString', 'LineString', (['[(point.x, point.y), (point.x + x, point.y + y)]'], {}), '([(point.x, point.y), (point.x + x, point.y + y)])\n', (2998, 3048), False, 'from shapely.geometry import Point, LineString\n'), ((1036, 1097), 'shapely.geometry.LineString', 'LineString', (['[(elem.x, elem.y), (elem.x + elem.width, elem.y)]'], {}), '([(elem.x, elem.y), (elem.x + elem.width, elem.y)])\n', (1046, 1097), False, 'from shapely.geometry import Point, LineString\n'), ((1115, 1177), 'shapely.geometry.LineString', 'LineString', (['[(elem.x, elem.y), (elem.x, elem.y + elem.height)]'], {}), '([(elem.x, elem.y), (elem.x, elem.y + elem.height)])\n', (1125, 1177), False, 'from shapely.geometry import Point, LineString\n'), ((1195, 1288), 'shapely.geometry.LineString', 'LineString', (['[(elem.x, elem.y + elem.height), (elem.x + elem.width, elem.y + elem.height)]'], {}), '([(elem.x, elem.y + elem.height), (elem.x + elem.width, elem.y +\n 
elem.height)])\n', (1205, 1288), False, 'from shapely.geometry import Point, LineString\n'), ((1330, 1422), 'shapely.geometry.LineString', 'LineString', (['[(elem.x + elem.width, elem.y), (elem.x + elem.width, elem.y + elem.height)]'], {}), '([(elem.x + elem.width, elem.y), (elem.x + elem.width, elem.y +\n elem.height)])\n', (1340, 1422), False, 'from shapely.geometry import Point, LineString\n'), ((3287, 3308), 'math.degrees', 'math.degrees', (['radians'], {}), '(radians)\n', (3299, 3308), False, 'import math\n'), ((1542, 1574), 'shapely.geometry.LineString', 'LineString', (['[(x1, y1), (x2, y2)]'], {}), '([(x1, y1), (x2, y2)])\n', (1552, 1574), False, 'from shapely.geometry import Point, LineString\n')] |
import astropy.units as u
import exifread
import matplotlib
import numpy as np
import scipy.ndimage as ndimage
from skimage.transform import hough_circle, hough_circle_peaks
from sunpy.map import GenericMap
import eclipse.meta as m
# Public API of this module.
__all__ = ['find_sun_center_and_radius', 'eclipse_image_to_map']
def find_sun_center_and_radius(im):
    """
    Given an image of the eclipsed Sun find the center and radius of the
    image.

    Parameters
    ----------
    im : `numpy.ndarray`
        The image.

    Returns
    -------
    im_cx : `astropy.units.Quantity`
        The x coordinate of the centre of the disk.
    im_cy : `astropy.units.Quantity`
        The y coordinate of the centre of the disk.
    im_radius : `astropy.units.Quantity`
        The radius of the disk.
    """
    # Smooth the image, then keep only pixels well above the mean brightness.
    blur_im = ndimage.gaussian_filter(im, 8)
    mask = blur_im > blur_im.mean() * 3
    # the following code limits the region to search for the circle of the Sun
    label_im, nb_labels = ndimage.label(mask)
    slice_x, slice_y = ndimage.find_objects(label_im == 1)[0]
    roi = blur_im[slice_x, slice_y]
    # take the derivative of the image to find the edges of the Sun
    sx = ndimage.sobel(roi, axis=0, mode='constant')
    sy = ndimage.sobel(roi, axis=1, mode='constant')
    sob = np.hypot(sx, sy)
    # Candidate radii: a quarter to half of the mean ROI size, in steps of 10.
    hough_radii = np.arange(np.floor(np.mean(sob.shape) / 4),
                            np.ceil(np.mean(sob.shape) / 2), 10)
    hough_res = hough_circle(sob > (sob.mean() * 5), hough_radii)
    # Select the most prominent circle
    # NOTE(review): skimage documents the return order as
    # (accums, cx, cy, radii); unpacking as (cy, cx) here swaps that —
    # presumably to match the axis-0/axis-1 use of slice_x/slice_y above;
    # confirm against the downstream coordinate convention.
    accums, cy, cx, radius = hough_circle_peaks(hough_res, hough_radii,
                                               total_num_peaks=1)
    # Shift from ROI coordinates back to full-image coordinates, in pixels.
    im_cx = (cx + slice_x.start) * u.pix
    im_cy = (cy + slice_y.start) * u.pix
    im_radius = radius * u.pix
    return im_cx, im_cy, im_radius
def eclipse_image_to_map(filename):
    """
    Given the filename to a photo, convert it to a `sunpy.map.GenericMap` object.

    Parameters
    ----------
    filename : `str`
        The filename of the image.

    Returns
    -------
    sunpymap : `sunpy.map.GenericMap`
        A SunPy map with valid metadata for the image.
    """
    # load the image data (flipped vertically so row 0 is at the bottom)
    im_rgb = np.flipud(matplotlib.image.imread(filename))
    # remove the color information by averaging the RGB channels
    im = np.average(im_rgb, axis=2)
    # find the sun center and radius
    im_cx, im_cy, im_radius = find_sun_center_and_radius(im)
    # Read the EXIF tags inside a context manager so the file handle is
    # closed deterministically (the original opened it inline and leaked it).
    with open(filename, 'rb') as image_file:
        tags = exifread.process_file(image_file)
    time = m.get_image_time(tags)
    ###########################################################################
    # With the time and the radius of the solar disk we can calculate the
    # plate scale.
    plate_scale = m.get_plate_scale(time, im_radius)
    ###########################################################################
    # We can now build a WCS object and a meta dictionary. We then append a
    # few more meta tags to the meta dictionary.
    wcs = m.build_wcs(im_cx, im_cy, plate_scale)
    meta = m.build_meta(wcs, tags)
    return GenericMap(data=im, header=meta)
| [
"numpy.mean",
"numpy.average",
"eclipse.meta.build_meta",
"eclipse.meta.get_image_time",
"eclipse.meta.build_wcs",
"matplotlib.image.imread",
"scipy.ndimage.label",
"skimage.transform.hough_circle_peaks",
"scipy.ndimage.find_objects",
"scipy.ndimage.sobel",
"sunpy.map.GenericMap",
"eclipse.met... | [((808, 838), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['im', '(8)'], {}), '(im, 8)\n', (831, 838), True, 'import scipy.ndimage as ndimage\n'), ((985, 1004), 'scipy.ndimage.label', 'ndimage.label', (['mask'], {}), '(mask)\n', (998, 1004), True, 'import scipy.ndimage as ndimage\n'), ((1181, 1224), 'scipy.ndimage.sobel', 'ndimage.sobel', (['roi'], {'axis': '(0)', 'mode': '"""constant"""'}), "(roi, axis=0, mode='constant')\n", (1194, 1224), True, 'import scipy.ndimage as ndimage\n'), ((1234, 1277), 'scipy.ndimage.sobel', 'ndimage.sobel', (['roi'], {'axis': '(1)', 'mode': '"""constant"""'}), "(roi, axis=1, mode='constant')\n", (1247, 1277), True, 'import scipy.ndimage as ndimage\n'), ((1288, 1304), 'numpy.hypot', 'np.hypot', (['sx', 'sy'], {}), '(sx, sy)\n', (1296, 1304), True, 'import numpy as np\n'), ((1568, 1629), 'skimage.transform.hough_circle_peaks', 'hough_circle_peaks', (['hough_res', 'hough_radii'], {'total_num_peaks': '(1)'}), '(hough_res, hough_radii, total_num_peaks=1)\n', (1586, 1629), False, 'from skimage.transform import hough_circle, hough_circle_peaks\n'), ((2298, 2324), 'numpy.average', 'np.average', (['im_rgb'], {'axis': '(2)'}), '(im_rgb, axis=2)\n', (2308, 2324), True, 'import numpy as np\n'), ((2491, 2513), 'eclipse.meta.get_image_time', 'm.get_image_time', (['tags'], {}), '(tags)\n', (2507, 2513), True, 'import eclipse.meta as m\n'), ((2710, 2744), 'eclipse.meta.get_plate_scale', 'm.get_plate_scale', (['time', 'im_radius'], {}), '(time, im_radius)\n', (2727, 2744), True, 'import eclipse.meta as m\n'), ((2965, 3003), 'eclipse.meta.build_wcs', 'm.build_wcs', (['im_cx', 'im_cy', 'plate_scale'], {}), '(im_cx, im_cy, plate_scale)\n', (2976, 3003), True, 'import eclipse.meta as m\n'), ((3015, 3038), 'eclipse.meta.build_meta', 'm.build_meta', (['wcs', 'tags'], {}), '(wcs, tags)\n', (3027, 3038), True, 'import eclipse.meta as m\n'), ((3050, 3082), 'sunpy.map.GenericMap', 'GenericMap', ([], {'data': 'im', 'header': 
'meta'}), '(data=im, header=meta)\n', (3060, 3082), False, 'from sunpy.map import GenericMap\n'), ((1028, 1063), 'scipy.ndimage.find_objects', 'ndimage.find_objects', (['(label_im == 1)'], {}), '(label_im == 1)\n', (1048, 1063), True, 'import scipy.ndimage as ndimage\n'), ((2219, 2252), 'matplotlib.image.imread', 'matplotlib.image.imread', (['filename'], {}), '(filename)\n', (2242, 2252), False, 'import matplotlib\n'), ((1343, 1361), 'numpy.mean', 'np.mean', (['sob.shape'], {}), '(sob.shape)\n', (1350, 1361), True, 'import numpy as np\n'), ((1404, 1422), 'numpy.mean', 'np.mean', (['sob.shape'], {}), '(sob.shape)\n', (1411, 1422), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Try continous collision checking for a simple path through an obstacle.
"""
import time
import fcl
import numpy as np
import matplotlib.pyplot as plt
from acrolib.plotting import get_default_axes3d, plot_reference_frame
from acrolib.geometry import translation
from acrobotics.robot_examples import Kuka
from acrobotics.tool_examples import torch2
from acrobotics.geometry import Scene
from acrobotics.shapes import Box
# Module-level fixtures shared by every test case below: a Kuka robot with a
# welding torch attached as its tool.
robot = Kuka()
robot.tool = torch2
# Set DEBUG to True to print each collision-check result and animate the path.
DEBUG = False
def show_animation(robot, scene, qa, qb):
    """Animate a linearly interpolated joint-space path from qa to qb."""
    joint_path = np.linspace(qa, qb, 10)
    fig, ax = get_default_axes3d([-0.8, 0.8], [0, 1.6], [-0.2, 1.4])
    ax.set_axis_off()
    ax.view_init(elev=31, azim=-15)
    scene.plot(ax, c="green")
    robot.animate_path(fig, ax, joint_path)
    plt.show()
def test_ccd_1():
    """A base-joint sweep through a thin vertical bar must be flagged as a collision.

    Fix: corrected the typo "resut" -> "result" in the debug message.
    """
    table = Box(2, 2, 0.1)
    T_table = translation(0, 0, -0.2)
    obstacle = Box(0.01, 0.01, 1.5)
    T_obs = translation(0, 0.5, 0.55)
    scene = Scene([table, obstacle], [T_table, T_obs])
    # Start and goal straddle the bar; the straight joint-space path crosses it.
    q_start = np.array([1.0, 1.5, -0.3, 0, 0, 0])
    q_goal = np.array([2.0, 1.5, 0.3, 0, 0, 0])
    res = robot.is_path_in_collision(q_start, q_goal, scene)
    assert res
    if DEBUG:
        print("result test 1: ", res)
        show_animation(robot, scene, q_start, q_goal)
def test_ccd_2():
    """A wrist sweep past a thin horizontal plate must be flagged as a collision.

    Fix: corrected the typo "resut" -> "result" in the debug message.
    """
    table = Box(2, 2, 0.1)
    T_table = translation(0, 0, -0.2)
    obstacle = Box(0.2, 0.1, 0.01)
    T_obs = translation(0, 0.9, 0.55)
    scene = Scene([table, obstacle], [T_table, T_obs])
    # Only joints 3 and 5 move; the tool tip sweeps through the plate.
    q_start = np.array([1.5, 1.5, -0.3, 0, 0.3, 0])
    q_goal = np.array([1.5, 1.5, 0.3, 0, -0.3, 0])
    res = robot.is_path_in_collision(q_start, q_goal, scene)
    assert res
    if DEBUG:
        print("result test 2: ", res)
        show_animation(robot, scene, q_start, q_goal)
def test_ccd_3():
    """A low base-joint sweep through a small box must be flagged as a collision.

    Fix: corrected the typo "resut" -> "result" in the debug message.
    """
    table = Box(2, 2, 0.1)
    T_table = translation(0, 0, -0.2)
    obstacle = Box(0.01, 0.2, 0.2)
    T_obs = translation(0, 1.2, 0)
    scene = Scene([table, obstacle], [T_table, T_obs])
    # Only the base joint moves; the arm sweeps sideways through the box.
    q_start = np.array([1.0, 1.2, -0.5, 0, 0, 0])
    q_goal = np.array([2.0, 1.2, -0.5, 0, 0, 0])
    res = robot.is_path_in_collision(q_start, q_goal, scene)
    assert res
    if DEBUG:
        print("result test 3: ", res)
        show_animation(robot, scene, q_start, q_goal)
if __name__ == "__main__":
test_ccd_1()
test_ccd_2()
test_ccd_3()
| [
"acrolib.geometry.translation",
"acrobotics.shapes.Box",
"acrobotics.robot_examples.Kuka",
"acrolib.plotting.get_default_axes3d",
"numpy.array",
"numpy.linspace",
"acrobotics.geometry.Scene",
"matplotlib.pyplot.show"
] | [((460, 466), 'acrobotics.robot_examples.Kuka', 'Kuka', ([], {}), '()\n', (464, 466), False, 'from acrobotics.robot_examples import Kuka\n'), ((559, 582), 'numpy.linspace', 'np.linspace', (['qa', 'qb', '(10)'], {}), '(qa, qb, 10)\n', (570, 582), True, 'import numpy as np\n'), ((597, 651), 'acrolib.plotting.get_default_axes3d', 'get_default_axes3d', (['[-0.8, 0.8]', '[0, 1.6]', '[-0.2, 1.4]'], {}), '([-0.8, 0.8], [0, 1.6], [-0.2, 1.4])\n', (615, 651), False, 'from acrolib.plotting import get_default_axes3d, plot_reference_frame\n'), ((784, 794), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (792, 794), True, 'import matplotlib.pyplot as plt\n'), ((827, 841), 'acrobotics.shapes.Box', 'Box', (['(2)', '(2)', '(0.1)'], {}), '(2, 2, 0.1)\n', (830, 841), False, 'from acrobotics.shapes import Box\n'), ((856, 879), 'acrolib.geometry.translation', 'translation', (['(0)', '(0)', '(-0.2)'], {}), '(0, 0, -0.2)\n', (867, 879), False, 'from acrolib.geometry import translation\n'), ((895, 915), 'acrobotics.shapes.Box', 'Box', (['(0.01)', '(0.01)', '(1.5)'], {}), '(0.01, 0.01, 1.5)\n', (898, 915), False, 'from acrobotics.shapes import Box\n'), ((928, 953), 'acrolib.geometry.translation', 'translation', (['(0)', '(0.5)', '(0.55)'], {}), '(0, 0.5, 0.55)\n', (939, 953), False, 'from acrolib.geometry import translation\n'), ((966, 1008), 'acrobotics.geometry.Scene', 'Scene', (['[table, obstacle]', '[T_table, T_obs]'], {}), '([table, obstacle], [T_table, T_obs])\n', (971, 1008), False, 'from acrobotics.geometry import Scene\n'), ((1023, 1058), 'numpy.array', 'np.array', (['[1.0, 1.5, -0.3, 0, 0, 0]'], {}), '([1.0, 1.5, -0.3, 0, 0, 0])\n', (1031, 1058), True, 'import numpy as np\n'), ((1072, 1106), 'numpy.array', 'np.array', (['[2.0, 1.5, 0.3, 0, 0, 0]'], {}), '([2.0, 1.5, 0.3, 0, 0, 0])\n', (1080, 1106), True, 'import numpy as np\n'), ((1322, 1336), 'acrobotics.shapes.Box', 'Box', (['(2)', '(2)', '(0.1)'], {}), '(2, 2, 0.1)\n', (1325, 1336), False, 'from acrobotics.shapes 
import Box\n'), ((1351, 1374), 'acrolib.geometry.translation', 'translation', (['(0)', '(0)', '(-0.2)'], {}), '(0, 0, -0.2)\n', (1362, 1374), False, 'from acrolib.geometry import translation\n'), ((1390, 1409), 'acrobotics.shapes.Box', 'Box', (['(0.2)', '(0.1)', '(0.01)'], {}), '(0.2, 0.1, 0.01)\n', (1393, 1409), False, 'from acrobotics.shapes import Box\n'), ((1422, 1447), 'acrolib.geometry.translation', 'translation', (['(0)', '(0.9)', '(0.55)'], {}), '(0, 0.9, 0.55)\n', (1433, 1447), False, 'from acrolib.geometry import translation\n'), ((1460, 1502), 'acrobotics.geometry.Scene', 'Scene', (['[table, obstacle]', '[T_table, T_obs]'], {}), '([table, obstacle], [T_table, T_obs])\n', (1465, 1502), False, 'from acrobotics.geometry import Scene\n'), ((1517, 1554), 'numpy.array', 'np.array', (['[1.5, 1.5, -0.3, 0, 0.3, 0]'], {}), '([1.5, 1.5, -0.3, 0, 0.3, 0])\n', (1525, 1554), True, 'import numpy as np\n'), ((1568, 1605), 'numpy.array', 'np.array', (['[1.5, 1.5, 0.3, 0, -0.3, 0]'], {}), '([1.5, 1.5, 0.3, 0, -0.3, 0])\n', (1576, 1605), True, 'import numpy as np\n'), ((1821, 1835), 'acrobotics.shapes.Box', 'Box', (['(2)', '(2)', '(0.1)'], {}), '(2, 2, 0.1)\n', (1824, 1835), False, 'from acrobotics.shapes import Box\n'), ((1850, 1873), 'acrolib.geometry.translation', 'translation', (['(0)', '(0)', '(-0.2)'], {}), '(0, 0, -0.2)\n', (1861, 1873), False, 'from acrolib.geometry import translation\n'), ((1889, 1908), 'acrobotics.shapes.Box', 'Box', (['(0.01)', '(0.2)', '(0.2)'], {}), '(0.01, 0.2, 0.2)\n', (1892, 1908), False, 'from acrobotics.shapes import Box\n'), ((1921, 1943), 'acrolib.geometry.translation', 'translation', (['(0)', '(1.2)', '(0)'], {}), '(0, 1.2, 0)\n', (1932, 1943), False, 'from acrolib.geometry import translation\n'), ((1956, 1998), 'acrobotics.geometry.Scene', 'Scene', (['[table, obstacle]', '[T_table, T_obs]'], {}), '([table, obstacle], [T_table, T_obs])\n', (1961, 1998), False, 'from acrobotics.geometry import Scene\n'), ((2013, 2048), 'numpy.array', 
'np.array', (['[1.0, 1.2, -0.5, 0, 0, 0]'], {}), '([1.0, 1.2, -0.5, 0, 0, 0])\n', (2021, 2048), True, 'import numpy as np\n'), ((2062, 2097), 'numpy.array', 'np.array', (['[2.0, 1.2, -0.5, 0, 0, 0]'], {}), '([2.0, 1.2, -0.5, 0, 0, 0])\n', (2070, 2097), True, 'import numpy as np\n')] |
"""
Part 2 of https://adventofcode.com/2020/day/9
"""
import part1
import numpy as np
INVALID = 1309761972  # Answer from Part1

def find_sum(data, target=INVALID):
    """Find a contiguous run of at least two numbers in *data* summing to *target*.

    Returns the sum of the smallest and largest numbers in that run, or 0 if
    no such run exists.

    Fixes over the original brute force:
    - ``data[j:i]`` excluded index ``i``, so the last element could never be
      part of any candidate range;
    - empty (``j == i``) and single-element slices were considered, which
      could spuriously match ``target``.
    Uses an O(n) sliding window, which assumes all values are positive
    (true for this puzzle input).
    """
    window_sum = 0
    lo = 0
    for hi, value in enumerate(data):
        window_sum += value
        # Shrink from the left while we overshoot, keeping at least one element.
        while window_sum > target and lo < hi:
            window_sum -= data[lo]
            lo += 1
        # Require a run of at least two numbers, per the puzzle statement.
        if window_sum == target and hi - lo >= 1:
            run = data[lo:hi + 1]
            return np.min(run) + np.max(run)
    return 0
if __name__ == "__main__":
data = [int(i) for i in part1.read_data("input.txt")]
print(
f"Solution: The sum of the smallest and largest numbers in a contiguous range that add up to Part 1's answer is {find_sum(data)}."
)
| [
"numpy.max",
"part1.read_data",
"numpy.min"
] | [((436, 464), 'part1.read_data', 'part1.read_data', (['"""input.txt"""'], {}), "('input.txt')\n", (451, 464), False, 'import part1\n'), ((322, 342), 'numpy.min', 'np.min', (['moving_slice'], {}), '(moving_slice)\n', (328, 342), True, 'import numpy as np\n'), ((345, 365), 'numpy.max', 'np.max', (['moving_slice'], {}), '(moving_slice)\n', (351, 365), True, 'import numpy as np\n')] |
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from scipy import stats
from tensorflow.keras import layers
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler,OneHotEncoder
from itertools import product
from .layers import *
from .utils import get_interaction_list
class GAMINet(tf.keras.Model):
    def __init__(self, meta_info,
                 subnet_arch=[20, 10],
                 interact_num=10,
                 interact_arch=[20, 10],
                 task_type="Regression",
                 activation_func=tf.tanh,
                 main_grid_size=41,
                 interact_grid_size=41,
                 lr_bp=0.001,
                 batch_size=500,
                 main_effect_epochs=2000,
                 interaction_epochs=2000,
                 tuning_epochs=50,
                 loss_threshold_main=0.01,
                 loss_threshold_inter=0.01,
                 val_ratio=0.2,
                 early_stop_thres=100,
                 random_state=0,
                 threshold=0.5,
                 multi_type_num=0,
                 verbose=False,
                 interaction_restrict=False):
        """Build a GAMINet model (main-effect subnets + pairwise-interaction subnets).

        Args:
            meta_info: dict of feature_name -> info dict; each entry carries a
                "type" ("target" / "categorical" / "id" / continuous) and a
                "source" ("user" / "item").
            subnet_arch, interact_arch: hidden-layer widths for the main-effect
                and interaction subnetworks.
            interact_num: requested number of interactions (later overwritten,
                see note below).
            task_type: "Regression", "Classification", "MultiClassification"
                or "Ordinal_Regression".
            threshold: cut-off used by `scan` for ordinal-label decoding.
            multi_type_num: number of output classes (0 = single output).

        NOTE(review): `subnet_arch` / `interact_arch` use mutable default
        arguments; they are only read here, so this is harmless but fragile.
        NOTE(review): `val_ratio` is accepted but never stored in this
        constructor.
        """
        super(GAMINet, self).__init__()
        # Store hyper-parameters verbatim.
        self.meta_info = meta_info
        self.input_num = len(meta_info) - 1  # one meta_info entry is the target
        self.task_type = task_type
        self.subnet_arch = subnet_arch
        self.main_grid_size = main_grid_size
        self.interact_grid_size = interact_grid_size
        self.activation_func = activation_func
        self.interact_arch = interact_arch
        # Upper bound: all unordered feature pairs.
        self.max_interact_num = int(round(self.input_num * (self.input_num - 1) / 2))
        self.interact_num = min(interact_num, self.max_interact_num)
        self.interact_num_added = 0
        self.interaction_list = []
        self.loss_threshold_main = loss_threshold_main
        self.loss_threshold_inter = loss_threshold_inter
        self.lr_bp = lr_bp
        self.batch_size = batch_size
        self.tuning_epochs = tuning_epochs
        self.main_effect_epochs = main_effect_epochs
        self.interaction_epochs = interaction_epochs
        self.verbose = verbose
        self.early_stop_thres = early_stop_thres
        self.random_state = random_state
        self.threshold = threshold
        self.interaction_restrict = interaction_restrict
        self.multi_type_num = multi_type_num

        # Seed both numpy and TF for reproducibility.
        np.random.seed(random_state)
        tf.random.set_seed(random_state)

        # Feature bookkeeping, filled from meta_info below.
        self.categ_variable_num = 0
        self.numerical_input_num = 0
        self.categ_variable_list = []
        self.categ_index_list = []
        self.numerical_index_list = []
        self.numerical_variable_list = []
        self.variables_names = []
        self.feature_type_list = []
        self.interaction_status = False
        self.user_feature_list = []
        self.item_feature_list = []
        # First pass: split features by their "source" (user vs item).
        for indice, (feature_name, feature_info) in enumerate(self.meta_info.items()):
            if feature_info["source"] == "user":
                self.user_feature_list.append(indice)
            elif feature_info["source"] == "item":
                self.item_feature_list.append(indice)
        # Second pass: classify features by "type"; "target" and "id" entries
        # are skipped entirely (they never reach variables_names).
        for indice, (feature_name, feature_info) in enumerate(self.meta_info.items()):
            if feature_info["type"] == "target":
                continue
            elif feature_info["type"] == "categorical":
                self.categ_variable_num += 1
                self.categ_index_list.append(indice)
                self.feature_type_list.append("categorical")
                self.categ_variable_list.append(feature_name)
            elif feature_info["type"] == "id":
                continue
            else:
                self.numerical_input_num += 1
                self.numerical_index_list.append(indice)
                self.feature_type_list.append("continuous")
                self.numerical_variable_list.append(feature_name)
            self.variables_names.append(feature_name)
        # NOTE(review): debug print left in; remove for production use.
        print(self.variables_names)
        # interact_num is OVERWRITTEN here: one candidate interaction per
        # (user feature, item feature) pair, superseding the constructor arg.
        self.interact_num = len([item for item in product(self.user_feature_list, self.item_feature_list)])
        # Build the three sub-models: per-feature main-effect subnets,
        # pairwise interaction subnets, and the linear combination layer.
        self.maineffect_blocks = MainEffectBlock(meta_info=self.meta_info,
                                 numerical_index_list=list(self.numerical_index_list),
                                 categ_index_list=self.categ_index_list,
                                 subnet_arch=self.subnet_arch,
                                 activation_func=self.activation_func,
                                 grid_size=self.main_grid_size)
        self.interact_blocks = InteractionBlock(interact_num=self.interact_num,
                                meta_info=self.meta_info,
                                interact_arch=self.interact_arch,
                                activation_func=self.activation_func,
                                grid_size=self.interact_grid_size)
        self.output_layer = OutputLayer(input_num=self.input_num,
                               interact_num=self.interact_num,
                               task_type=self.task_type,
                               multi_type_num=self.multi_type_num)

        self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.lr_bp)
        # Pick the loss matching the task.
        if self.task_type == "Regression":
            #self.loss_fn = tf.keras.losses.MeanSquaredError()
            self.loss_fn = tf.keras.losses.MeanAbsoluteError()
        elif self.task_type == "Classification":
            self.loss_fn = tf.keras.losses.BinaryCrossentropy()
        elif self.task_type == "MultiClassification":
            self.loss_fn = tf.keras.losses.CategoricalCrossentropy()
        elif self.task_type == "Ordinal_Regression":
            self.loss_fn = tf.keras.losses.CategoricalCrossentropy()
        else:
            print(self.task_type)
            raise ValueError("The task type is not supported")
    def call(self, inputs, main_effect_training=False, interaction_training=False):
        """Forward pass: sum of main effects and (optionally) interactions.

        Caches the sub-model outputs on ``self`` (side effect) and applies the
        task-appropriate link function to the combined linear output.
        """
        self.maineffect_outputs = self.maineffect_blocks(inputs, training=main_effect_training)
        # Before interactions are selected (interaction_status False), feed
        # zeros so the output layer's interaction weights contribute nothing.
        if self.interaction_status:
            self.interact_outputs = self.interact_blocks(inputs, training=interaction_training)
        else:
            self.interact_outputs = tf.zeros([inputs.shape[0], self.interact_num])

        concat_list = [self.maineffect_outputs]
        if self.interact_num > 0:
            concat_list.append(self.interact_outputs)

        # Link function per task: identity / sigmoid / sigmoid / softmax.
        if self.task_type == "Regression":
            output = self.output_layer(tf.concat(concat_list, 1))
        elif self.task_type == "Classification":
            output = tf.nn.sigmoid(self.output_layer(tf.concat(concat_list, 1)))
        elif self.task_type == "Ordinal_Regression":
            output = tf.nn.sigmoid(self.output_layer(tf.concat(concat_list, 1)))
        elif self.task_type == "MultiClassification":
            output = tf.nn.softmax(self.output_layer(tf.concat(concat_list, 1)))
        else:
            raise ValueError("The task type is not supported")
        return output
    @tf.function
    def predict_graph(self, x, main_effect_training=False, interaction_training=False):
        """Graph-compiled forward pass on ``x`` (cast to float32)."""
        return self.__call__(tf.cast(x, tf.float32),
                      main_effect_training=main_effect_training,
                      interaction_training=interaction_training)
    def predict_initial(self, x, main_effect_training=False, interaction_training=False):
        """Forward pass with the task temporarily forced to 'Regression'
        (i.e. raw linear output, no sigmoid link).

        NOTE(review): the ``finally`` clause unconditionally resets
        ``task_type`` to 'Classification', even if it was something else
        before the call — confirm this method is only used on classifiers.
        """
        try:
            self.task_type = 'Regression'
            return self.__call__(tf.cast(x, tf.float32),
                          main_effect_training=main_effect_training,
                          interaction_training=interaction_training)
        finally:
            self.task_type = 'Classification'
def predict(self, x):
if self.task_type == "Ordinal_Regression":
ind = self.scan(self.predict_graph(x).numpy(),self.threshold)
return tf.keras.backend.eval(ind)
if self.task_type == "MultiClassification":
ind = tf.argmax(self.predict_graph(x).numpy(),axis=1)
return tf.keras.backend.eval(ind)
return self.predict_graph(x).numpy()
    @tf.function
    def evaluate_graph_init(self, x, y, main_effect_training=False, interaction_training=False):
        """Graph-compiled loss of the model on (x, y), before interactions are active."""
        return self.loss_fn(y, self.__call__(tf.cast(x, tf.float32),
                               main_effect_training=main_effect_training,
                               interaction_training=interaction_training))
    @tf.function
    def evaluate_graph_inter(self, x, y, main_effect_training=False, interaction_training=False):
        """Graph-compiled loss of the model on (x, y), once interactions are active.

        Separate from evaluate_graph_init so each tf.function traces its own graph.
        """
        return self.loss_fn(y, self.__call__(tf.cast(x, tf.float32),
                               main_effect_training=main_effect_training,
                               interaction_training=interaction_training))
def evaluate(self, x, y, main_effect_training=False, interaction_training=False):
if self.interaction_status:
return self.evaluate_graph_inter(x, y,
main_effect_training=main_effect_training,
interaction_training=interaction_training).numpy()
else:
return self.evaluate_graph_init(x, y,
main_effect_training=main_effect_training,
interaction_training=interaction_training).numpy()
    @tf.function
    def train_main_effect(self, inputs, labels, main_effect_training=True, interaction_training=False):
        """One optimizer step on the main-effect subnets and their output weights."""
        with tf.GradientTape() as tape:
            pred = self.__call__(inputs, main_effect_training=main_effect_training,
                          interaction_training=interaction_training)
            total_loss = self.loss_fn(labels, pred)

        # Main-effect weights plus the matching output-layer bias
        # (ordinal regression uses a dedicated per-threshold bias).
        if self.task_type == "Ordinal_Regression":
            train_weights = self.maineffect_blocks.weights
            train_weights.append(self.output_layer.main_effect_weights)
            train_weights.append(self.output_layer.ordinal_bias)
        else:
            train_weights = self.maineffect_blocks.weights
            train_weights.append(self.output_layer.main_effect_weights)
            train_weights.append(self.output_layer.main_effect_output_bias)
        # Keep only weights that Keras currently reports as trainable.
        train_weights_list = []
        trainable_weights_names = [self.trainable_weights[j].name for j in range(len(self.trainable_weights))]
        for i in range(len(train_weights)):
            if train_weights[i].name in trainable_weights_names:
                train_weights_list.append(train_weights[i])
        grads = tape.gradient(total_loss, train_weights_list)
        self.optimizer.apply_gradients(zip(grads, train_weights_list))
    @tf.function
    def train_interaction(self, inputs, labels, main_effect_training=False, interaction_training=True):
        """One optimizer step on the interaction subnets and their output weights."""
        with tf.GradientTape() as tape:
            pred = self.__call__(inputs, main_effect_training=main_effect_training,
                          interaction_training=interaction_training)
            total_loss = self.loss_fn(labels, pred)

        # NOTE(review): both branches below are identical; the if/else is
        # redundant but preserved as-is.
        if self.task_type == "Ordinal_Regression":
            train_weights = self.interact_blocks.weights
            train_weights.append(self.output_layer.interaction_weights)
            train_weights.append(self.output_layer.interaction_output_bias)
        else:
            train_weights = self.interact_blocks.weights
            train_weights.append(self.output_layer.interaction_weights)
            train_weights.append(self.output_layer.interaction_output_bias)
        # Keep only weights that Keras currently reports as trainable.
        train_weights_list = []
        trainable_weights_names = [self.trainable_weights[j].name for j in range(len(self.trainable_weights))]
        for i in range(len(train_weights)):
            if train_weights[i].name in trainable_weights_names:
                train_weights_list.append(train_weights[i])
        grads = tape.gradient(total_loss, train_weights_list)
        self.optimizer.apply_gradients(zip(grads, train_weights_list))
@tf.function
def train_all(self, inputs, labels, main_effect_training=True, interaction_training=True):
with tf.GradientTape() as tape:
pred = self.__call__(inputs, main_effect_training=main_effect_training,
interaction_training=interaction_training)
total_loss = self.loss_fn(labels, pred)
if self.task_type == "Ordinal_Regression":
train_weights = self.maineffect_blocks.weights
train_weights.append(self.output_layer.main_effect_weights)
train_weights.append(self.output_layer.ordinal_bias)
else:
train_weights_main = self.maineffect_blocks.weights
train_weights_main.append(self.output_layer.main_effect_weights)
train_weights_main.append(self.output_layer.main_effect_output_bias)
train_weights_inter = self.interact_blocks.weights
train_weights_inter.append(self.output_layer.interaction_weights)
train_weights_inter.append(self.output_layer.interaction_output_bias)
train_weights_list = []
trainable_weights_names = [self.trainable_weights[j].name for j in range(len(self.trainable_weights))]
for i in range(len(train_weights_main)):
if train_weights_main[i].name in trainable_weights_names:
train_weights_list.append(train_weights_main[i])
for i in range(len(train_weights_inter)):
if train_weights_inter[i].name in trainable_weights_names:
train_weights_list.append(train_weights_inter[i])
grads = tape.gradient(total_loss, train_weights_list)
self.optimizer.apply_gradients(zip(grads, train_weights_list))
    def get_main_effect_rank(self, j, tr_x):
        """Rank main effects by the std-dev of each subnet's output on tr_x.

        Returns (sorted_index, componment_scales): indices sorted by
        descending importance, and normalized importance scores.

        NOTE(review): parameter ``j`` (class index) is unused in the current
        implementation — only the commented-out weight-based variant used it.
        """
        sorted_index = np.array([])
        componment_scales = [0 for i in range(self.input_num)]
        beta = []
        # Importance proxy: sample std-dev of each main-effect subnet output.
        for i in range(self.input_num):
            beta.append(np.std(self.maineffect_blocks.subnets[i](tr_x[:,i].reshape(-1,1),training=False),ddof=1))

        #main_effect_norm = [self.maineffect_blocks.subnets[i].moving_norm.numpy()[0] for i in range(self.input_num)]
        #beta = (self.output_layer.main_effect_weights[:,j].numpy() * np.array([main_effect_norm]))
        # Guard against an all-zero importance vector before normalizing.
        if np.sum(np.abs(beta)) > 10**(-10):
            componment_scales = (np.abs(beta) / np.sum(np.abs(beta))).reshape([-1])
            sorted_index = np.argsort(componment_scales)[::-1]
        return sorted_index, componment_scales
    def get_interaction_rank(self, j, tr_x):
        """Rank active interactions by the std-dev of each pair-subnet output on tr_x.

        Returns (sorted_index, componment_scales) like get_main_effect_rank.

        NOTE(review): parameter ``j`` (class index) is unused — only the
        commented-out weight-based variant used it.
        """
        sorted_index = np.array([])
        componment_scales = [0 for i in range(self.interact_num_added)]
        gamma = []
        if self.interact_num_added > 0:
            # Importance proxy: sample std-dev of each interaction subnet output.
            for interact_id, (idx1, idx2) in enumerate(self.interaction_list):
                inputs = tf.concat([tr_x[:,idx1].reshape(-1,1),tr_x[:,idx2].reshape(-1,1)],1)
                gamma.append(np.std(self.interact_blocks.interacts[interact_id](inputs,training=False),ddof=1))
            #interaction_norm = [self.interact_blocks.interacts[i].moving_norm.numpy()[0] for i in range(self.interact_num_added)]
            #gamma = (self.output_layer.interaction_weights[:,j].numpy()[:self.interact_num_added]
            #        * np.array([interaction_norm]).reshape([-1, 1]))[0]
            # Guard against an all-zero importance vector before normalizing.
            if np.sum(np.abs(gamma)) > 10**(-10):
                componment_scales = (np.abs(gamma) / np.sum(np.abs(gamma))).reshape([-1])
                sorted_index = np.argsort(componment_scales)[::-1]
        return sorted_index, componment_scales
    def get_all_active_rank(self, class_, tr_x):
        """Joint normalized importance of all main effects and interactions.

        Importances are output std-devs gated by the switcher column for
        ``class_`` (pruned components contribute zero). Returns a flat list of
        scores, main effects first, then interactions.
        """
        #main_effect_norm = [self.maineffect_blocks.subnets[i].moving_norm.numpy()[0] for i in range(self.input_num)]
        #beta = (self.output_layer.main_effect_weights[:,class_].numpy() * np.array([main_effect_norm])
        #     * self.output_layer.main_effect_switcher[:,class_].numpy()).reshape([-1, 1])
        beta = []
        gamma = []
        for i in range(self.input_num):
            beta.append(np.std(self.maineffect_blocks.subnets[i](tr_x[:,i].reshape(-1,1),training=False),ddof=1))
        for interact_id, (idx1, idx2) in enumerate(self.interaction_list):
            inputs = tf.concat([tr_x[:,idx1].reshape(-1,1),tr_x[:,idx2].reshape(-1,1)],1)
            gamma.append(np.std(self.interact_blocks.interacts[interact_id](inputs,training=False),ddof=1))
        # Gate each std-dev by its 0/1 switcher entry for this class.
        beta = np.array(beta * self.output_layer.main_effect_switcher[:,class_].numpy()).reshape(-1,1)
        gamma = np.array(gamma * self.output_layer.interaction_switcher[:,class_].numpy()).reshape(-1,1)
        #interaction_norm = [self.interact_blocks.interacts[i].moving_norm.numpy()[0] for i in range(self.interact_num_added)]
        #gamma = (self.output_layer.interaction_weights[:,class_].numpy()[:self.interact_num_added]
        #      * np.array([interaction_norm])
        #      * self.output_layer.interaction_switcher[:,class_].numpy()[:self.interact_num_added]).reshape([-1, 1])
        #gamma = np.vstack([gamma, np.zeros((self.interact_num - self.interact_num_added, 1)).reshape([-1, 1]) ])
        componment_coefs = np.vstack([beta, gamma])
        # Normalize to relative scales; all-zero vector yields all-zero scores.
        if np.sum(np.abs(componment_coefs)) > 10**(-10):
            componment_scales = (np.abs(componment_coefs) / np.sum(np.abs(componment_coefs))).reshape([-1])
        else:
            componment_scales = [0 for i in range(self.input_num + self.interact_num_added)]
        return componment_scales
def get_component(self, tr_x):
#main_effect_norm = [self.maineffect_blocks.subnets[i].moving_norm.numpy()[0] for i in range(self.input_num)]
#beta = (self.output_layer.main_effect_weights[:,0].numpy() * np.array([main_effect_norm])
# * self.output_layer.main_effect_switcher[:,0].numpy()).reshape([-1, 1])
#interaction_norm = [self.interact_blocks.interacts[i].moving_norm.numpy()[0] for i in range(self.interact_num_added)]
#gamma = (self.output_layer.interaction_weights[:,0].numpy()[:self.interact_num_added]
# * np.array([interaction_norm])
# * self.output_layer.interaction_switcher[:,0].numpy()[:self.interact_num_added]).reshape([-1, 1])
#gamma = np.vstack([gamma, np.zeros((self.interact_num - self.interact_num_added, 1)).reshape([-1, 1]) ])
beta = []
gamma = []
for i in range(self.input_num):
beta.append(np.std(self.maineffect_blocks.subnets[i](tr_x[:,i].reshape(-1,1),training=False),ddof=1))
for interact_id, (idx1, idx2) in enumerate(self.interaction_list):
inputs = tf.concat([tr_x[:,idx1].reshape(-1,1),tr_x[:,idx2].reshape(-1,1)],1)
gamma.append(np.std(self.interact_blocks.interacts[interact_id](inputs,training=False),ddof=1))
beta = np.array(beta * self.output_layer.main_effect_switcher[:,0].numpy()).reshape(-1,1)
gamma = np.array(gamma * self.output_layer.interaction_switcher[:,0].numpy()).reshape(-1,1)
return beta, gamma
    def estimate_density(self, x):
        """Estimate per-feature marginal densities of x and cache them in
        ``self.data_dict_density`` (used later for plotting/diagnostics).

        Numerical features: a 10-bin histogram on the inverse-transformed
        (original-scale) values. Categorical features: relative frequencies
        per category index.
        """
        n_samples = x.shape[0]
        self.data_dict_density = {}
        for indice in range(self.input_num):
            feature_name = list(self.variables_names)[indice]
            if indice in self.numerical_index_list:
                # Undo the feature scaling so bins are in original units.
                sx = self.meta_info[feature_name]["scaler"]
                density, bins = np.histogram(sx.inverse_transform(x[:,[indice]]), bins=10, density=True)
                self.data_dict_density.update({feature_name:{"density":{"names":bins,"scores":density}}})
            elif indice in self.categ_index_list:
                # Categorical values are assumed to be integer category codes.
                unique, counts = np.unique(x[:, indice], return_counts=True)
                density = np.zeros((len(self.meta_info[feature_name]["values"])))
                density[unique.astype(int)] = counts / n_samples
                self.data_dict_density.update({feature_name:{"density":{"names":np.arange(len(self.meta_info[feature_name]["values"])),
                                                 "scores":density}}})
def coding(self,y):
re = np.zeros((y.shape[0],4))
for i in range(y.shape[0]):
if y[i]== 1:
re[i] = np.array([0,0,0,0])
elif y[i] ==2:
re[i] = np.array([1,0,0,0])
elif y[i] ==3:
re[i] = np.array([1,1,0,0])
elif y[i] ==4:
re[i] = np.array([1,1,1,0])
elif y[i] ==5:
re[i] = np.array([1,1,1,1])
return re
def scan(self, x, threshold):
res = np.zeros((x.shape[0],1))
for i in range(x.shape[0]):
res[i] = 5
for j in range(x.shape[1]):
if x[i,j] < threshold:
res[i] = j+1
break
#elif j==4:
# res[i] = j+1
# break
return res
    def fit_main_effect(self, tr_x, tr_y, val_x, val_y):
        """Stage 1: train the main-effect subnets with early stopping.

        First registers each subnet's evaluation grid and a uniform pdf over
        it, then runs mini-batch SGD epochs, appending train/val losses to
        ``self.err_train_main_effect_training`` / ``err_val_main_effect_training``.
        """
        ## specify grid points
        for i in range(self.input_num):
            if i in self.categ_index_list:
                # Categorical: one grid point per category code.
                length = len(self.meta_info[self.variables_names[i]]["values"])
                input_grid = np.arange(len(self.meta_info[self.variables_names[i]]["values"]))
            else:
                # Continuous: uniform grid on [0, 1] (features assumed scaled).
                length = self.main_grid_size
                input_grid = np.linspace(0, 1, length)
            pdf_grid = np.ones([length]) / length
            self.maineffect_blocks.subnets[i].set_pdf(np.array(input_grid, dtype=np.float32).reshape([-1, 1]),
                                          np.array(pdf_grid, dtype=np.float32).reshape([1, -1]))

        last_improvement = 0
        best_validation = np.inf
        train_size = tr_x.shape[0]
        for epoch in range(self.main_effect_epochs):
            # Shuffle each epoch (skipped for ordinal regression, which
            # presumably relies on sample order — TODO confirm).
            if self.task_type != "Ordinal_Regression":
                shuffle_index = np.arange(tr_x.shape[0])
                np.random.shuffle(shuffle_index)
                tr_x = tr_x[shuffle_index]
                tr_y = tr_y[shuffle_index]

            for iterations in range(train_size // self.batch_size):
                offset = (iterations * self.batch_size) % train_size
                batch_xx = tr_x[offset:(offset + self.batch_size), :]
                batch_yy = tr_y[offset:(offset + self.batch_size)]
                self.train_main_effect(tf.cast(batch_xx, tf.float32), batch_yy)

            self.err_train_main_effect_training.append(self.evaluate(tr_x, tr_y, main_effect_training=False, interaction_training=False))
            self.err_val_main_effect_training.append(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False))
            if self.verbose & (epoch % 1 == 0):
                print("Main effects training epoch: %d, train loss: %0.5f, val loss: %0.5f" %
                      (epoch + 1, self.err_train_main_effect_training[-1], self.err_val_main_effect_training[-1]))

            # Early stopping on validation loss.
            if self.err_val_main_effect_training[-1] < best_validation:
                best_validation = self.err_val_main_effect_training[-1]
                last_improvement = epoch
            if epoch - last_improvement > self.early_stop_thres:
                if self.verbose:
                    print("Early stop at epoch %d, with validation loss: %0.5f" % (epoch + 1, self.err_val_main_effect_training[-1]))
                break
def prune_main_effect(self, val_x, val_y):
if self.multi_type_num == 0:
self.main_effect_val_loss = []
sorted_index, componment_scales = self.get_main_effect_rank(0,self.tr_x)
self.output_layer.main_effect_switcher.assign(tf.constant(np.zeros((self.input_num, 1)), dtype=tf.float32))
self.main_effect_val_loss.append(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False) )
for idx in range(self.input_num):
selected_index = sorted_index[:(idx + 1)]
main_effect_switcher = np.zeros((self.input_num, 1))
main_effect_switcher[selected_index] = 1
self.output_layer.main_effect_switcher.assign(tf.constant(main_effect_switcher, dtype=tf.float32))
val_loss = self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False)
self.main_effect_val_loss.append(val_loss)
best_loss = np.min(self.main_effect_val_loss)
if np.sum((self.main_effect_val_loss / best_loss - 1) < self.loss_threshold_main) > 0:
best_idx = np.where((self.main_effect_val_loss / best_loss - 1) < self.loss_threshold_main)[0][0]
else:
best_idx = np.argmin(self.main_effect_val_loss)
self.active_main_effect_index = sorted_index[:best_idx]
main_effect_switcher = np.zeros((self.input_num, 1))
main_effect_switcher[self.active_main_effect_index] = 1
self.output_layer.main_effect_switcher.assign(tf.constant(main_effect_switcher, dtype=tf.float32))
else:
self.active_main_effect_index = []
for i in range(self.multi_type_num):
tmp1 = self.output_layer.main_effect_switcher.numpy()
tmp1[:,i] = np.zeros(self.input_num).ravel()
self.output_layer.main_effect_switcher.assign(tf.constant(tmp1, dtype=tf.float32))
sorted_index, componment_scales = self.get_main_effect_rank(i)
self.main_effect_val_loss = []
self.main_effect_val_loss.append(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False) )
for idx in range(self.input_num):
selected_index = sorted_index[:(idx + 1)]
main_effect_switcher = np.zeros((self.input_num, 1))
main_effect_switcher[selected_index] = 1
tmp = self.output_layer.main_effect_switcher.numpy()
tmp[:,i] = main_effect_switcher.ravel()
self.output_layer.main_effect_switcher.assign(tf.constant(tmp, dtype=tf.float32))
val_loss = self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False)
self.main_effect_val_loss.append(val_loss)
best_loss = np.min(self.main_effect_val_loss)
if np.sum((self.main_effect_val_loss / best_loss - 1) < self.loss_threshold_main) > 0:
best_idx = np.where((self.main_effect_val_loss / best_loss - 1) < self.loss_threshold_main)[0][0]
else:
best_idx = np.argmin(self.main_effect_val_loss)
self.active_main_effect_index.append(sorted_index[:best_idx])
main_effect_switcher = np.zeros((self.input_num, 1))
main_effect_switcher[self.active_main_effect_index[-1].astype(int)] = 1
tmp2 = self.output_layer.main_effect_switcher.numpy()
tmp2[:,i] = main_effect_switcher.ravel()
self.output_layer.main_effect_switcher.assign(tf.constant(tmp2, dtype=tf.float32))
    def fine_tune_main_effect(self, tr_x, tr_y, val_x, val_y):
        """Stage 1c: fine-tune the surviving main effects for a fixed number
        of epochs (``tuning_epochs``, no early stopping).

        Losses are appended to ``self.err_train_main_effect_tuning`` /
        ``err_val_main_effect_tuning``.
        """
        train_size = tr_x.shape[0]
        for epoch in range(self.tuning_epochs):
            # Reshuffle every epoch.
            shuffle_index = np.arange(tr_x.shape[0])
            np.random.shuffle(shuffle_index)
            tr_x = tr_x[shuffle_index]
            tr_y = tr_y[shuffle_index]

            for iterations in range(train_size // self.batch_size):
                offset = (iterations * self.batch_size) % train_size
                batch_xx = tr_x[offset:(offset + self.batch_size), :]
                batch_yy = tr_y[offset:(offset + self.batch_size)]
                self.train_main_effect(tf.cast(batch_xx, tf.float32), batch_yy)

            self.err_train_main_effect_tuning.append(self.evaluate(tr_x, tr_y, main_effect_training=False, interaction_training=False))
            self.err_val_main_effect_tuning.append(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False))
            if self.verbose & (epoch % 1 == 0):
                print("Main effects tuning epoch: %d, train loss: %0.5f, val loss: %0.5f" %
                      (epoch + 1, self.err_train_main_effect_tuning[-1], self.err_val_main_effect_tuning[-1]))
    def add_interaction(self, tr_x, tr_y, val_x, val_y):
        """Stage 2a: select candidate pairwise interactions from the residuals
        of the main-effect model, and activate them in the interaction block.

        ``get_interaction_list`` ranks feature pairs given the current
        predictions; the top ``interact_num`` pairs are kept and their
        switcher entries set to 1.
        """
        # Current model predictions (main effects only — interactions are
        # still zeroed via interaction_status) used as residual baseline.
        tr_pred = self.__call__(tf.cast(tr_x, tf.float32), main_effect_training=False, interaction_training=False).numpy().astype(np.float64)
        val_pred = self.__call__(tf.cast(val_x, tf.float32), main_effect_training=False, interaction_training=False).numpy().astype(np.float64)
        if self.multi_type_num == 0:
            interaction_list_all = get_interaction_list(tr_x, val_x, tr_y.ravel(), val_y.ravel(),
                                          tr_pred.ravel(), val_pred.ravel(),
                                          self.variables_names,
                                          self.feature_type_list,
                                          task_type=self.task_type,
                                          active_main_effect_index=self.active_main_effect_index,
                                          user_feature_list=self.user_feature_list,
                                          item_feature_list=self.item_feature_list,
                                          interaction_restrict=self.interaction_restrict)

            self.interaction_list = interaction_list_all[:self.interact_num]
            self.interact_num_added = len(self.interaction_list)
            # Switch on exactly the added interactions.
            interaction_switcher = np.zeros((self.interact_num, 1))
            interaction_switcher[:self.interact_num_added] = 1
            self.output_layer.interaction_switcher.assign(tf.constant(interaction_switcher, dtype=tf.float32))
            self.interact_blocks.set_interaction_list(self.interaction_list)
        else:
            # Multi-class: a main effect counts as "active" if it survived
            # pruning in more than multi_type_num/5 of the output classes.
            active_index_inter = []
            for fe_num in range(self.input_num):
                count_int = 0
                for num in range(self.multi_type_num):
                    if (self.active_main_effect_index[num]==fe_num).sum()==1:
                        count_int = count_int +1
                if count_int > self.multi_type_num/5:
                    active_index_inter.append(fe_num)

            interaction_list_all = get_interaction_list(tr_x, val_x, tr_y.ravel(), val_y.ravel(),
                                          tr_pred.ravel(), val_pred.ravel(),
                                          self.variables_names,
                                          self.feature_type_list,
                                          task_type=self.task_type,
                                          active_main_effect_index=active_index_inter)

            self.interaction_list = interaction_list_all[:self.interact_num]
            self.interact_num_added = len(self.interaction_list)
            # Switch on the added interactions for every output class.
            interaction_switcher = np.zeros((self.interact_num, 1))
            interaction_switcher[:self.interact_num_added] = 1
            for i in range(self.multi_type_num):
                tmp = self.output_layer.interaction_switcher.numpy()
                tmp[:,i] = interaction_switcher.ravel()
                self.output_layer.interaction_switcher.assign(tf.constant(tmp, dtype=tf.float32))
            self.interact_blocks.set_interaction_list(self.interaction_list)
    def fit_interaction(self, tr_x, tr_y, val_x, val_y):
        """Train the pairwise-interaction subnetworks with early stopping.

        First installs, for every selected interaction pair, a uniform density
        grid on the subnetwork (used for its zero-mean constraint), then runs
        mini-batch training for up to ``self.interaction_epochs`` epochs.
        Train/validation losses are appended to
        ``self.err_train_interaction_training`` /
        ``self.err_val_interaction_training``; training stops early when the
        validation loss has not improved for ``self.early_stop_thres`` epochs.

        Parameters
        ----------
        tr_x, tr_y : training features / targets (tr_x, tr_y are reshuffled
            in place each epoch).
        val_x, val_y : validation features / targets.
        """
        # specify grid points
        for interact_id, (idx1, idx2) in enumerate(self.interaction_list):
            feature_name1 = self.variables_names[idx1]
            feature_name2 = self.variables_names[idx2]
            # Categorical features get one grid point per level; numerical
            # features get an evenly spaced grid of interact_grid_size points.
            if feature_name1 in self.categ_variable_list:
                length1 = len(self.meta_info[feature_name1]["values"])
                length1_grid = np.arange(length1)
            else:
                length1 = self.interact_grid_size
                length1_grid = np.linspace(0, 1, length1)
            if feature_name2 in self.categ_variable_list:
                length2 = len(self.meta_info[feature_name2]["values"])
                length2_grid = np.arange(length2)
            else:
                length2 = self.interact_grid_size
                length2_grid = np.linspace(0, 1, length2)
            # Cartesian product of the two 1-D grids, flattened to (n, 2),
            # paired with a uniform pdf (each cell gets 1 / (length1*length2)).
            x1, x2 = np.meshgrid(length1_grid, length2_grid)
            input_grid = np.hstack([np.reshape(x1, [-1, 1]), np.reshape(x2, [-1, 1])])
            pdf_grid = np.ones([length1, length2]) / (length1 * length2)
            self.interact_blocks.interacts[interact_id].set_pdf(np.array(input_grid, dtype=np.float32),
                                                np.array(pdf_grid, dtype=np.float32).T)
        last_improvement = 0
        best_validation = np.inf
        train_size = tr_x.shape[0]
        self.interaction_status = True
        for epoch in range(self.interaction_epochs):
            # Reshuffle the training data at the start of every epoch.
            shuffle_index = np.arange(tr_x.shape[0])
            np.random.shuffle(shuffle_index)
            tr_x = tr_x[shuffle_index]
            tr_y = tr_y[shuffle_index]
            for iterations in range(train_size // self.batch_size):
                offset = (iterations * self.batch_size) % train_size
                batch_xx = tr_x[offset:(offset + self.batch_size), :]
                batch_yy = tr_y[offset:(offset + self.batch_size)]
                self.train_interaction(tf.cast(batch_xx, tf.float32), batch_yy)
            # Epoch-level losses evaluated with training flags disabled.
            self.err_train_interaction_training.append(self.evaluate(tr_x, tr_y, main_effect_training=False, interaction_training=False))
            self.err_val_interaction_training.append(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False))
            if self.verbose & (epoch % 1 == 0):
                print("Interaction training epoch: %d, train loss: %0.5f, val loss: %0.5f" %
                      (epoch + 1, self.err_train_interaction_training[-1], self.err_val_interaction_training[-1]))
            # Early stopping: track the best validation loss seen so far and
            # abort once no improvement occurred for early_stop_thres epochs.
            if self.err_val_interaction_training[-1] < best_validation:
                best_validation = self.err_val_interaction_training[-1]
                last_improvement = epoch
            if epoch - last_improvement > self.early_stop_thres:
                if self.verbose:
                    print("Early stop at epoch %d, with validation loss: %0.5f" % (epoch + 1, self.err_val_interaction_training[-1]))
                break
    def prune_interaction(self, val_x, val_y):
        """Select which trained interactions to keep, by validation loss.

        Interactions are ranked by ``get_interaction_rank`` and switched on
        one at a time (via the output layer's ``interaction_switcher``); the
        validation loss after each addition is recorded in
        ``self.interaction_val_loss``.  The smallest prefix whose relative
        loss is within ``self.loss_threshold_inter`` of the best loss is kept
        and stored in ``self.active_interaction_index``.

        Two branches: single-output models (``multi_type_num == 0``) use one
        shared switcher column; multi-type models repeat the procedure per
        output column ``i``.
        """
        if self.multi_type_num ==0:
            self.interaction_val_loss = []
            sorted_index, componment_scales = self.get_interaction_rank(0, self.tr_x)
            # Start from zero active interactions; the first recorded loss is
            # the "no interaction" baseline.
            self.output_layer.interaction_switcher.assign(tf.constant(np.zeros((self.interact_num, 1)), dtype=tf.float32))
            self.interaction_val_loss.append(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False))
            for idx in range(self.interact_num_added) :
                # Enable the top (idx + 1) ranked interactions and re-evaluate.
                selected_index = sorted_index[:(idx + 1)]
                interaction_switcher = np.zeros((self.interact_num, 1))
                interaction_switcher[selected_index] = 1
                self.output_layer.interaction_switcher.assign(tf.constant(interaction_switcher, dtype=tf.float32))
                val_loss = self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False)
                self.interaction_val_loss.append(val_loss)
            best_loss = np.min(self.interaction_val_loss)
            # Prefer the smallest prefix within the relative-loss tolerance;
            # otherwise fall back to the outright best prefix.
            if np.sum((self.interaction_val_loss / best_loss - 1) < self.loss_threshold_inter) > 0:
                best_idx = np.where((self.interaction_val_loss / best_loss - 1) < self.loss_threshold_inter)[0][0]
            else:
                best_idx = np.argmin(self.interaction_val_loss)
            self.active_interaction_index = sorted_index[:best_idx]
            interaction_switcher = np.zeros((self.interact_num, 1))
            interaction_switcher[self.active_interaction_index] = 1
            self.output_layer.interaction_switcher.assign(tf.constant(interaction_switcher, dtype=tf.float32))
        else:
            for i in range(self.multi_type_num):
                # NOTE(review): interaction_val_loss and active_interaction_index
                # are re-initialized on every class iteration, so after the loop
                # they only describe the last class — confirm this is intended.
                self.interaction_val_loss = []
                self.active_interaction_index = []
                sorted_index, componment_scales = self.get_interaction_rank(i)
                # Zero out column i of the switcher to get the class baseline.
                tmp = self.output_layer.interaction_switcher.numpy()
                tmp[:,i] = np.zeros(self.interact_num).ravel()
                self.output_layer.interaction_switcher.assign(tf.constant(tmp, dtype=tf.float32))
                self.interaction_val_loss.append(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False))
                for idx in range(self.interact_num_added) :
                    selected_index = sorted_index[:(idx + 1)]
                    interaction_switcher = np.zeros((self.interact_num, 1))
                    interaction_switcher[selected_index] = 1
                    tmp1 = self.output_layer.interaction_switcher.numpy()
                    tmp1[:,i] = interaction_switcher.ravel()
                    self.output_layer.interaction_switcher.assign(tf.constant(tmp1, dtype=tf.float32))
                    val_loss = self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False)
                    self.interaction_val_loss.append(val_loss)
                best_loss = np.min(self.interaction_val_loss)
                if np.sum((self.interaction_val_loss / best_loss - 1) < self.loss_threshold_inter) > 0:
                    best_idx = np.where((self.interaction_val_loss / best_loss - 1) < self.loss_threshold_inter)[0][0]
                else:
                    best_idx = np.argmin(self.interaction_val_loss)
                self.active_interaction_index.append(sorted_index[:best_idx])
                # Re-apply the selected prefix to column i of the switcher.
                interaction_switcher = np.zeros((self.interact_num, 1))
                interaction_switcher[self.active_interaction_index[-1].astype(int)] = 1
                tmp2 = self.output_layer.interaction_switcher.numpy()
                tmp2[:,i] = interaction_switcher.ravel()
                self.output_layer.interaction_switcher.assign(tf.constant(tmp2, dtype=tf.float32))
def fine_tune_interaction(self, tr_x, tr_y, val_x, val_y):
train_size = tr_x.shape[0]
for epoch in range(self.tuning_epochs):
shuffle_index = np.arange(train_size)
np.random.shuffle(shuffle_index)
tr_x = tr_x[shuffle_index]
tr_y = tr_y[shuffle_index]
for iterations in range(train_size // self.batch_size):
offset = (iterations * self.batch_size) % train_size
batch_xx = tr_x[offset:(offset + self.batch_size), :]
batch_yy = tr_y[offset:(offset + self.batch_size)]
self.train_interaction(tf.cast(batch_xx, tf.float32), batch_yy)
self.err_train_interaction_tuning.append(self.evaluate(tr_x, tr_y, main_effect_training=False, interaction_training=False))
self.err_val_interaction_tuning.append(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False))
if self.verbose & (epoch % 1 == 0):
print("Interaction tuning epoch: %d, train loss: %0.5f, val loss: %0.5f" %
(epoch + 1, self.err_train_interaction_tuning[-1], self.err_val_interaction_tuning[-1]))
def fine_tune(self, tr_x, tr_y, val_x, val_y):
train_size = tr_x.shape[0]
for epoch in range(self.tuning_epochs):
shuffle_index = np.arange(train_size)
np.random.shuffle(shuffle_index)
tr_x = tr_x[shuffle_index]
tr_y = tr_y[shuffle_index]
for iterations in range(train_size // self.batch_size):
offset = (iterations * self.batch_size) % train_size
batch_xx = tr_x[offset:(offset + self.batch_size), :]
batch_yy = tr_y[offset:(offset + self.batch_size)]
self.train_all(tf.cast(batch_xx, tf.float32), batch_yy)
self.err_train_all_tuning.append(self.evaluate(tr_x, tr_y, main_effect_training=False, interaction_training=False))
self.err_val_all_tuning.append(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False))
if self.verbose & (epoch % 1 == 0):
print("final tuning epoch: %d, train loss: %0.5f, val loss: %0.5f" %
(epoch + 1, self.err_train_all_tuning[-1], self.err_val_all_tuning[-1]))
self.train_main_effect(tf.cast(batch_xx, tf.float32), batch_yy)
    def fit(self, tr_x, val_x, tr_y, val_y, tr_idx, val_idx):
        """Run the full two-stage GAMI-Net training pipeline.

        Stage 1 trains and prunes the main effects; stage 2 adds, trains and
        prunes pairwise interactions, followed by a joint fine-tuning pass.
        Targets are re-encoded first for ordinal regression (cumulative
        coding) and multi-class tasks (one-hot).

        Parameters
        ----------
        tr_x, tr_y : training features / targets.
        val_x, val_y : validation features / targets.
        tr_idx, val_idx : index arrays identifying the two splits (stored on
            the instance; not otherwise used here).
        """
        ## data loading
        self.tr_x = tr_x
        if self.task_type == "Ordinal_Regression":
            # Cumulative coding; training rows sorted by descending target.
            tr_y_c = self.coding(tr_y)
            idx = np.argsort(tr_y.reshape(1,-1))[0][::-1]
            tr_y = tr_y_c[idx]
            tr_x = tr_x[idx]
            val_y = self.coding(val_y)
        elif self.task_type == "MultiClassification":
            # One-hot encode targets; encoder fitted on training labels only.
            enc = OneHotEncoder()
            enc.fit(tr_y.reshape(-1,1))
            tr_y = enc.transform(tr_y.reshape(-1,1)).toarray()
            val_y = enc.transform(val_y.reshape(-1,1)).toarray()
        self.tr_idx = tr_idx
        self.val_idx = val_idx
        ## initialization
        # Reset all training logs and selection state before a fresh fit.
        self.data_dict_density = {}
        self.err_train_main_effect_training = []
        self.err_val_main_effect_training = []
        self.err_train_main_effect_tuning = []
        self.err_val_main_effect_tuning = []
        self.err_train_interaction_training = []
        self.err_val_interaction_training = []
        self.err_train_interaction_tuning = []
        self.err_val_interaction_tuning = []
        self.err_train_all_tuning = []
        self.err_val_all_tuning = []
        self.interaction_list = []
        self.active_main_effect_index = []
        self.active_interaction_index = []
        self.main_effect_val_loss = []
        self.interaction_val_loss = []
        self.estimate_density(tr_x)
        if self.verbose:
            print("#" * 20 + "GAMI-Net training start." + "#" * 20)
        ## step 1: main effects
        if self.verbose:
            print("#" * 10 + "Stage 1: main effect training start." + "#" * 10)
        self.fit_main_effect(tr_x, tr_y, val_x, val_y)
        if self.verbose:
            print("#" * 10 + "Stage 1: main effect training stop." + "#" * 10)
        self.prune_main_effect(val_x, val_y)
        if self.multi_type_num ==0:
            # Single-output case: tune only when some (but not all) main
            # effects survived pruning; stop entirely when none did.
            if len(self.active_main_effect_index) == 0:
                if self.verbose:
                    print("#" * 10 + "No main effect is selected, training stop." + "#" * 10)
                return
            elif len(self.active_main_effect_index) < self.input_num:
                if self.verbose:
                    print(str(self.input_num - len(self.active_main_effect_index)) + " main effects are pruned, start tuning." + "#" * 10)
                self.fine_tune_main_effect(tr_x, tr_y, val_x, val_y)
            else:
                if self.verbose:
                    print("#" * 10 + "No main effect is pruned, the tuning step is skipped." + "#" * 10)
        else:
            # Multi-type case: count per-class outcomes, then tune once if any
            # class had effects pruned; stop only if every class kept nothing.
            ame_count1=0
            ame_count2=0
            class_=-1
            for ame_index in self.active_main_effect_index:
                class_ = class_+1
                if len(ame_index) == 0:
                    ame_count1 = ame_count1+1
                    print("#" * 10 + "No main effect is selected for class "+str(class_)+ "#" * 10)
                    continue
                elif len(ame_index) < self.input_num:
                    ame_count2 = ame_count2+1
                    if self.verbose:
                        print(str(self.input_num - len(ame_index)) + " main effects are pruned for class "+str(class_) + "#" * 10)
                    continue
                else:
                    if self.verbose:
                        print("#" * 10 + "No main effect is pruned, the tuning step is skipped." + "#" * 10)
                    continue
            if ame_count1 == self.multi_type_num:
                if self.verbose:
                    print("#" * 10 + "No main effect is selected, training stop." + "#" * 10)
                return
            elif ame_count2 >0:
                print("start tuning.")
                self.fine_tune_main_effect(tr_x, tr_y, val_x, val_y)
        ## step2: interaction
        if self.interact_num == 0:
            if self.verbose:
                print("#" * 10 + "Max interaction is specified to zero, training stop." + "#" * 10)
            return
        if self.verbose:
            print("#" * 10 + "Stage 2: interaction training start." + "#" * 10)
        self.add_interaction(tr_x, tr_y, val_x, val_y)
        self.fit_interaction(tr_x, tr_y, val_x, val_y)
        if self.verbose:
            print("#" * 10 + "Stage 2: interaction training stop." + "#" * 10)
        self.prune_interaction(val_x, val_y)
        if len(self.active_interaction_index) == 0:
            # All interactions pruned: zero the interaction bias so the model
            # degenerates to a plain GAM.
            if self.verbose:
                print("#" * 10 + "No interaction is selected, the model returns to GAM." + "#" * 10)
            self.output_layer.interaction_output_bias.assign(tf.constant(np.zeros([self.multi_type_num+1]).ravel(), dtype=tf.float32))
        elif len(self.active_interaction_index) < len(self.interaction_list):
            if self.verbose:
                print("#" * 10 + str(len(self.interaction_list) - len(self.active_interaction_index))
                      + " interactions are pruned, start tuning." + "#" * 10)
            self.fine_tune(tr_x, tr_y, val_x, val_y)
        else:
            if self.verbose:
                print("#" * 10 + "No main interaction is pruned, the tuning step is skipped.")
        if self.verbose:
            print("#" * 20 + "GAMI-Net training finished." + "#" * 20)
def summary_logs(self, save_dict=False, folder="./", name="summary_logs"):
data_dict_log = {}
data_dict_log.update({"err_train_main_effect_training":self.err_train_main_effect_training,
"err_val_main_effect_training":self.err_val_main_effect_training,
"err_train_main_effect_tuning":self.err_train_main_effect_tuning,
"err_val_main_effect_tuning":self.err_val_main_effect_tuning,
"err_train_interaction_training":self.err_train_interaction_training,
"err_val_interaction_training":self.err_val_interaction_training,
"err_train_interaction_tuning":self.err_train_all_tuning,
"err_val_interaction_tuning":self.err_val_all_tuning,
"interaction_list":self.interaction_list,
"active_main_effect_index":self.active_main_effect_index,
"active_interaction_index":self.active_interaction_index,
"main_effect_val_loss":self.main_effect_val_loss,
"interaction_val_loss":self.interaction_val_loss})
if save_dict:
if not os.path.exists(folder):
os.makedirs(folder)
save_path = folder + name
np.save("%s.npy" % save_path, data_dict_log)
return data_dict_log
    def global_explain(self, class_=0, main_grid_size=None, interact_grid_size=None, save_dict=False, folder="./", name="global_explain", threshold=0):
        """Evaluate every fitted main effect and interaction on a dense grid.

        Builds a dict (keyed by feature name, and by "f1 vs. f2" for pairs)
        describing each component's type, importance, grid inputs and network
        outputs, suitable for global-interpretation plots.

        Parameters
        ----------
        class_ : int, default 0
            Output column whose weights/switchers are read.
        main_grid_size, interact_grid_size : int or None
            Grid resolutions; default to the model's own grid sizes.
        save_dict : bool, default False
            If True, also persist the dict to ``folder + name + ".npy"``.
        threshold : float, default 0
            Importance values below this are zeroed.

        Returns
        -------
        dict of per-component explanation entries (built on top of
        ``self.data_dict_density``, which is mutated in place).
        """
        ## By default, we use the same main_grid_size and interact_grid_size as that of the zero mean constraint
        ## Alternatively, we can also specify it manually, e.g., when we want to have the same grid size as EBM (256).
        if main_grid_size is None:
            main_grid_size = self.main_grid_size
        if interact_grid_size is None:
            interact_grid_size = self.interact_grid_size
        data_dict_global = self.data_dict_density
        componment_scales = self.get_all_active_rank(class_,self.tr_x)
        # Suppress components whose importance falls below the threshold.
        componment_scales[componment_scales < threshold] =0
        # --- main effects -------------------------------------------------
        for indice in range(self.input_num):
            feature_name = list(self.variables_names)[indice]
            subnet = self.maineffect_blocks.subnets[indice]
            if indice in self.numerical_index_list:
                # Numerical feature: evaluate the subnet on a [0, 1] grid and
                # map the grid back to the original scale for display.
                sx = self.meta_info[feature_name]["scaler"]
                main_effect_inputs = np.linspace(0, 1, main_grid_size).reshape([-1, 1])
                main_effect_inputs_original = sx.inverse_transform(main_effect_inputs)
                # Output scaled by this effect's weight and on/off switcher.
                main_effect_outputs = (self.output_layer.main_effect_weights[:,class_].numpy()[indice]
                                       * self.output_layer.main_effect_switcher[:,class_].numpy()[indice]
                                       * subnet.__call__(tf.cast(tf.constant(main_effect_inputs), tf.float32)).numpy())
                data_dict_global[feature_name].update({"type":"continuous",
                                                       "importance":componment_scales[indice],
                                                       "inputs":main_effect_inputs_original.ravel(),
                                                       "outputs":main_effect_outputs.ravel()})
            elif indice in self.categ_index_list:
                # Categorical feature: one grid point per level.
                main_effect_inputs_original = self.meta_info[feature_name]["values"]
                main_effect_inputs = np.arange(len(main_effect_inputs_original)).reshape([-1, 1])
                main_effect_outputs = (self.output_layer.main_effect_weights[:,class_].numpy()[indice]
                                       * self.output_layer.main_effect_switcher[:,class_].numpy()[indice]
                                       * subnet.__call__(tf.cast(main_effect_inputs, tf.float32)).numpy())
                # <= 6 levels: tick every level; otherwise pick 4 spread ticks.
                main_effect_input_ticks = (main_effect_inputs.ravel().astype(int) if len(main_effect_inputs_original) <= 6 else
                                           np.linspace(0.1 * len(main_effect_inputs_original), len(main_effect_inputs_original) * 0.9, 4).astype(int))
                main_effect_input_labels = [main_effect_inputs_original[i] for i in main_effect_input_ticks]
                # Truncate labels when the combined label text is too long.
                if len("".join(list(map(str, main_effect_input_labels)))) > 30:
                    main_effect_input_labels = [str(main_effect_inputs_original[i])[:4] for i in main_effect_input_ticks]
                data_dict_global[feature_name].update({"type":"categorical",
                                                       "importance":componment_scales[indice],
                                                       "inputs":main_effect_inputs_original,
                                                       "outputs":main_effect_outputs.ravel(),
                                                       "input_ticks":main_effect_input_ticks,
                                                       "input_labels":main_effect_input_labels})
        # --- pairwise interactions ---------------------------------------
        for indice in range(self.interact_num_added):
            response = []
            inter_net = self.interact_blocks.interacts[indice]
            feature_name1 = self.variables_names[self.interaction_list[indice][0]]
            feature_name2 = self.variables_names[self.interaction_list[indice][1]]
            feature_type1 = "categorical" if feature_name1 in self.categ_variable_list else "continuous"
            feature_type2 = "categorical" if feature_name2 in self.categ_variable_list else "continuous"
            axis_extent = []
            interact_input_list = []
            # Build the per-axis grid, ticks, labels and plot extent for the
            # first feature (categorical: level indices; numerical: [0, 1]).
            if feature_name1 in self.categ_variable_list:
                interact_input1_original = self.meta_info[feature_name1]["values"]
                interact_input1 = np.arange(len(interact_input1_original), dtype=np.float32)
                interact_input1_ticks = (interact_input1.astype(int) if len(interact_input1) <= 6 else
                                         np.linspace(0.1 * len(interact_input1), len(interact_input1) * 0.9, 4).astype(int))
                interact_input1_labels = [interact_input1_original[i] for i in interact_input1_ticks]
                if len("".join(list(map(str, interact_input1_labels)))) > 30:
                    interact_input1_labels = [str(interact_input1_original[i])[:4] for i in interact_input1_ticks]
                interact_input_list.append(interact_input1)
                axis_extent.extend([-0.5, len(interact_input1_original) - 0.5])
            else:
                sx1 = self.meta_info[feature_name1]["scaler"]
                interact_input1 = np.array(np.linspace(0, 1, interact_grid_size), dtype=np.float32)
                interact_input1_original = sx1.inverse_transform(interact_input1.reshape([-1, 1])).ravel()
                interact_input1_ticks = []
                interact_input1_labels = []
                interact_input_list.append(interact_input1)
                axis_extent.extend([interact_input1_original.min(), interact_input1_original.max()])
            # Same grid construction for the second feature.
            if feature_name2 in self.categ_variable_list:
                interact_input2_original = self.meta_info[feature_name2]["values"]
                interact_input2 = np.arange(len(interact_input2_original), dtype=np.float32)
                interact_input2_ticks = (interact_input2.astype(int) if len(interact_input2) <= 6 else
                                         np.linspace(0.1 * len(interact_input2), len(interact_input2) * 0.9, 4).astype(int))
                interact_input2_labels = [interact_input2_original[i] for i in interact_input2_ticks]
                if len("".join(list(map(str, interact_input2_labels)))) > 30:
                    interact_input2_labels = [str(interact_input2_original[i])[:4] for i in interact_input2_ticks]
                interact_input_list.append(interact_input2)
                axis_extent.extend([-0.5, len(interact_input2_original) - 0.5])
            else:
                sx2 = self.meta_info[feature_name2]["scaler"]
                interact_input2 = np.array(np.linspace(0, 1, interact_grid_size), dtype=np.float32)
                interact_input2_original = sx2.inverse_transform(interact_input2.reshape([-1, 1])).ravel()
                interact_input2_ticks = []
                interact_input2_labels = []
                interact_input_list.append(interact_input2)
                axis_extent.extend([interact_input2_original.min(), interact_input2_original.max()])
            # 2-D grid (axis 2 reversed so the image renders top-to-bottom),
            # flattened to (n, 2) pairs for the interaction subnetwork.
            x1, x2 = np.meshgrid(interact_input_list[0], interact_input_list[1][::-1])
            input_grid = np.hstack([np.reshape(x1, [-1, 1]), np.reshape(x2, [-1, 1])])
            # Output scaled by the interaction's weight and on/off switcher.
            interact_outputs = (self.output_layer.interaction_weights[:,class_].numpy()[indice]
                                * self.output_layer.interaction_switcher[:,class_].numpy()[indice]
                                * inter_net.__call__(input_grid, training=False).numpy().reshape(x1.shape))
            data_dict_global.update({feature_name1 + " vs. " + feature_name2:{"type":"pairwise",
                                                                              "xtype":feature_type1,
                                                                              "ytype":feature_type2,
                                                                              "importance":componment_scales[self.input_num + indice],
                                                                              "input1":interact_input1_original,
                                                                              "input2":interact_input2_original,
                                                                              "outputs":interact_outputs,
                                                                              "input1_ticks": interact_input1_ticks,
                                                                              "input2_ticks": interact_input2_ticks,
                                                                              "input1_labels": interact_input1_labels,
                                                                              "input2_labels": interact_input2_labels,
                                                                              "axis_extent":axis_extent}})
        if save_dict:
            # Create the target folder on demand, then dump as a .npy pickle.
            if not os.path.exists(folder):
                os.makedirs(folder)
            save_path = folder + name
            np.save("%s.npy" % save_path, data_dict_global)
        return data_dict_global
def local_explain(self, class_, mf_output,x, y=None, save_dict=False, folder="./", name="local_explain"):
predicted = self.predict(x)
initial_predict = self.predict_initial(x).numpy()
intercept = self.output_layer.main_effect_output_bias[class_].numpy() + self.output_layer.interaction_output_bias[class_].numpy()
main_effect_output = self.maineffect_blocks.__call__(tf.cast(tf.constant(x), tf.float32)).numpy().ravel()
if self.interact_num > 0:
interaction_output = self.interact_blocks.__call__(tf.cast(tf.constant(x), tf.float32)).numpy().ravel()
else:
interaction_output = np.array([])
main_effect_weights = ((self.output_layer.main_effect_weights[:,class_].numpy()) * self.output_layer.main_effect_switcher[:,class_].numpy()).ravel()
interaction_weights = ((self.output_layer.interaction_weights[:,class_].numpy()[:self.interact_num_added])
* self.output_layer.interaction_switcher[:,class_].numpy()[:self.interact_num_added]).ravel()
interaction_weights = np.hstack([interaction_weights, np.zeros((self.interact_num - self.interact_num_added))])
scores = np.hstack([mf_output,intercept, np.hstack([main_effect_weights, interaction_weights])
* np.hstack([main_effect_output, interaction_output])])
try:
active_indice = 2 + np.hstack([-2,-1, self.active_main_effect_index[class_], self.input_num + self.active_interaction_index[0].astype(int)])
except:
active_indice = 2 + np.hstack([-2,-1, self.active_main_effect_index[class_], self.input_num + self.active_interaction_index.astype(int)])
effect_names = np.hstack(["Latent_effect","Intercept",
np.array(self.variables_names),
[self.variables_names[self.interaction_list[i][0]] + " x "
+ self.variables_names[self.interaction_list[i][1]] for i in range(len(self.interaction_list))]])
if self.task_type == "Regression":
data_dict_local = {"active_indice": active_indice.astype(int),
"scores": scores,
"effect_names": effect_names,
"predicted": predicted,
"actual": y}
else:
data_dict_local = {"active_indice": active_indice.astype(int),
"scores": scores,
"effect_names": effect_names,
"predicted": predicted,
"initial_predict": initial_predict,
"actual": y}
if save_dict:
if not os.path.exists(folder):
os.makedirs(folder)
save_path = folder + name
np.save("%s.npy" % save_path, data_dict_local)
return data_dict_local | [
"numpy.hstack",
"tensorflow.GradientTape",
"numpy.array",
"numpy.argsort",
"tensorflow.keras.losses.CategoricalCrossentropy",
"tensorflow.cast",
"numpy.arange",
"numpy.save",
"os.path.exists",
"numpy.reshape",
"numpy.where",
"itertools.product",
"tensorflow.concat",
"numpy.linspace",
"nu... | [((2506, 2534), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (2520, 2534), True, 'import numpy as np\n'), ((2543, 2575), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['random_state'], {}), '(random_state)\n', (2561, 2575), True, 'import tensorflow as tf\n'), ((5331, 5381), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'self.lr_bp'}), '(learning_rate=self.lr_bp)\n', (5355, 5381), True, 'import tensorflow as tf\n'), ((13943, 13955), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13951, 13955), True, 'import numpy as np\n'), ((14721, 14733), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (14729, 14733), True, 'import numpy as np\n'), ((17280, 17304), 'numpy.vstack', 'np.vstack', (['[beta, gamma]'], {}), '([beta, gamma])\n', (17289, 17304), True, 'import numpy as np\n'), ((20223, 20248), 'numpy.zeros', 'np.zeros', (['(y.shape[0], 4)'], {}), '((y.shape[0], 4))\n', (20231, 20248), True, 'import numpy as np\n'), ((20708, 20733), 'numpy.zeros', 'np.zeros', (['(x.shape[0], 1)'], {}), '((x.shape[0], 1))\n', (20716, 20733), True, 'import numpy as np\n'), ((5515, 5550), 'tensorflow.keras.losses.MeanAbsoluteError', 'tf.keras.losses.MeanAbsoluteError', ([], {}), '()\n', (5548, 5550), True, 'import tensorflow as tf\n'), ((6392, 6438), 'tensorflow.zeros', 'tf.zeros', (['[inputs.shape[0], self.interact_num]'], {}), '([inputs.shape[0], self.interact_num])\n', (6400, 6438), True, 'import tensorflow as tf\n'), ((7320, 7342), 'tensorflow.cast', 'tf.cast', (['x', 'tf.float32'], {}), '(x, tf.float32)\n', (7327, 7342), True, 'import tensorflow as tf\n'), ((8077, 8103), 'tensorflow.keras.backend.eval', 'tf.keras.backend.eval', (['ind'], {}), '(ind)\n', (8098, 8103), True, 'import tensorflow as tf\n'), ((8241, 8267), 'tensorflow.keras.backend.eval', 'tf.keras.backend.eval', (['ind'], {}), '(ind)\n', (8262, 8267), True, 'import tensorflow as tf\n'), ((9678, 9695), 
'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (9693, 9695), True, 'import tensorflow as tf\n'), ((10973, 10990), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (10988, 10990), True, 'import tensorflow as tf\n'), ((12262, 12279), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (12277, 12279), True, 'import tensorflow as tf\n'), ((24529, 24562), 'numpy.min', 'np.min', (['self.main_effect_val_loss'], {}), '(self.main_effect_val_loss)\n', (24535, 24562), True, 'import numpy as np\n'), ((24961, 24990), 'numpy.zeros', 'np.zeros', (['(self.input_num, 1)'], {}), '((self.input_num, 1))\n', (24969, 24990), True, 'import numpy as np\n'), ((27496, 27520), 'numpy.arange', 'np.arange', (['tr_x.shape[0]'], {}), '(tr_x.shape[0])\n', (27505, 27520), True, 'import numpy as np\n'), ((27533, 27565), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffle_index'], {}), '(shuffle_index)\n', (27550, 27565), True, 'import numpy as np\n'), ((29789, 29821), 'numpy.zeros', 'np.zeros', (['(self.interact_num, 1)'], {}), '((self.interact_num, 1))\n', (29797, 29821), True, 'import numpy as np\n'), ((31160, 31192), 'numpy.zeros', 'np.zeros', (['(self.interact_num, 1)'], {}), '((self.interact_num, 1))\n', (31168, 31192), True, 'import numpy as np\n'), ((32527, 32566), 'numpy.meshgrid', 'np.meshgrid', (['length1_grid', 'length2_grid'], {}), '(length1_grid, length2_grid)\n', (32538, 32566), True, 'import numpy as np\n'), ((33137, 33161), 'numpy.arange', 'np.arange', (['tr_x.shape[0]'], {}), '(tr_x.shape[0])\n', (33146, 33161), True, 'import numpy as np\n'), ((33174, 33206), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffle_index'], {}), '(shuffle_index)\n', (33191, 33206), True, 'import numpy as np\n'), ((35649, 35682), 'numpy.min', 'np.min', (['self.interaction_val_loss'], {}), '(self.interaction_val_loss)\n', (35655, 35682), True, 'import numpy as np\n'), ((36083, 36115), 'numpy.zeros', 'np.zeros', (['(self.interact_num, 1)'], {}), 
'((self.interact_num, 1))\n', (36091, 36115), True, 'import numpy as np\n'), ((38618, 38639), 'numpy.arange', 'np.arange', (['train_size'], {}), '(train_size)\n', (38627, 38639), True, 'import numpy as np\n'), ((38652, 38684), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffle_index'], {}), '(shuffle_index)\n', (38669, 38684), True, 'import numpy as np\n'), ((39829, 39850), 'numpy.arange', 'np.arange', (['train_size'], {}), '(train_size)\n', (39838, 39850), True, 'import numpy as np\n'), ((39863, 39895), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffle_index'], {}), '(shuffle_index)\n', (39880, 39895), True, 'import numpy as np\n'), ((47664, 47708), 'numpy.save', 'np.save', (["('%s.npy' % save_path)", 'data_dict_log'], {}), "('%s.npy' % save_path, data_dict_log)\n", (47671, 47708), True, 'import numpy as np\n'), ((54692, 54757), 'numpy.meshgrid', 'np.meshgrid', (['interact_input_list[0]', 'interact_input_list[1][::-1]'], {}), '(interact_input_list[0], interact_input_list[1][::-1])\n', (54703, 54757), True, 'import numpy as np\n'), ((56389, 56436), 'numpy.save', 'np.save', (["('%s.npy' % save_path)", 'data_dict_global'], {}), "('%s.npy' % save_path, data_dict_global)\n", (56396, 56436), True, 'import numpy as np\n'), ((57153, 57165), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (57161, 57165), True, 'import numpy as np\n'), ((59324, 59370), 'numpy.save', 'np.save', (["('%s.npy' % save_path)", 'data_dict_local'], {}), "('%s.npy' % save_path, data_dict_local)\n", (59331, 59370), True, 'import numpy as np\n'), ((5627, 5663), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {}), '()\n', (5661, 5663), True, 'import tensorflow as tf\n'), ((6659, 6684), 'tensorflow.concat', 'tf.concat', (['concat_list', '(1)'], {}), '(concat_list, 1)\n', (6668, 6684), True, 'import tensorflow as tf\n'), ((7662, 7684), 'tensorflow.cast', 'tf.cast', (['x', 'tf.float32'], {}), '(x, tf.float32)\n', (7669, 7684), True, 'import tensorflow 
as tf\n'), ((8473, 8495), 'tensorflow.cast', 'tf.cast', (['x', 'tf.float32'], {}), '(x, tf.float32)\n', (8480, 8495), True, 'import tensorflow as tf\n'), ((8807, 8829), 'tensorflow.cast', 'tf.cast', (['x', 'tf.float32'], {}), '(x, tf.float32)\n', (8814, 8829), True, 'import tensorflow as tf\n'), ((14427, 14439), 'numpy.abs', 'np.abs', (['beta'], {}), '(beta)\n', (14433, 14439), True, 'import numpy as np\n'), ((14565, 14594), 'numpy.argsort', 'np.argsort', (['componment_scales'], {}), '(componment_scales)\n', (14575, 14594), True, 'import numpy as np\n'), ((17323, 17347), 'numpy.abs', 'np.abs', (['componment_coefs'], {}), '(componment_coefs)\n', (17329, 17347), True, 'import numpy as np\n'), ((20333, 20355), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (20341, 20355), True, 'import numpy as np\n'), ((21490, 21515), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'length'], {}), '(0, 1, length)\n', (21501, 21515), True, 'import numpy as np\n'), ((21539, 21556), 'numpy.ones', 'np.ones', (['[length]'], {}), '([length])\n', (21546, 21556), True, 'import numpy as np\n'), ((22027, 22051), 'numpy.arange', 'np.arange', (['tr_x.shape[0]'], {}), '(tr_x.shape[0])\n', (22036, 22051), True, 'import numpy as np\n'), ((22068, 22100), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffle_index'], {}), '(shuffle_index)\n', (22085, 22100), True, 'import numpy as np\n'), ((24132, 24161), 'numpy.zeros', 'np.zeros', (['(self.input_num, 1)'], {}), '((self.input_num, 1))\n', (24140, 24161), True, 'import numpy as np\n'), ((24578, 24654), 'numpy.sum', 'np.sum', (['(self.main_effect_val_loss / best_loss - 1 < self.loss_threshold_main)'], {}), '(self.main_effect_val_loss / best_loss - 1 < self.loss_threshold_main)\n', (24584, 24654), True, 'import numpy as np\n'), ((24821, 24857), 'numpy.argmin', 'np.argmin', (['self.main_effect_val_loss'], {}), '(self.main_effect_val_loss)\n', (24830, 24857), True, 'import numpy as np\n'), ((25117, 25168), 'tensorflow.constant', 
'tf.constant', (['main_effect_switcher'], {'dtype': 'tf.float32'}), '(main_effect_switcher, dtype=tf.float32)\n', (25128, 25168), True, 'import tensorflow as tf\n'), ((26510, 26543), 'numpy.min', 'np.min', (['self.main_effect_val_loss'], {}), '(self.main_effect_val_loss)\n', (26516, 26543), True, 'import numpy as np\n'), ((26972, 27001), 'numpy.zeros', 'np.zeros', (['(self.input_num, 1)'], {}), '((self.input_num, 1))\n', (26980, 27001), True, 'import numpy as np\n'), ((27275, 27310), 'tensorflow.constant', 'tf.constant', (['tmp2'], {'dtype': 'tf.float32'}), '(tmp2, dtype=tf.float32)\n', (27286, 27310), True, 'import tensorflow as tf\n'), ((29943, 29994), 'tensorflow.constant', 'tf.constant', (['interaction_switcher'], {'dtype': 'tf.float32'}), '(interaction_switcher, dtype=tf.float32)\n', (29954, 29994), True, 'import tensorflow as tf\n'), ((32054, 32072), 'numpy.arange', 'np.arange', (['length1'], {}), '(length1)\n', (32063, 32072), True, 'import numpy as np\n'), ((32172, 32198), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'length1'], {}), '(0, 1, length1)\n', (32183, 32198), True, 'import numpy as np\n'), ((32360, 32378), 'numpy.arange', 'np.arange', (['length2'], {}), '(length2)\n', (32369, 32378), True, 'import numpy as np\n'), ((32478, 32504), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'length2'], {}), '(0, 1, length2)\n', (32489, 32504), True, 'import numpy as np\n'), ((32677, 32704), 'numpy.ones', 'np.ones', (['[length1, length2]'], {}), '([length1, length2])\n', (32684, 32704), True, 'import numpy as np\n'), ((32791, 32829), 'numpy.array', 'np.array', (['input_grid'], {'dtype': 'np.float32'}), '(input_grid, dtype=np.float32)\n', (32799, 32829), True, 'import numpy as np\n'), ((35249, 35281), 'numpy.zeros', 'np.zeros', (['(self.interact_num, 1)'], {}), '((self.interact_num, 1))\n', (35257, 35281), True, 'import numpy as np\n'), ((35698, 35775), 'numpy.sum', 'np.sum', (['(self.interaction_val_loss / best_loss - 1 < self.loss_threshold_inter)'], 
{}), '(self.interaction_val_loss / best_loss - 1 < self.loss_threshold_inter)\n', (35704, 35775), True, 'import numpy as np\n'), ((35943, 35979), 'numpy.argmin', 'np.argmin', (['self.interaction_val_loss'], {}), '(self.interaction_val_loss)\n', (35952, 35979), True, 'import numpy as np\n'), ((36242, 36293), 'tensorflow.constant', 'tf.constant', (['interaction_switcher'], {'dtype': 'tf.float32'}), '(interaction_switcher, dtype=tf.float32)\n', (36253, 36293), True, 'import tensorflow as tf\n'), ((37620, 37653), 'numpy.min', 'np.min', (['self.interaction_val_loss'], {}), '(self.interaction_val_loss)\n', (37626, 37653), True, 'import numpy as np\n'), ((38084, 38116), 'numpy.zeros', 'np.zeros', (['(self.interact_num, 1)'], {}), '((self.interact_num, 1))\n', (38092, 38116), True, 'import numpy as np\n'), ((38390, 38425), 'tensorflow.constant', 'tf.constant', (['tmp2'], {'dtype': 'tf.float32'}), '(tmp2, dtype=tf.float32)\n', (38401, 38425), True, 'import tensorflow as tf\n'), ((41373, 41388), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (41386, 41388), False, 'from sklearn.preprocessing import MinMaxScaler, OneHotEncoder\n'), ((47554, 47576), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (47568, 47576), False, 'import os\n'), ((47594, 47613), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (47605, 47613), False, 'import os\n'), ((56279, 56301), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (56293, 56301), False, 'import os\n'), ((56319, 56338), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (56330, 56338), False, 'import os\n'), ((57625, 57678), 'numpy.zeros', 'np.zeros', (['(self.interact_num - self.interact_num_added)'], {}), '(self.interact_num - self.interact_num_added)\n', (57633, 57678), True, 'import numpy as np\n'), ((58309, 58339), 'numpy.array', 'np.array', (['self.variables_names'], {}), '(self.variables_names)\n', (58317, 58339), True, 'import numpy as np\n'), 
((59214, 59236), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (59228, 59236), False, 'import os\n'), ((59254, 59273), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (59265, 59273), False, 'import os\n'), ((4186, 4241), 'itertools.product', 'product', (['self.user_feature_list', 'self.item_feature_list'], {}), '(self.user_feature_list, self.item_feature_list)\n', (4193, 4241), False, 'from itertools import product\n'), ((5745, 5786), 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {}), '()\n', (5784, 5786), True, 'import tensorflow as tf\n'), ((15474, 15487), 'numpy.abs', 'np.abs', (['gamma'], {}), '(gamma)\n', (15480, 15487), True, 'import numpy as np\n'), ((15623, 15652), 'numpy.argsort', 'np.argsort', (['componment_scales'], {}), '(componment_scales)\n', (15633, 15652), True, 'import numpy as np\n'), ((19785, 19828), 'numpy.unique', 'np.unique', (['x[:, indice]'], {'return_counts': '(True)'}), '(x[:, indice], return_counts=True)\n', (19794, 19828), True, 'import numpy as np\n'), ((20404, 20426), 'numpy.array', 'np.array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (20412, 20426), True, 'import numpy as np\n'), ((22501, 22530), 'tensorflow.cast', 'tf.cast', (['batch_xx', 'tf.float32'], {}), '(batch_xx, tf.float32)\n', (22508, 22530), True, 'import tensorflow as tf\n'), ((23808, 23837), 'numpy.zeros', 'np.zeros', (['(self.input_num, 1)'], {}), '((self.input_num, 1))\n', (23816, 23837), True, 'import numpy as np\n'), ((24281, 24332), 'tensorflow.constant', 'tf.constant', (['main_effect_switcher'], {'dtype': 'tf.float32'}), '(main_effect_switcher, dtype=tf.float32)\n', (24292, 24332), True, 'import tensorflow as tf\n'), ((25491, 25526), 'tensorflow.constant', 'tf.constant', (['tmp1'], {'dtype': 'tf.float32'}), '(tmp1, dtype=tf.float32)\n', (25502, 25526), True, 'import tensorflow as tf\n'), ((25952, 25981), 'numpy.zeros', 'np.zeros', (['(self.input_num, 1)'], {}), 
'((self.input_num, 1))\n', (25960, 25981), True, 'import numpy as np\n'), ((26563, 26639), 'numpy.sum', 'np.sum', (['(self.main_effect_val_loss / best_loss - 1 < self.loss_threshold_main)'], {}), '(self.main_effect_val_loss / best_loss - 1 < self.loss_threshold_main)\n', (26569, 26639), True, 'import numpy as np\n'), ((26818, 26854), 'numpy.argmin', 'np.argmin', (['self.main_effect_val_loss'], {}), '(self.main_effect_val_loss)\n', (26827, 26854), True, 'import numpy as np\n'), ((27958, 27987), 'tensorflow.cast', 'tf.cast', (['batch_xx', 'tf.float32'], {}), '(batch_xx, tf.float32)\n', (27965, 27987), True, 'import tensorflow as tf\n'), ((31493, 31527), 'tensorflow.constant', 'tf.constant', (['tmp'], {'dtype': 'tf.float32'}), '(tmp, dtype=tf.float32)\n', (31504, 31527), True, 'import tensorflow as tf\n'), ((32603, 32626), 'numpy.reshape', 'np.reshape', (['x1', '[-1, 1]'], {}), '(x1, [-1, 1])\n', (32613, 32626), True, 'import numpy as np\n'), ((32628, 32651), 'numpy.reshape', 'np.reshape', (['x2', '[-1, 1]'], {}), '(x2, [-1, 1])\n', (32638, 32651), True, 'import numpy as np\n'), ((32878, 32914), 'numpy.array', 'np.array', (['pdf_grid'], {'dtype': 'np.float32'}), '(pdf_grid, dtype=np.float32)\n', (32886, 32914), True, 'import numpy as np\n'), ((33599, 33628), 'tensorflow.cast', 'tf.cast', (['batch_xx', 'tf.float32'], {}), '(batch_xx, tf.float32)\n', (33606, 33628), True, 'import tensorflow as tf\n'), ((34913, 34945), 'numpy.zeros', 'np.zeros', (['(self.interact_num, 1)'], {}), '((self.interact_num, 1))\n', (34921, 34945), True, 'import numpy as np\n'), ((35401, 35452), 'tensorflow.constant', 'tf.constant', (['interaction_switcher'], {'dtype': 'tf.float32'}), '(interaction_switcher, dtype=tf.float32)\n', (35412, 35452), True, 'import tensorflow as tf\n'), ((36746, 36780), 'tensorflow.constant', 'tf.constant', (['tmp'], {'dtype': 'tf.float32'}), '(tmp, dtype=tf.float32)\n', (36757, 36780), True, 'import tensorflow as tf\n'), ((37081, 37113), 'numpy.zeros', 'np.zeros', 
(['(self.interact_num, 1)'], {}), '((self.interact_num, 1))\n', (37089, 37113), True, 'import numpy as np\n'), ((37673, 37750), 'numpy.sum', 'np.sum', (['(self.interaction_val_loss / best_loss - 1 < self.loss_threshold_inter)'], {}), '(self.interaction_val_loss / best_loss - 1 < self.loss_threshold_inter)\n', (37679, 37750), True, 'import numpy as np\n'), ((37930, 37966), 'numpy.argmin', 'np.argmin', (['self.interaction_val_loss'], {}), '(self.interaction_val_loss)\n', (37939, 37966), True, 'import numpy as np\n'), ((39077, 39106), 'tensorflow.cast', 'tf.cast', (['batch_xx', 'tf.float32'], {}), '(batch_xx, tf.float32)\n', (39084, 39106), True, 'import tensorflow as tf\n'), ((40280, 40309), 'tensorflow.cast', 'tf.cast', (['batch_xx', 'tf.float32'], {}), '(batch_xx, tf.float32)\n', (40287, 40309), True, 'import tensorflow as tf\n'), ((40863, 40892), 'tensorflow.cast', 'tf.cast', (['batch_xx', 'tf.float32'], {}), '(batch_xx, tf.float32)\n', (40870, 40892), True, 'import tensorflow as tf\n'), ((52817, 52854), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'interact_grid_size'], {}), '(0, 1, interact_grid_size)\n', (52828, 52854), True, 'import numpy as np\n'), ((54242, 54279), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'interact_grid_size'], {}), '(0, 1, interact_grid_size)\n', (54253, 54279), True, 'import numpy as np\n'), ((54794, 54817), 'numpy.reshape', 'np.reshape', (['x1', '[-1, 1]'], {}), '(x1, [-1, 1])\n', (54804, 54817), True, 'import numpy as np\n'), ((54819, 54842), 'numpy.reshape', 'np.reshape', (['x2', '[-1, 1]'], {}), '(x2, [-1, 1])\n', (54829, 54842), True, 'import numpy as np\n'), ((57734, 57787), 'numpy.hstack', 'np.hstack', (['[main_effect_weights, interaction_weights]'], {}), '([main_effect_weights, interaction_weights])\n', (57743, 57787), True, 'import numpy as np\n'), ((57833, 57884), 'numpy.hstack', 'np.hstack', (['[main_effect_output, interaction_output]'], {}), '([main_effect_output, interaction_output])\n', (57842, 57884), True, 
'import numpy as np\n'), ((5875, 5916), 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {}), '()\n', (5914, 5916), True, 'import tensorflow as tf\n'), ((6788, 6813), 'tensorflow.concat', 'tf.concat', (['concat_list', '(1)'], {}), '(concat_list, 1)\n', (6797, 6813), True, 'import tensorflow as tf\n'), ((14487, 14499), 'numpy.abs', 'np.abs', (['beta'], {}), '(beta)\n', (14493, 14499), True, 'import numpy as np\n'), ((17395, 17419), 'numpy.abs', 'np.abs', (['componment_coefs'], {}), '(componment_coefs)\n', (17401, 17419), True, 'import numpy as np\n'), ((20475, 20497), 'numpy.array', 'np.array', (['[1, 1, 0, 0]'], {}), '([1, 1, 0, 0])\n', (20483, 20497), True, 'import numpy as np\n'), ((21624, 21662), 'numpy.array', 'np.array', (['input_grid'], {'dtype': 'np.float32'}), '(input_grid, dtype=np.float32)\n', (21632, 21662), True, 'import numpy as np\n'), ((21721, 21757), 'numpy.array', 'np.array', (['pdf_grid'], {'dtype': 'np.float32'}), '(pdf_grid, dtype=np.float32)\n', (21729, 21757), True, 'import numpy as np\n'), ((24689, 24767), 'numpy.where', 'np.where', (['(self.main_effect_val_loss / best_loss - 1 < self.loss_threshold_main)'], {}), '(self.main_effect_val_loss / best_loss - 1 < self.loss_threshold_main)\n', (24697, 24767), True, 'import numpy as np\n'), ((25396, 25420), 'numpy.zeros', 'np.zeros', (['self.input_num'], {}), '(self.input_num)\n', (25404, 25420), True, 'import numpy as np\n'), ((26242, 26276), 'tensorflow.constant', 'tf.constant', (['tmp'], {'dtype': 'tf.float32'}), '(tmp, dtype=tf.float32)\n', (26253, 26276), True, 'import tensorflow as tf\n'), ((35810, 35889), 'numpy.where', 'np.where', (['(self.interaction_val_loss / best_loss - 1 < self.loss_threshold_inter)'], {}), '(self.interaction_val_loss / best_loss - 1 < self.loss_threshold_inter)\n', (35818, 35889), True, 'import numpy as np\n'), ((36648, 36675), 'numpy.zeros', 'np.zeros', (['self.interact_num'], {}), '(self.interact_num)\n', (36656, 
36675), True, 'import numpy as np\n'), ((37376, 37411), 'tensorflow.constant', 'tf.constant', (['tmp1'], {'dtype': 'tf.float32'}), '(tmp1, dtype=tf.float32)\n', (37387, 37411), True, 'import tensorflow as tf\n'), ((48842, 48875), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'main_grid_size'], {}), '(0, 1, main_grid_size)\n', (48853, 48875), True, 'import numpy as np\n'), ((6922, 6947), 'tensorflow.concat', 'tf.concat', (['concat_list', '(1)'], {}), '(concat_list, 1)\n', (6931, 6947), True, 'import tensorflow as tf\n'), ((14509, 14521), 'numpy.abs', 'np.abs', (['beta'], {}), '(beta)\n', (14515, 14521), True, 'import numpy as np\n'), ((15539, 15552), 'numpy.abs', 'np.abs', (['gamma'], {}), '(gamma)\n', (15545, 15552), True, 'import numpy as np\n'), ((17429, 17453), 'numpy.abs', 'np.abs', (['componment_coefs'], {}), '(componment_coefs)\n', (17435, 17453), True, 'import numpy as np\n'), ((20546, 20568), 'numpy.array', 'np.array', (['[1, 1, 1, 0]'], {}), '([1, 1, 1, 0])\n', (20554, 20568), True, 'import numpy as np\n'), ((26678, 26756), 'numpy.where', 'np.where', (['(self.main_effect_val_loss / best_loss - 1 < self.loss_threshold_main)'], {}), '(self.main_effect_val_loss / best_loss - 1 < self.loss_threshold_main)\n', (26686, 26756), True, 'import numpy as np\n'), ((28622, 28647), 'tensorflow.cast', 'tf.cast', (['tr_x', 'tf.float32'], {}), '(tr_x, tf.float32)\n', (28629, 28647), True, 'import tensorflow as tf\n'), ((28765, 28791), 'tensorflow.cast', 'tf.cast', (['val_x', 'tf.float32'], {}), '(val_x, tf.float32)\n', (28772, 28791), True, 'import tensorflow as tf\n'), ((37789, 37868), 'numpy.where', 'np.where', (['(self.interaction_val_loss / best_loss - 1 < self.loss_threshold_inter)'], {}), '(self.interaction_val_loss / best_loss - 1 < self.loss_threshold_inter)\n', (37797, 37868), True, 'import numpy as np\n'), ((45684, 45719), 'numpy.zeros', 'np.zeros', (['[self.multi_type_num + 1]'], {}), '([self.multi_type_num + 1])\n', (45692, 45719), True, 'import numpy as 
np\n'), ((7057, 7082), 'tensorflow.concat', 'tf.concat', (['concat_list', '(1)'], {}), '(concat_list, 1)\n', (7066, 7082), True, 'import tensorflow as tf\n'), ((15562, 15575), 'numpy.abs', 'np.abs', (['gamma'], {}), '(gamma)\n', (15568, 15575), True, 'import numpy as np\n'), ((20617, 20639), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (20625, 20639), True, 'import numpy as np\n'), ((56911, 56925), 'tensorflow.constant', 'tf.constant', (['x'], {}), '(x)\n', (56922, 56925), True, 'import tensorflow as tf\n'), ((49232, 49263), 'tensorflow.constant', 'tf.constant', (['main_effect_inputs'], {}), '(main_effect_inputs)\n', (49243, 49263), True, 'import tensorflow as tf\n'), ((50081, 50120), 'tensorflow.cast', 'tf.cast', (['main_effect_inputs', 'tf.float32'], {}), '(main_effect_inputs, tf.float32)\n', (50088, 50120), True, 'import tensorflow as tf\n'), ((57061, 57075), 'tensorflow.constant', 'tf.constant', (['x'], {}), '(x)\n', (57072, 57075), True, 'import tensorflow as tf\n')] |
import zipfile
from matplotlib.ticker import FormatStrFormatter
import matplotlib.ticker as tick
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import logging
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator,MaxNLocator
from matplotlib.offsetbox import TextArea, VPacker, AnnotationBbox,HPacker,PackerBase,PaddedBox
logging.basicConfig(level = logging.INFO)
# NOTE(review): this rebinds the name `logging` from the logging MODULE to a
# Logger instance, shadowing the module for the rest of this file.  Later
# calls such as logging.info(...)/logging.error(...) still work because they
# resolve to Logger methods, but module-level helpers (e.g. basicConfig,
# getLogger) are unreachable below this line.  Renaming to `logger` would be
# cleaner but would touch every call site in the class.
logging = logging.getLogger("ActionEventPlotter")
#os.chdir("/opt/biogears/core/build/runtime/")
class ActionEventPlotter():
    """
    Plots one results column against time and overlays a coloured vertical
    line for every Action/Event parsed out of the scenario log file.

    Attributes populated by plot():
        events   -- list of {"time": float, "text": str} parsed "[Event]" entries
        actions  -- list of {"time": float, "text": str} parsed "[Action]" entries
        timeData -- x positions (seconds) of the vertical marker lines
        data     -- legend labels ("Event:..." / "Actions:...") matching timeData
    """
    def __init__(self):
        self.events = []
        self.data = []
        self.timeData = []
        self.actions = []

    def plot(self, job):
        """
        Plot
        _________________
        Resolve the job's data/log/scenario paths -- defaulting to the
        Scenarios baselines layout and falling back to job.basedir when the
        Runtime copies are missing -- then harvest actions/events from the
        log file and draw the graph.
        """
        baselines = os.path.join("Scenarios", job.verificationDirectory, "baselines")
        if job.dataPath is None:
            job.dataPath = baselines
        if job.logPath is None:
            job.logPath = baselines
        if job.scenarioPath is None:
            job.scenarioPath = os.path.join("Scenarios", job.verificationDirectory)
        if job.dataFile is None:
            job.dataFile = job.name + "Results.csv"
        if job.logFile is None:
            job.logFile = job.name + "Results.zip"
        if job.scenarioFile is None:
            job.scenarioFile = job.name + ".xml"
        if job.outputFilename is None:
            job.outputFilename = job.titleOverride + ".jpg"
        if len(job.outputFilename.split(".")) == 1:
            job.outputFilename += ".jpg"
        if job.imageWidth is None and job.imageHeight is None:
            job.imageWidth = 1600
            job.imageHeight = 800
        # Fall back to basedir-relative locations; csv results may instead
        # live inside a Results.zip next to the log.
        if not os.path.exists(job.dataPath):
            job.dataPath = os.path.join(job.basedir, job.dataPath)
        if not os.path.exists(job.logPath):
            job.logPath = os.path.join(job.basedir, job.logPath)
        if not os.path.isfile(os.path.join(job.dataPath, job.dataFile)):
            job.dataFile = job.name + "Results.zip"
        if not os.path.isfile(os.path.join(job.dataPath, job.dataFile)):
            job.dataPath = os.path.join(job.basedir, job.dataPath)
        if not job.skipAllEvents:
            self.events = self.getEventsFromLog(os.path.join(job.logPath, job.logFile), job)
        if not job.skipAllActions:
            self.actions = self.getActionsFromLog(os.path.join(job.logPath, job.logFile), job)
        # Either list may be empty; iterating both covers every combination
        # the original if/elif ladder handled.
        for event in self.events:
            self.timeData.append(event["time"])
            self.data.append("Event:" + event["text"])
        for action in self.actions:
            self.timeData.append(action["time"])
            self.data.append("Actions:" + action["text"])
        if not os.path.exists(os.path.dirname(job.outputDir)):
            os.mkdir(os.path.dirname(job.outputDir))
        if not job.fontSize:
            job.fontSize = 22
        if job.log >= 2:
            logging.info("Name of Plot" + job.name)
            logging.info("Input File: " + os.path.join(job.dataPath, job.dataFile))
            logging.info("Output File: " + os.path.join(job.outputDir, job.outputFilename))
        self.drawgraph(self.events, self.actions, job,
                       os.path.join(job.dataPath, job.dataFile),
                       os.path.join(job.outputDir, job.outputFilename))

    def getActionsFromLog(self, file_, job):
        """
        getActionFromLog
        _________________
        Parse every "[Action]" entry (time in seconds plus description) out
        of the log contained in a results zip.  Continuation lines that
        begin with a tab are folded into the preceding action's text.

        Returns a list of {"time": float, "text": str} dicts.
        """
        actions = []
        fin = None
        try:
            if file_.endswith(".zip"):
                try:
                    zf = zipfile.ZipFile(file_, 'r')
                    for member in zf.filelist:
                        if member.filename.endswith(".log"):
                            fin = zf.open(member.filename, 'r')
                            break
                    # We expect results zips to only contain 1 text file
                except IOError:
                    logging.error("ActionEventPlotter couldn't read the log file " + file_)
        except IOError:
            logging.error("Zip file not found " + file_)
        if not fin:
            return actions

        def flush(pending, text):
            # Finalize and record one action.  BUGFIX: the original only
            # appended when a later non-tab line arrived, silently dropping
            # an action followed directly by another "[Action]" line or by
            # the end of the file.
            pending["text"] = text
            if job.logger == True and job.log > 2:
                logging.info("Adding Action:" + pending["text"])
            actions.append(pending)

        action = None
        txt = ""
        for raw in fin:
            line = raw.decode("utf-8")
            if len(line) == 0:
                continue
            if "[Action]" in line.split():
                if action is not None:
                    flush(action, txt.replace("\t", "\n\t", 1))
                action = {}
                actionText = line.split("[Action]", 1)[1].strip()
                actionTimeIndex = actionText.find("(s)")
                if actionTimeIndex == -1:
                    actionTimeIndex = actionText.find(",")
                try:
                    action["time"] = float(actionText[0:actionTimeIndex].strip())
                # BUGFIX: was `except NumberFormatException` -- a Java class
                # name that raises NameError in Python; float() raises ValueError.
                except ValueError:
                    logging.error("Couldn't correctly parse log file time to double")
                txt = actionText[actionText.find(",") + 1:].strip()
            elif action is not None and line.startswith("\t"):
                txt += line
            elif action is not None:
                flush(action, txt.replace("\t", "\n\t", 1))
                action = None
                txt = ""
        if action is not None:
            flush(action, txt.replace("\t", "\n\t", 1))
        fin.close()
        return actions

    def getEventsFromLog(self, file_, job):
        """
        getEventsFromLog
        __________________
        Parse every "[Event]" entry (time in seconds plus description) out
        of the log contained in a results zip, for plotting as vertical
        marker lines.

        Returns a list of {"time": float, "text": str} dicts.
        """
        events = []
        fin = None
        try:
            if file_.endswith(".zip"):
                try:
                    zf = zipfile.ZipFile(file_, 'r')
                    for member in zf.filelist:
                        if member.filename.endswith(".log"):
                            fin = zf.open(member.filename, 'r')
                            break
                except IOError:
                    logging.error("ActionEventPlotter couldn't read the log file " + file_)
        except IOError:
            logging.error("Zip File not found " + file_)
        if not fin:
            return events
        for raw in fin:
            line = raw.decode("utf-8")
            if len(line) == 0 or "[Event]" not in line.split():
                continue
            event = {}
            eventText = line.split("[Event]", 1)[1].strip()
            endTimeIndex = eventText.find("(s)")
            if endTimeIndex == -1:
                endTimeIndex = eventText.find(",")
            try:
                event["time"] = float(eventText[0:endTimeIndex].strip())
            # BUGFIX: was `except NumberFormatException` (Java name -> NameError).
            except ValueError:
                logging.error("Couldn't correctly parse log file time to double")
            event["text"] = eventText[eventText.find(",") + 1:].strip()
            if job.logger == True and job.log > 2:
                logging.info("Adding Event:" + event["text"])
            events.append(event)
        fin.close()
        return events

    def y_fmt(self, x, y):
        """
        y_fmt
        ________
        Tick formatter: scientific notation with an upper-case 'E' and the
        '+' sign and every '0' character stripped (e.g. 1500.0 -> '1.5E3').
        NOTE(review): stripping ALL zeros also mangles mantissas such as
        1.05 -> '1.5'; kept as-is so existing graph labels don't change.
        """
        return '{:2.2e}'.format(x).replace('e', 'E').replace("+", "").replace("0", "")

    def drawgraph(self, events, actions, job, input_zip, output_file):
        """
        draw_graph
        ________________
        Load the results table -- either a bare csv or the first csv found
        inside a results zip -- and hand it to plotting().
        """
        my_dpi = 96
        col = ["red", "yellow", "green", "blue", "orange", "lime", "magenta", "violet",
               "black", "purple", "0.1", "0.2", "0.75", "0.8", "0.9", "pink"]
        try:
            if input_zip.endswith(".csv"):
                df = pd.read_csv(input_zip, low_memory=False)
                self._tryPlot(events, actions, job, input_zip, output_file, df, my_dpi, col)
            elif input_zip.endswith(".zip"):
                zf = zipfile.ZipFile(input_zip)
                for member in zf.filelist:
                    if member.filename.endswith(".csv"):
                        df = pd.read_csv(zf.open(member.filename), low_memory=False)
                        self._tryPlot(events, actions, job, input_zip, output_file, df, my_dpi, col)
        except IOError:
            logging.error("Zip file Not found at :" + input_zip)

    def _tryPlot(self, events, actions, job, input_zip, output_file, df, my_dpi, col):
        """Run plotting() and translate failures into log messages."""
        try:
            self.plotting(events, actions, job, input_zip, output_file, df, my_dpi, col)
        except IOError:
            logging.error("File Not found at:" + input_zip)
        except Exception as e:
            logging.error("Exception occured when plotting header \"" + job.headers[0] + "\": " + str(e))

    def _renderLegendOnly(self, job):
        """Render only the action/event legend into its own image file."""
        if not os.path.exists(job.outputDir):
            os.mkdir(job.outputDir)
        colors = ["red", "yellow", "green", "blue", "orange", "lime", "magenta",
                  "violet", "black", "purple", "0.1", "0.2", "0.75", "0.8", "0.9", "pink"]
        # One invisible "_" marker per colour provides legend handles
        # without drawing any data.
        make_handle = lambda m, c: plt.plot([], [], marker=m, color=c, ls="none")[0]
        handles = [make_handle("_", c) for c in colors]
        labels = [entry.replace("\t", " ") for entry in self.data]
        legend = plt.legend(handles, labels, loc=3, ncol=3, framealpha=1, frameon=False, fontsize=12)
        plt.axis('off')

        def export_legend(legend, filename=os.path.join(job.outputDir, job.outputFilename),
                          expand=(-50, -50, 50, 50)):
            # Save just the legend's bounding box (padded by `expand` pixels).
            fig = legend.figure
            fig.canvas.draw()
            bbox = legend.get_window_extent()
            bbox = bbox.from_extents(*(bbox.extents + np.array(expand)))
            bbox = bbox.transformed(fig.dpi_scale_trans.inverted())
            fig.savefig(filename, dpi="figure", bbox_inches=bbox, pad_inches=0)

        export_legend(legend)
        if job.log > 0:
            logging.info("Creating Graph:" + job.outputFilename.split(".")[0])
        plt.close("all")

    def plotting(self, events, actions, job, input_zip, output_file, df, my_dpi, col):
        """
        plotting
        ________________
        Driver that draws the column-vs-time curve (optionally on a log y
        axis, optionally with experimental data overlaid) plus one vertical
        line per action/event, then saves the image.  With job.legendOnly
        set, only the legend is rendered.
        """
        # Plot every 20th sample to keep the images light.
        X = df.iloc[:, 0].values[::20]
        Y = df.loc[:, job.headers[0]].values[::20]
        Xexp = None
        Yexp = None
        plotExperimentalData = False
        try:
            if job.experimentalData is not None:
                df2 = pd.read_csv(job.experimentalData)
                Xexp = df2.iloc[:, 0]
                Yexp = df2.iloc[:, 1]
                plotExperimentalData = True
        except Exception as e:
            logging.info("Exception occured when opening Experimental Data: " + str(e))
        if job.legendOnly:
            self._renderLegendOnly(job)
            return
        fig, ax = plt.subplots()
        fig.set_size_inches(w=job.imageWidth / my_dpi + 1, h=job.imageHeight / my_dpi + 1)
        if not os.path.exists(job.outputDir):
            os.mkdir(job.outputDir)
        if job.logAxis:
            ax.set_yscale("log")
        else:
            ax.get_yaxis().set_major_locator(MaxNLocator(nbins=10, min_n_ticks=8))
            ax.get_xaxis().set_major_locator(MaxNLocator(nbins=15, min_n_ticks=10))
        ax.yaxis.set_major_formatter(tick.FuncFormatter(self.y_fmt))
        ax.yaxis.set_ticks_position("both")
        ax.yaxis.set_tick_params(labelright=True)
        plt.xlabel("Time(s)", fontsize=job.fontSize)
        plt.ylabel(job.headers[0], fontsize=job.fontSize)
        if job.titleOverride is None:
            plt.title(job.headers[0] + "_vs_Time_Action_Event_Plot", fontsize=job.fontSize)
            if job.log > 0:
                logging.info("Creating Graph:" + job.headers[0] + "_vs_Time_Action_Event_Plot")
        elif job.titleOverride == "None":
            # The literal string "None" suppresses the title entirely.
            if job.log > 0:
                logging.info("Creating Graph:" + job.outputFilename.split(".")[0])
        else:
            plt.title(job.titleOverride, fontsize=job.fontSize)
            if job.log > 0:
                logging.info("Creating Graph:" + job.titleOverride)
        plt.xlim(0, max(X))
        plt.plot(X, Y)
        if plotExperimentalData:
            plt.plot(Xexp, Yexp)
        # BUGFIX: clamp to len(col); the log-axis branch previously indexed
        # past the 16-colour list when there were more than 16 markers.
        for i in range(0, min(len(self.timeData), len(col))):
            plt.axvline(self.timeData[i], color=col[i])
        if job.showGridLines:
            plt.grid(b=True, which='major', color='r', linestyle='--')
        if not job.hideAELegend and not job.removeAllLegends:
            # BUGFIX: copy -- the original aliased job.headers and appended
            # "Experimental Data" into the job's own list.
            legendEntries = list(job.headers)
            if plotExperimentalData:
                legendEntries.append("Experimental Data")
            plt.legend(legendEntries)
        # BUGFIX: `"(" and ")" in name` evaluated as `")" in name`; test both.
        if "(" in job.outputFilename and ")" in job.outputFilename:
            job.outputFilename = job.outputFilename.split("(")[0] + ".jpg"
        plt.savefig(os.path.join(job.outputDir, job.outputFilename), dpi=my_dpi)
        plt.close("all")
| [
"logging.getLogger",
"matplotlib.pyplot.grid",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"zipfile.ZipFile",
"matplotlib.pyplot.axvline",
"numpy.array",
"matplotlib.ticker.MaxNLocator",
"logging.error",
"logging.info",
"os.path.exists",
"matplotlib.ticker.FuncFormatter",
"matplotlib.pypl... | [((378, 417), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (397, 417), False, 'import logging\n'), ((430, 469), 'logging.getLogger', 'logging.getLogger', (['"""ActionEventPlotter"""'], {}), "('ActionEventPlotter')\n", (447, 469), False, 'import logging\n'), ((1220, 1285), 'os.path.join', 'os.path.join', (['"""Scenarios"""', 'job.verificationDirectory', '"""baselines"""'], {}), "('Scenarios', job.verificationDirectory, 'baselines')\n", (1232, 1285), False, 'import os\n'), ((1351, 1416), 'os.path.join', 'os.path.join', (['"""Scenarios"""', 'job.verificationDirectory', '"""baselines"""'], {}), "('Scenarios', job.verificationDirectory, 'baselines')\n", (1363, 1416), False, 'import os\n'), ((1492, 1544), 'os.path.join', 'os.path.join', (['"""Scenarios"""', 'job.verificationDirectory'], {}), "('Scenarios', job.verificationDirectory)\n", (1504, 1544), False, 'import os\n'), ((2189, 2217), 'os.path.exists', 'os.path.exists', (['job.dataPath'], {}), '(job.dataPath)\n', (2203, 2217), False, 'import os\n'), ((2246, 2285), 'os.path.join', 'os.path.join', (['job.basedir', 'job.dataPath'], {}), '(job.basedir, job.dataPath)\n', (2258, 2285), False, 'import os\n'), ((2313, 2340), 'os.path.exists', 'os.path.exists', (['job.logPath'], {}), '(job.logPath)\n', (2327, 2340), False, 'import os\n'), ((2368, 2406), 'os.path.join', 'os.path.join', (['job.basedir', 'job.logPath'], {}), '(job.basedir, job.logPath)\n', (2380, 2406), False, 'import os\n'), ((2656, 2695), 'os.path.join', 'os.path.join', (['job.basedir', 'job.dataPath'], {}), '(job.basedir, job.dataPath)\n', (2668, 2695), False, 'import os\n'), ((3904, 3943), 'logging.info', 'logging.info', (["('Name of Plot' + job.name)"], {}), "('Name of Plot' + job.name)\n", (3916, 3943), False, 'import logging\n'), ((4167, 4207), 'os.path.join', 'os.path.join', (['job.dataPath', 'job.dataFile'], {}), '(job.dataPath, job.dataFile)\n', (4179, 4207), False, 
'import os\n'), ((4231, 4278), 'os.path.join', 'os.path.join', (['job.outputDir', 'job.outputFilename'], {}), '(job.outputDir, job.outputFilename)\n', (4243, 4278), False, 'import os\n'), ((12026, 12114), 'matplotlib.pyplot.legend', 'plt.legend', (['handles', 'labels'], {'loc': '(3)', 'ncol': '(3)', 'framealpha': '(1)', 'frameon': '(False)', 'fontsize': '(12)'}), '(handles, labels, loc=3, ncol=3, framealpha=1, frameon=False,\n fontsize=12)\n', (12036, 12114), True, 'import matplotlib.pyplot as plt\n'), ((12123, 12138), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (12131, 12138), True, 'import matplotlib.pyplot as plt\n'), ((12779, 12795), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (12788, 12795), True, 'import matplotlib.pyplot as plt\n'), ((12832, 12846), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12844, 12846), True, 'import matplotlib.pyplot as plt\n'), ((2450, 2490), 'os.path.join', 'os.path.join', (['job.dataPath', 'job.dataFile'], {}), '(job.dataPath, job.dataFile)\n', (2462, 2490), False, 'import os\n'), ((2587, 2627), 'os.path.join', 'os.path.join', (['job.dataPath', 'job.dataFile'], {}), '(job.dataPath, job.dataFile)\n', (2599, 2627), False, 'import os\n'), ((2778, 2816), 'os.path.join', 'os.path.join', (['job.logPath', 'job.logFile'], {}), '(job.logPath, job.logFile)\n', (2790, 2816), False, 'import os\n'), ((2908, 2946), 'os.path.join', 'os.path.join', (['job.logPath', 'job.logFile'], {}), '(job.logPath, job.logFile)\n', (2920, 2946), False, 'import os\n'), ((3708, 3738), 'os.path.dirname', 'os.path.dirname', (['job.outputDir'], {}), '(job.outputDir)\n', (3723, 3738), False, 'import os\n'), ((3762, 3792), 'os.path.dirname', 'os.path.dirname', (['job.outputDir'], {}), '(job.outputDir)\n', (3777, 3792), False, 'import os\n'), ((5232, 5276), 'logging.error', 'logging.error', (["('Zip file not found ' + file_)"], {}), "('Zip file not found ' + file_)\n", (5245, 5276), False, 
'import logging\n'), ((7445, 7489), 'logging.error', 'logging.error', (["('Zip File not found ' + file_)"], {}), "('Zip File not found ' + file_)\n", (7458, 7489), False, 'import logging\n'), ((9302, 9342), 'pandas.read_csv', 'pd.read_csv', (['input_zip'], {'low_memory': '(False)'}), '(input_zip, low_memory=False)\n', (9313, 9342), True, 'import pandas as pd\n'), ((10411, 10463), 'logging.error', 'logging.error', (["('Zip file Not found at :' + input_zip)"], {}), "('Zip file Not found at :' + input_zip)\n", (10424, 10463), False, 'import logging\n'), ((11239, 11272), 'pandas.read_csv', 'pd.read_csv', (['job.experimentalData'], {}), '(job.experimentalData)\n', (11250, 11272), True, 'import pandas as pd\n'), ((11557, 11586), 'os.path.exists', 'os.path.exists', (['job.outputDir'], {}), '(job.outputDir)\n', (11571, 11586), False, 'import os\n'), ((11604, 11627), 'os.mkdir', 'os.mkdir', (['job.outputDir'], {}), '(job.outputDir)\n', (11612, 11627), False, 'import os\n'), ((12186, 12233), 'os.path.join', 'os.path.join', (['job.outputDir', 'job.outputFilename'], {}), '(job.outputDir, job.outputFilename)\n', (12198, 12233), False, 'import os\n'), ((12965, 12994), 'os.path.exists', 'os.path.exists', (['job.outputDir'], {}), '(job.outputDir)\n', (12979, 12994), False, 'import os\n'), ((13012, 13035), 'os.mkdir', 'os.mkdir', (['job.outputDir'], {}), '(job.outputDir)\n', (13020, 13035), False, 'import os\n'), ((13317, 13361), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time(s)"""'], {'fontsize': 'job.fontSize'}), "('Time(s)', fontsize=job.fontSize)\n", (13327, 13361), True, 'import matplotlib.pyplot as plt\n'), ((13377, 13426), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['job.headers[0]'], {'fontsize': 'job.fontSize'}), '(job.headers[0], fontsize=job.fontSize)\n', (13387, 13426), True, 'import matplotlib.pyplot as plt\n'), ((14141, 14155), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y'], {}), '(X, Y)\n', (14149, 14155), True, 'import matplotlib.pyplot as plt\n'), ((15056, 
15072), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (15065, 15072), True, 'import matplotlib.pyplot as plt\n'), ((15467, 15511), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time(s)"""'], {'fontsize': 'job.fontSize'}), "('Time(s)', fontsize=job.fontSize)\n", (15477, 15511), True, 'import matplotlib.pyplot as plt\n'), ((15527, 15576), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['job.headers[0]'], {'fontsize': 'job.fontSize'}), '(job.headers[0], fontsize=job.fontSize)\n', (15537, 15576), True, 'import matplotlib.pyplot as plt\n'), ((16291, 16305), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y'], {}), '(X, Y)\n', (16299, 16305), True, 'import matplotlib.pyplot as plt\n'), ((17191, 17207), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (17200, 17207), True, 'import matplotlib.pyplot as plt\n'), ((3984, 4024), 'os.path.join', 'os.path.join', (['job.dataPath', 'job.dataFile'], {}), '(job.dataPath, job.dataFile)\n', (3996, 4024), False, 'import os\n'), ((4067, 4114), 'os.path.join', 'os.path.join', (['job.outputDir', 'job.outputFilename'], {}), '(job.outputDir, job.outputFilename)\n', (4079, 4114), False, 'import os\n'), ((4763, 4790), 'zipfile.ZipFile', 'zipfile.ZipFile', (['file_', '"""r"""'], {}), "(file_, 'r')\n", (4778, 4790), False, 'import zipfile\n'), ((7058, 7085), 'zipfile.ZipFile', 'zipfile.ZipFile', (['file_', '"""r"""'], {}), "(file_, 'r')\n", (7073, 7085), False, 'import zipfile\n'), ((8376, 8421), 'logging.info', 'logging.info', (["('Adding Event:' + event['text'])"], {}), "('Adding Event:' + event['text'])\n", (8388, 8421), False, 'import logging\n'), ((9769, 9795), 'zipfile.ZipFile', 'zipfile.ZipFile', (['input_zip'], {}), '(input_zip)\n', (9784, 9795), False, 'import zipfile\n'), ((11820, 11866), 'matplotlib.pyplot.plot', 'plt.plot', (['[]', '[]'], {'marker': 'm', 'color': 'c', 'ls': '"""none"""'}), "([], [], marker=m, color=c, ls='none')\n", (11828, 11866), True, 'import matplotlib.pyplot as 
plt\n'), ((13159, 13189), 'matplotlib.ticker.FuncFormatter', 'tick.FuncFormatter', (['self.y_fmt'], {}), '(self.y_fmt)\n', (13177, 13189), True, 'import matplotlib.ticker as tick\n'), ((13507, 13586), 'matplotlib.pyplot.title', 'plt.title', (["(job.headers[0] + '_vs_Time_Action_Event_Plot')"], {'fontsize': 'job.fontSize'}), "(job.headers[0] + '_vs_Time_Action_Event_Plot', fontsize=job.fontSize)\n", (13516, 13586), True, 'import matplotlib.pyplot as plt\n'), ((14236, 14256), 'matplotlib.pyplot.plot', 'plt.plot', (['Xexp', 'Yexp'], {}), '(Xexp, Yexp)\n', (14244, 14256), True, 'import matplotlib.pyplot as plt\n'), ((14332, 14375), 'matplotlib.pyplot.axvline', 'plt.axvline', (['self.timeData[i]'], {'color': 'col[i]'}), '(self.timeData[i], color=col[i])\n', (14343, 14375), True, 'import matplotlib.pyplot as plt\n'), ((14450, 14508), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""major"""', 'color': '"""r"""', 'linestyle': '"""--"""'}), "(b=True, which='major', color='r', linestyle='--')\n", (14458, 14508), True, 'import matplotlib.pyplot as plt\n'), ((14777, 14802), 'matplotlib.pyplot.legend', 'plt.legend', (['legendEntries'], {}), '(legendEntries)\n', (14787, 14802), True, 'import matplotlib.pyplot as plt\n'), ((14981, 15028), 'os.path.join', 'os.path.join', (['job.outputDir', 'job.outputFilename'], {}), '(job.outputDir, job.outputFilename)\n', (14993, 15028), False, 'import os\n'), ((15140, 15176), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'nbins': '(10)', 'min_n_ticks': '(8)'}), '(nbins=10, min_n_ticks=8)\n', (15151, 15176), False, 'from matplotlib.ticker import LinearLocator, MaxNLocator\n'), ((15226, 15263), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'nbins': '(15)', 'min_n_ticks': '(10)'}), '(nbins=15, min_n_ticks=10)\n', (15237, 15263), False, 'from matplotlib.ticker import LinearLocator, MaxNLocator\n'), ((15309, 15339), 'matplotlib.ticker.FuncFormatter', 'tick.FuncFormatter', (['self.y_fmt'], {}), 
'(self.y_fmt)\n', (15327, 15339), True, 'import matplotlib.ticker as tick\n'), ((15657, 15736), 'matplotlib.pyplot.title', 'plt.title', (["(job.headers[0] + '_vs_Time_Action_Event_Plot')"], {'fontsize': 'job.fontSize'}), "(job.headers[0] + '_vs_Time_Action_Event_Plot', fontsize=job.fontSize)\n", (15666, 15736), True, 'import matplotlib.pyplot as plt\n'), ((16373, 16393), 'matplotlib.pyplot.plot', 'plt.plot', (['Xexp', 'Yexp'], {}), '(Xexp, Yexp)\n', (16381, 16393), True, 'import matplotlib.pyplot as plt\n'), ((16484, 16527), 'matplotlib.pyplot.axvline', 'plt.axvline', (['self.timeData[i]'], {'color': 'col[i]'}), '(self.timeData[i], color=col[i])\n', (16495, 16527), True, 'import matplotlib.pyplot as plt\n'), ((16585, 16643), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""major"""', 'color': '"""r"""', 'linestyle': '"""--"""'}), "(b=True, which='major', color='r', linestyle='--')\n", (16593, 16643), True, 'import matplotlib.pyplot as plt\n'), ((16912, 16937), 'matplotlib.pyplot.legend', 'plt.legend', (['legendEntries'], {}), '(legendEntries)\n', (16922, 16937), True, 'import matplotlib.pyplot as plt\n'), ((17116, 17163), 'os.path.join', 'os.path.join', (['job.outputDir', 'job.outputFilename'], {}), '(job.outputDir, job.outputFilename)\n', (17128, 17163), False, 'import os\n'), ((5119, 5190), 'logging.error', 'logging.error', (['("ActionEventPlotter couldn\'t read the log file " + file_)'], {}), '("ActionEventPlotter couldn\'t read the log file " + file_)\n', (5132, 5190), False, 'import logging\n'), ((5907, 5972), 'logging.error', 'logging.error', (['"""Couldn\'t correctly parse log file time to double"""'], {}), '("Couldn\'t correctly parse log file time to double")\n', (5920, 5972), False, 'import logging\n'), ((7332, 7403), 'logging.error', 'logging.error', (['("ActionEventPlotter couldn\'t read the log file " + file_)'], {}), '("ActionEventPlotter couldn\'t read the log file " + file_)\n', (7345, 7403), False, 'import logging\n'), 
((8163, 8228), 'logging.error', 'logging.error', (['"""Couldn\'t correctly parse log file time to double"""'], {}), '("Couldn\'t correctly parse log file time to double")\n', (8176, 8228), False, 'import logging\n'), ((9505, 9552), 'logging.error', 'logging.error', (["('File Not found at:' + input_zip)"], {}), "('File Not found at:' + input_zip)\n", (9518, 9552), False, 'import logging\n'), ((13642, 13721), 'logging.info', 'logging.info', (["('Creating Graph:' + job.headers[0] + '_vs_Time_Action_Event_Plot')"], {}), "('Creating Graph:' + job.headers[0] + '_vs_Time_Action_Event_Plot')\n", (13654, 13721), False, 'import logging\n'), ((13931, 13982), 'matplotlib.pyplot.title', 'plt.title', (['job.titleOverride'], {'fontsize': 'job.fontSize'}), '(job.titleOverride, fontsize=job.fontSize)\n', (13940, 13982), True, 'import matplotlib.pyplot as plt\n'), ((15792, 15871), 'logging.info', 'logging.info', (["('Creating Graph:' + job.headers[0] + '_vs_Time_Action_Event_Plot')"], {}), "('Creating Graph:' + job.headers[0] + '_vs_Time_Action_Event_Plot')\n", (15804, 15871), False, 'import logging\n'), ((16189, 16240), 'matplotlib.pyplot.title', 'plt.title', (['job.titleOverride'], {'fontsize': 'job.fontSize'}), '(job.titleOverride, fontsize=job.fontSize)\n', (16198, 16240), True, 'import matplotlib.pyplot as plt\n'), ((6430, 6477), 'logging.info', 'logging.info', (["('Adding Action:' + Action['text'])"], {}), "('Adding Action:' + Action['text'])\n", (6442, 6477), False, 'import logging\n'), ((12439, 12455), 'numpy.array', 'np.array', (['expand'], {}), '(expand)\n', (12447, 12455), True, 'import numpy as np\n'), ((14040, 14091), 'logging.info', 'logging.info', (["('Creating Graph:' + job.titleOverride)"], {}), "('Creating Graph:' + job.titleOverride)\n", (14052, 14091), False, 'import logging\n'), ((16119, 16170), 'logging.info', 'logging.info', (["('Creating Graph:' + job.titleOverride)"], {}), "('Creating Graph:' + job.titleOverride)\n", (16131, 16170), False, 'import 
logging\n'), ((10160, 10207), 'logging.error', 'logging.error', (["('File Not found at:' + input_zip)"], {}), "('File Not found at:' + input_zip)\n", (10173, 10207), False, 'import logging\n')] |
import numpy as np
from src.network_elements.network_element import NetworkElement
class Sigmoid(NetworkElement):
    """Sigmoid (logistic) activation layer.

    Caches the pre-activation input on the forward pass so the backward
    pass can evaluate the derivative at the same point.
    """

    def __init__(self) -> None:
        # Pre-activation tensor from the most recent forward pass (despite
        # the name, this stores the layer's *input* Z, not its output).
        self.current_layer_output = None

    def sigmoid(self, Z):
        """Element-wise logistic function sigma(Z) = 1 / (1 + e^(-Z)).

        BUGFIX: the previous form ``1 / (1 - np.exp(Z))`` was not the
        sigmoid — it divides by zero at Z == 0 and is unbounded.
        """
        return 1 / (1 + np.exp(-Z))

    def sigmoid_derivative(self, Z):
        """Derivative of the sigmoid: sigma(Z) * (1 - sigma(Z))."""
        return self.sigmoid(Z) * (1 - self.sigmoid(Z))

    def forward_propagate(self, Z):
        """Cache Z for the backward pass and return sigma(Z)."""
        self.current_layer_output = Z
        return self.sigmoid(Z)

    def backward_propagate(self, dLdZ):
        """Chain the incoming gradient through the sigmoid derivative.

        :raises ValueError: if called before any forward pass.
        """
        if self.current_layer_output is None:
            raise ValueError("Please forward propagate information before backward propagating.")
        return dLdZ * self.sigmoid_derivative(self.current_layer_output)
"numpy.exp"
] | [((234, 243), 'numpy.exp', 'np.exp', (['Z'], {}), '(Z)\n', (240, 243), True, 'import numpy as np\n')] |
import sys
sys.path.append('../')
import constants as cnst
import os
import torch
import tqdm
import numpy as np
import constants
SHAPE = [0, 1, 2]
EXP = [50, 51, 52]
POSE = [150, 151, 152, 153, 154, 155]
def centre_using_nearest(flame_seq, flame_dataset, one_translation_for_whole_seq=True):
    """Copy translation parameters (columns 156 onward) into ``flame_seq``
    from the nearest entry of ``flame_dataset``, mutating it in place.

    When ``one_translation_for_whole_seq`` is True, the dataset row whose
    pose block (cols 150:156) is closest to the sequence's first frame
    supplies one translation for every frame. Otherwise each frame gets the
    translation of its own nearest neighbour, scored by a weighted mix of
    shape and pose distances.

    :return: the mutated ``flame_seq``.
    """
    if one_translation_for_whole_seq:
        pose_gap = np.linalg.norm(flame_dataset[:, 150:156] - flame_seq[0, 150:156], axis=-1)
        nearest = np.argmin(pose_gap)
        flame_seq[:, 156:] = flame_dataset[nearest, 156:]
        return flame_seq

    # Per-frame matching: pose dominates, shape is currently zero-weighted.
    w_shape, w_pose = 0, 0.7
    for frame_idx in range(len(flame_seq)):
        gap_shape = np.linalg.norm(flame_dataset[:, SHAPE] - flame_seq[frame_idx, SHAPE], axis=-1)
        gap_pose = np.linalg.norm(flame_dataset[:, POSE] - flame_seq[frame_idx, POSE], axis=-1)
        nearest = np.argmin(w_shape * gap_shape + w_pose * gap_pose)
        flame_seq[frame_idx, 156:] = flame_dataset[nearest, 156:]
    return flame_seq
def position_to_given_location(deca_flame_decoder, flame_batch):
    """Solve per-face camera parameters (scale, x/y offset) that map two eye
    landmarks to fixed normalized image positions, writing them in place.

    :param deca_flame_decoder: FLAME decoder callable taking
        shape/expression/pose params and returning ``(verts, _, _)``.
    :param flame_batch: (B, >=159) tensor; cols 0:100 shape, 100:150
        expression, 150:156 pose, 156:159 camera (s, b_x, b_y) — overwritten.
    :return: the same ``flame_batch`` with cols 156:159 replaced.
    """
    shape, expression, pose = (flame_batch[:, 0:100], flame_batch[:, 100:150], flame_batch[:, 150:156])
    verts, _, _ = deca_flame_decoder(shape_params=shape, expression_params=expression, pose_params=pose)
    for i in range(verts.shape[0]):
        # Vertices 4051/4597 — presumably the two eye landmarks of the FLAME
        # mesh topology; TODO confirm against the FLAME template.
        e_1_3d = verts[i, 4051, :]
        e_2_3d = verts[i, 4597, :]
        # 3x4 system: row 0 holds [e1.x, e2.x, e1.y, e2.y]; rows 1/2 are
        # indicators selecting the x / y offset, so that
        # [s, s*b_x, s*b_y] @ eye_3d_mat == s*(coord + offset) per column.
        eye_3d_mat = torch.zeros(size=(3, 4)).to(flame_batch.device)
        eye_3d_mat[1, 0] = eye_3d_mat[1, 1] = eye_3d_mat[2, 2] = eye_3d_mat[2, 3] = 1
        eye_3d_mat[0, 0] = e_1_3d[0]
        eye_3d_mat[0, 1] = e_2_3d[0]
        eye_3d_mat[0, 2] = e_1_3d[1]
        eye_3d_mat[0, 3] = e_2_3d[1]
        # Target normalized positions (x1, x2, y1, y2) for the two landmarks.
        normalized_image_desired_positions_x1_x2_y1_y2 = \
            torch.tensor([-0.2419, 0.2441, 0.0501-0.1, 0.0509-0.1]).to(flame_batch.device)
        # Least-squares solve via the pseudo-inverse for (s, s*b_x, s*b_y).
        s, s_b_x, s_b_y = torch.matmul(normalized_image_desired_positions_x1_x2_y1_y2, torch.pinverse(eye_3d_mat))
        b_x = s_b_x/s
        b_y = s_b_y/s
        # Sign flip of the scale — presumably matches the renderer's camera
        # convention; TODO confirm.
        s = -s
        flame_batch[i, 156] = s
        flame_batch[i, 157] = b_x
        flame_batch[i, 158] = b_y
    return flame_batch
def translate_to_center_eye(flame_decoder, flame_params, original_flame):
    """Compute the 3D translation that moves the decoded head so that a
    reference vertex (index 3666 — presumably between the eyes; TODO
    confirm against the FLAME topology) lands at a target position.

    :param flame_decoder: decoder returning ``(verts, _)`` given
        shape/expression/pose params and a translation.
    :param flame_params: (B, >=159) tensor; cols 156:159 hold the translation.
    :param original_flame: optional reference params; when given, its decoded
        vertex 3666 defines the target; when None a hard-coded target is used.
    :return: a new tensor — ``flame_params`` with cols 156:159 replaced by the
        required translation.
    """
    shape, expression, pose, translation = (flame_params[:, 0:100,], flame_params[:, 100:150], flame_params[:, 150:156],
                                            flame_params[:, 156:159])
    # Decode with zero translation to measure the head's untranslated position.
    verts, _ = flame_decoder(shape_params=shape, expression_params=expression, pose_params=pose,
                             translation=translation*0)
    if original_flame is not None:
        shape_orig, expression_orig, pose_orig, translation_orig = (original_flame[:, 0:100,],
                                                                    original_flame[:, 100:150],
                                                                    original_flame[:, 150:156],
                                                                    original_flame[:, 156:159])
        verts_orig, _ = flame_decoder(shape_params=shape_orig, expression_params=expression_orig,
                                      pose_params=pose_orig, translation=translation_orig)
        desired_cntr_of_the_eyes = verts_orig[:, 3666, :]
    else:
        # Fallback target position — empirically chosen constants.
        desired_cntr_of_the_eyes = torch.from_numpy(np.array([4.32830852e-02, -47.60086733e-03, 2.41298008e+00])
                                                    .astype('float32')).to(flame_params.device)
        # desired_cntr_of_the_eyes = torch.from_numpy(np.array([2.2427477e-03, -1.8124590e-02, 2.5114515e+00])
        #                                             .astype('float32')).to(flame_params.device)
    current_translation = verts[:, 3666, :]
    # Translation is simply the offset from current to desired position.
    required_translation = desired_cntr_of_the_eyes - current_translation
    return torch.cat((shape, expression, pose, required_translation), dim=1)
class RegressorNNSkipPart(torch.nn.Module):
    """Residual MLP block: ``output = input + F(input)``.

    ``F`` is a stack of ``num_layers_per_block`` units, each a Linear layer
    optionally followed by a regularizer ('dropout' or 'batchnorm') and an
    activation ('relu' or 'lrelu').
    """

    def __init__(self, neurons, regularization, num_layers_per_block, activation_type):
        super().__init__()
        stack = []
        for _ in range(num_layers_per_block):
            # Width-preserving linear transform.
            stack.append(torch.nn.Linear(neurons, neurons, bias=True))
            # Optional regularization; anything else means "none".
            if regularization == 'dropout':
                stack.append(torch.nn.Dropout(0.5))
            elif regularization == 'batchnorm':
                stack.append(torch.nn.BatchNorm1d(neurons))
            # Non-linearity.
            if activation_type == 'relu':
                stack.append(torch.nn.ReLU(True))
            elif activation_type == 'lrelu':
                stack.append(torch.nn.LeakyReLU(0.3))
        self.forward_part = torch.nn.Sequential(*stack)

    def forward(self, input):
        """Apply the stack and add the skip connection."""
        residual = self.forward_part(input)
        return input + residual
class EyeCenteringByRegression:
    """Regress the camera parameters (scale, x/y offset) that centre the eyes.

    A small residual MLP maps 12 FLAME coefficients (3 shape + 3 expression
    + 6 pose) to the 3 camera parameters. Inputs and outputs are standardised
    with precomputed dataset statistics.
    """

    def __init__(self, eval_mode=False, make_cuda=False, num_skip_blks=2, intermediate_neurons=512,
                 regularization='batchnorm', num_layers_per_block=2, activation_type='relu'):
        # Standardisation statistics, precomputed over the training data.
        self.mean_input = torch.from_numpy(np.array([0.4671627, -0.09504398, -0.12090819,
                                                    1.2735702, 0.00253953, -0.02751609,
                                                    0.10822426, -0.01990774, 0.00626311,
                                                    0.08915882, 0.00973385, -0.00834262]).astype('float32'))
        self.std_input = torch.from_numpy(np.array([0.53506327, 0.52815205, 0.52134556,
                                                   1.1373067, 0.4865559, 0.21345851,
                                                   0.11624492, 0.27343082, 0.02041259,
                                                   0.05613742, 0.01074448, 0.03475167]).astype('float32'))
        self.mean_output = torch.from_numpy(np.array([8.0179777e+00, 3.4307071e-03, -1.3698899e-04]).astype('float32'))
        self.std_output = torch.from_numpy(np.array([0.38766932, 0.03351782, 0.01525018]).astype('float32'))
        # Guards against silently running inference with untrained weights.
        self.random_model = True
        self.model = torch.nn.Sequential(
            torch.nn.Linear(len(SHAPE + EXP + POSE), intermediate_neurons, bias=True),
            torch.nn.BatchNorm1d(intermediate_neurons),
            torch.nn.ReLU(True),
            *[RegressorNNSkipPart(intermediate_neurons, regularization=regularization,
                                  num_layers_per_block=num_layers_per_block, activation_type=activation_type)
              for skip_blk_id in range(num_skip_blks)],
            torch.nn.Linear(intermediate_neurons, 3, bias=True),
        )
        if make_cuda:
            self.device = 'cuda'
            self.model = self.model.cuda()
        else:
            self.device = 'cpu'
        self.eval_mode = eval_mode
        if eval_mode:
            self.model.eval()
        self.mdl_optim = torch.optim.Adam(self.model.parameters(), lr=1e-4, betas=(0.0, 0.99))
        self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.mdl_optim, 'min', factor=0.5, patience=5, verbose=True, threshold=0.0001, min_lr=1e-6)

    def load_model(self, checkpoint_path):
        """Load weights from ``checkpoint_path`` and mark the model usable."""
        self.random_model = False
        self.model.load_state_dict(torch.load(checkpoint_path))

    def save_model(self, checkpoint_path):
        """Save the model weights, creating the target directory if needed."""
        os.makedirs(os.path.dirname(checkpoint_path), exist_ok=True)
        torch.save(self.model.state_dict(), checkpoint_path)

    def get_camera(self, pose_shape_exp):
        """Predict de-standardised camera params for a batch of FLAME inputs.

        :param pose_shape_exp: (B, 12) tensor of shape+exp+pose coefficients.
        :raises ValueError: if the model was neither trained nor loaded.
        """
        if self.random_model:
            raise ValueError('Using model inference without training or loading it')
        with torch.no_grad():
            # Keep the statistics on the same device as the input batch.
            self.mean_input = self.mean_input.to(pose_shape_exp.device)
            self.std_input = self.std_input.to(pose_shape_exp.device)
            self.std_output = self.std_output.to(pose_shape_exp.device)
            self.mean_output = self.mean_output.to(pose_shape_exp.device)
            t = (self.model((pose_shape_exp - self.mean_input) / self.std_input) * self.std_output) + self.mean_output
        return t

    def substitute_flame_batch_with_regressed_camera(self, flame_batch):
        """Overwrite the translation columns of ``flame_batch`` in place with
        the regressed camera and return it."""
        t_cam = self.get_camera(flame_batch[:, SHAPE+EXP+POSE])
        flame_batch[:, constants.get_idx_list('TRANS')] = t_cam
        return flame_batch

    def fit_to_data(self, trn_dataloader, epochs=20, verbose=True, training_criterion=None,
                    validation_loader=None, save_best_mdl_path=None):
        """Train the regressor, optionally validating and checkpointing.

        BUGFIX: gradients are now zeroed before every backward pass — they
        previously accumulated across batches, corrupting every update after
        the first. The mutable default ``training_criterion=torch.nn.MSELoss()``
        was also replaced by a None sentinel (instantiated per call), which is
        backward-compatible for all callers.

        :param training_criterion: loss module; defaults to ``MSELoss``.
        :param save_best_mdl_path: if given (and validating), the checkpoint
            path for the best-validation model.
        """
        assert not self.eval_mode
        if training_criterion is None:
            training_criterion = torch.nn.MSELoss()
        validation_criterion = torch.nn.MSELoss()
        self.random_model = False
        trn_dataloader_itr = iter(trn_dataloader)
        validation_loss = 0
        best_validation_loss = np.inf
        for epoch_id in range(epochs):
            moving_avg_trn_loss = 0
            self.model.train()
            if verbose:
                pbar = tqdm.tqdm(range(len(trn_dataloader)))
            else:
                pbar = range(len(trn_dataloader))
            for batch_id in pbar:
                try:
                    x_train, y_train = next(trn_dataloader_itr)
                except (OSError, StopIteration):
                    # Loader exhausted — restart it for the next pass.
                    trn_dataloader_itr = iter(trn_dataloader)
                    x_train, y_train = next(trn_dataloader_itr)
                # Standardise, then move to the training device.
                x_train = (x_train - self.mean_input)/self.std_input
                y_train = (y_train - self.mean_output)/self.std_output
                x_train = x_train.to(self.device)
                y_train = y_train.to(self.device)
                y_hat_train = self.model(x_train)
                train_loss = training_criterion(y_hat_train, y_train)
                # BUGFIX: clear stale gradients before the backward pass.
                self.mdl_optim.zero_grad()
                train_loss.backward()
                self.mdl_optim.step()
                moving_avg_trn_loss += train_loss.item()
                state_msg = f'[{epoch_id}/{epochs}] Train_loss: {moving_avg_trn_loss/(batch_id+1):.3f} ' \
                            f'Valid_loss: {validation_loss:0.3f}'
                if verbose:
                    pbar.set_description(state_msg)
            if validation_loader is not None:
                validation_loss = 0
                num_batches = 0
                validation_loader_itr = iter(validation_loader)
                self.model.eval()
                with torch.no_grad():
                    for x_valid, y_valid in validation_loader_itr:
                        x_valid = (x_valid - self.mean_input) / self.std_input
                        y_valid = (y_valid - self.mean_output) / self.std_output
                        x_valid = x_valid.to(self.device)
                        y_valid = y_valid.to(self.device)
                        num_batches += 1
                        y_hat_valid = self.model(x_valid)
                        valid_loss = validation_criterion(y_hat_valid, y_valid)
                        validation_loss += valid_loss
                validation_loss /= num_batches
                # Plateau scheduler decays the LR when validation stalls.
                self.lr_scheduler.step(validation_loss)
            if save_best_mdl_path is not None and validation_loader is not None:
                if best_validation_loss > validation_loss:
                    best_validation_loss = validation_loss
                    self.save_model(save_best_mdl_path)
                    print(f'New best model saved to {save_best_mdl_path}')

    def get_eye_center_camera(self, current_shape_exp_pose):
        """Raw (un-normalised) forward pass through the regressor."""
        return self.model(current_shape_exp_pose)
if __name__ == '__main__':
    # Regressor training script: fit the FLAME-params -> camera regressor on
    # the per-image FLAME parameter dataset.
    from torch.utils.data import Dataset, DataLoader

    class FlmDatLoader(Dataset):
        """Yields (12-dim shape/exp/pose vector, 3-dim camera vector) pairs."""

        def __init__(self, keys, param_dict):
            self.param_dict = param_dict
            self.keys = keys
            # Images on which DECA failed; their parameters must be skipped.
            self.list_bad_images = np.load(cnst.list_deca_failed_iamges)['bad_images']

        def __getitem__(self, index):
            curren_file = str(index).zfill(5) + '.npy'
            # Resample until the index does not correspond to a bad image.
            while curren_file in self.list_bad_images:
                index = np.random.randint(0, len(self.keys))
                curren_file = str(index).zfill(5) + '.npy'
            # BUGFIX: index this split's own key list (self.keys). The old code
            # indexed the module-level `keys`, whose prefix is the *training*
            # split — so the validation loader silently served training
            # samples, leaking data into validation.
            key = self.keys[index]
            shape_exp_pose = np.concatenate((self.param_dict[key]['shape'][:3],
                                             self.param_dict[key]['exp'][:3],
                                             self.param_dict[key]['pose']), axis=-1)
            t_cam = self.param_dict[key]['cam']
            return shape_exp_pose, t_cam

        def __len__(self):
            return len(self.keys)

    params_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item()
    keys = np.array(list(params_dict.keys()))
    validation_fraction = 0.3
    # Hold out the last 30% of keys for validation.
    train_keys = keys[:int(len(keys) * (1 - validation_fraction))]
    validation_keys = keys[int(len(keys) * (1 - validation_fraction)):]
    train_set = FlmDatLoader(train_keys, params_dict)
    train_loader = DataLoader(train_set, shuffle=True, batch_size=64, num_workers=0, drop_last=True,
                              pin_memory=True)
    valid_set = FlmDatLoader(validation_keys, params_dict)
    validation_loader = DataLoader(valid_set, shuffle=True, batch_size=128, num_workers=0, drop_last=True,
                                   pin_memory=True)
    eye_cntr_reg = EyeCenteringByRegression(make_cuda=True, num_skip_blks=2, intermediate_neurons=825,
                                             regularization='batchnorm', num_layers_per_block=1,
                                             activation_type='relu')
    try:
        eye_cntr_reg.fit_to_data(trn_dataloader=train_loader, validation_loader=validation_loader, epochs=200,
                                 save_best_mdl_path='../checkpoint/eye_centering/cntr_flm_param_to_cam.mdl',
                                 training_criterion=torch.nn.MSELoss())
    finally:
        # Always persist the final state, even on crash or interrupt.
        eye_cntr_reg.save_model('../checkpoint/eye_centering/cntr_eye_flm_param_to_cam_last.mdl')
        print('..................Model saved .................')
| [
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.Sequential",
"numpy.array",
"torch.nn.MSELoss",
"torch.nn.BatchNorm1d",
"numpy.linalg.norm",
"sys.path.append",
"constants.get_idx_list",
"torch.pinverse",
"numpy.concatenate",
"numpy.argmin",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"tor... | [((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((3881, 3946), 'torch.cat', 'torch.cat', (['(shape, expression, pose, required_translation)'], {'dim': '(1)'}), '((shape, expression, pose, required_translation), dim=1)\n', (3890, 3946), False, 'import torch\n'), ((12783, 12797), 'numpy.array', 'np.array', (['keys'], {}), '(keys)\n', (12791, 12797), True, 'import numpy as np\n'), ((13077, 13180), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'shuffle': '(True)', 'batch_size': '(64)', 'num_workers': '(0)', 'drop_last': '(True)', 'pin_memory': '(True)'}), '(train_set, shuffle=True, batch_size=64, num_workers=0, drop_last\n =True, pin_memory=True)\n', (13087, 13180), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((13290, 13393), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_set'], {'shuffle': '(True)', 'batch_size': '(128)', 'num_workers': '(0)', 'drop_last': '(True)', 'pin_memory': '(True)'}), '(valid_set, shuffle=True, batch_size=128, num_workers=0,\n drop_last=True, pin_memory=True)\n', (13300, 13393), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((394, 468), 'numpy.linalg.norm', 'np.linalg.norm', (['(flame_dataset[:, 150:156] - flame_seq[0, 150:156])'], {'axis': '(-1)'}), '(flame_dataset[:, 150:156] - flame_seq[0, 150:156], axis=-1)\n', (408, 468), True, 'import numpy as np\n'), ((487, 502), 'numpy.argmin', 'np.argmin', (['dist'], {}), '(dist)\n', (496, 502), True, 'import numpy as np\n'), ((4746, 4774), 'torch.nn.Sequential', 'torch.nn.Sequential', (['*layers'], {}), '(*layers)\n', (4765, 4774), False, 'import torch\n'), ((7010, 7150), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['self.mdl_optim', '"""min"""'], {'factor': '(0.5)', 'patience': '(5)', 'verbose': '(True)', 'threshold': '(0.0001)', 'min_lr': '(1e-06)'}), "(self.mdl_optim, 'min', factor=\n 0.5, patience=5, verbose=True, 
threshold=0.0001, min_lr=1e-06)\n", (7052, 7150), False, 'import torch\n'), ((8402, 8420), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (8418, 8420), False, 'import torch\n'), ((8557, 8575), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (8573, 8575), False, 'import torch\n'), ((636, 706), 'numpy.linalg.norm', 'np.linalg.norm', (['(flame_dataset[:, SHAPE] - flame_seq[i, SHAPE])'], {'axis': '(-1)'}), '(flame_dataset[:, SHAPE] - flame_seq[i, SHAPE], axis=-1)\n', (650, 706), True, 'import numpy as np\n'), ((731, 799), 'numpy.linalg.norm', 'np.linalg.norm', (['(flame_dataset[:, POSE] - flame_seq[i, POSE])'], {'axis': '(-1)'}), '(flame_dataset[:, POSE] - flame_seq[i, POSE], axis=-1)\n', (745, 799), True, 'import numpy as np\n'), ((889, 904), 'numpy.argmin', 'np.argmin', (['dist'], {}), '(dist)\n', (898, 904), True, 'import numpy as np\n'), ((1957, 1983), 'torch.pinverse', 'torch.pinverse', (['eye_3d_mat'], {}), '(eye_3d_mat)\n', (1971, 1983), False, 'import torch\n'), ((6246, 6288), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['intermediate_neurons'], {}), '(intermediate_neurons)\n', (6266, 6288), False, 'import torch\n'), ((6302, 6321), 'torch.nn.ReLU', 'torch.nn.ReLU', (['(True)'], {}), '(True)\n', (6315, 6321), False, 'import torch\n'), ((6590, 6641), 'torch.nn.Linear', 'torch.nn.Linear', (['intermediate_neurons', '(3)'], {'bias': '(True)'}), '(intermediate_neurons, 3, bias=True)\n', (6605, 6641), False, 'import torch\n'), ((7271, 7298), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (7281, 7298), False, 'import torch\n'), ((7364, 7396), 'os.path.dirname', 'os.path.dirname', (['checkpoint_path'], {}), '(checkpoint_path)\n', (7379, 7396), False, 'import os\n'), ((7645, 7660), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7658, 7660), False, 'import torch\n'), ((12216, 12368), 'numpy.concatenate', 'np.concatenate', (["(self.param_dict[keys[index]]['shape'][:3], self.param_dict[keys[index]][\n 
'exp'][:3], self.param_dict[keys[index]]['pose'])"], {'axis': '(-1)'}), "((self.param_dict[keys[index]]['shape'][:3], self.param_dict[\n keys[index]]['exp'][:3], self.param_dict[keys[index]]['pose']), axis=-1)\n", (12230, 12368), True, 'import numpy as np\n'), ((12634, 12688), 'numpy.load', 'np.load', (['cnst.all_flame_params_file'], {'allow_pickle': '(True)'}), '(cnst.all_flame_params_file, allow_pickle=True)\n', (12641, 12688), True, 'import numpy as np\n'), ((1436, 1460), 'torch.zeros', 'torch.zeros', ([], {'size': '(3, 4)'}), '(size=(3, 4))\n', (1447, 1460), False, 'import torch\n'), ((1790, 1849), 'torch.tensor', 'torch.tensor', (['[-0.2419, 0.2441, 0.0501 - 0.1, 0.0509 - 0.1]'], {}), '([-0.2419, 0.2441, 0.0501 - 0.1, 0.0509 - 0.1])\n', (1802, 1849), False, 'import torch\n'), ((4209, 4253), 'torch.nn.Linear', 'torch.nn.Linear', (['neurons', 'neurons'], {'bias': '(True)'}), '(neurons, neurons, bias=True)\n', (4224, 4253), False, 'import torch\n'), ((8247, 8278), 'constants.get_idx_list', 'constants.get_idx_list', (['"""TRANS"""'], {}), "('TRANS')\n", (8269, 8278), False, 'import constants\n'), ((11865, 11902), 'numpy.load', 'np.load', (['cnst.list_deca_failed_iamges'], {}), '(cnst.list_deca_failed_iamges)\n', (11872, 11902), True, 'import numpy as np\n'), ((14180, 14198), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (14196, 14198), False, 'import torch\n'), ((4329, 4350), 'torch.nn.Dropout', 'torch.nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (4345, 4350), False, 'import torch\n'), ((4596, 4615), 'torch.nn.ReLU', 'torch.nn.ReLU', (['(True)'], {}), '(True)\n', (4609, 4615), False, 'import torch\n'), ((5125, 5291), 'numpy.array', 'np.array', (['[0.4671627, -0.09504398, -0.12090819, 1.2735702, 0.00253953, -0.02751609, \n 0.10822426, -0.01990774, 0.00626311, 0.08915882, 0.00973385, -0.00834262]'], {}), '([0.4671627, -0.09504398, -0.12090819, 1.2735702, 0.00253953, -\n 0.02751609, 0.10822426, -0.01990774, 0.00626311, 0.08915882, 0.00973385,\n 
-0.00834262])\n', (5133, 5291), True, 'import numpy as np\n'), ((5512, 5673), 'numpy.array', 'np.array', (['[0.53506327, 0.52815205, 0.52134556, 1.1373067, 0.4865559, 0.21345851, \n 0.11624492, 0.27343082, 0.02041259, 0.05613742, 0.01074448, 0.03475167]'], {}), '([0.53506327, 0.52815205, 0.52134556, 1.1373067, 0.4865559, \n 0.21345851, 0.11624492, 0.27343082, 0.02041259, 0.05613742, 0.01074448,\n 0.03475167])\n', (5520, 5673), True, 'import numpy as np\n'), ((5885, 5936), 'numpy.array', 'np.array', (['[8.0179777, 0.0034307071, -0.00013698899]'], {}), '([8.0179777, 0.0034307071, -0.00013698899])\n', (5893, 5936), True, 'import numpy as np\n'), ((6005, 6051), 'numpy.array', 'np.array', (['[0.38766932, 0.03351782, 0.01525018]'], {}), '([0.38766932, 0.03351782, 0.01525018])\n', (6013, 6051), True, 'import numpy as np\n'), ((10420, 10435), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10433, 10435), False, 'import torch\n'), ((4430, 4459), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['neurons'], {}), '(neurons)\n', (4450, 4459), False, 'import torch\n'), ((4692, 4715), 'torch.nn.LeakyReLU', 'torch.nn.LeakyReLU', (['(0.3)'], {}), '(0.3)\n', (4710, 4715), False, 'import torch\n'), ((3383, 3435), 'numpy.array', 'np.array', (['[0.0432830852, -0.04760086733, 2.41298008]'], {}), '([0.0432830852, -0.04760086733, 2.41298008])\n', (3391, 3435), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.metrics import mean_squared_error, accuracy_score
class BaseModel(object):
    """Common scaffolding for benchmarking a boosting library.

    Concrete subclasses override the ``_config_model`` / ``_train_model`` /
    ``_predict`` hooks; ``run_model`` drives the config -> train -> predict
    cycle and ``eval`` scores predictions with the dataset's metric.
    """

    def __init__(self):
        # Shared hyper-parameters; subclasses translate them into
        # library-specific configuration.
        self.max_depth = 6
        self.learning_rate = 1
        self.min_split_loss = 1
        self.min_weight = 1
        self.L1_reg = 1
        self.L2_reg = 1
        self.num_rounds = 40
        self.max_bin = 255
        self.use_gpu = True
        self.params = {}
        # Library-specific model handle, populated by subclasses.
        self.model = None

    def _config_model(self, data):
        """Hook: build library-specific params for *data*."""
        pass

    def _train_model(self, data):
        """Hook: train the model on *data*; returns elapsed time."""
        pass

    def _predict(self, data):
        """Hook: score the test split of *data*."""
        pass

    def eval(self, data, pred):
        """Score *pred* against ``data.y_test`` using ``data.metric``.

        :raises ValueError: for an unrecognised metric.
        """
        metric = data.metric
        if metric == "RMSE":
            # Dump raw predictions for offline inspection.
            with open('pred', 'w') as f:
                f.writelines(str(x) + '\n' for x in pred)
            return np.sqrt(mean_squared_error(data.y_test, pred))
        if metric == "Accuracy":
            if data.task == "Classification":
                # Threshold probabilities for binary classification.
                pred = pred > 0.5
            elif data.task == "Multiclass classification" and pred.ndim > 1:
                pred = np.argmax(pred, axis=1)
            return accuracy_score(data.y_test, pred)
        raise ValueError("Unknown metric: " + metric)

    def run_model(self, data):
        """Run config -> train -> predict and report timing plus metric."""
        self._config_model(data)
        elapsed = self._train_model(data)
        metric = self._predict(data)
        print("##### Elapsed time: %.5f #####" % elapsed)
        print("##### Predict %s: %.4f #####" % (data.metric, metric))
        return elapsed, metric

    def model_name(self):
        """Hook: human-readable model name."""
        pass
| [
"numpy.argmax",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.mean_squared_error"
] | [((1188, 1225), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['data.y_test', 'pred'], {}), '(data.y_test, pred)\n', (1206, 1225), False, 'from sklearn.metrics import mean_squared_error, accuracy_score\n'), ((1570, 1603), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['data.y_test', 'pred'], {}), '(data.y_test, pred)\n', (1584, 1603), False, 'from sklearn.metrics import mean_squared_error, accuracy_score\n'), ((1527, 1550), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (1536, 1550), True, 'import numpy as np\n')] |
import os
import sys
import glob
from comet_ml import Experiment, OfflineExperiment
import logging
import warnings
import pickle
from argparse import ArgumentParser
# Silence library deprecation chatter during long training runs.
warnings.simplefilter(action="ignore")
import functools
import numpy as np
import pandas as pd
# from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
import torch
import torchnet as tnt
import torch.nn as nn
import matplotlib
# Fix RNG state and release any cached GPU memory before training.
np.random.seed(42)
torch.cuda.empty_cache()
# We import from other files
from config import args
from utils.utils import *
from data_loader.loader import *
from utils.load_data import load_pseudo_labelled_datasets
from learning.loss_functions import *
from learning.kde_mixture import KdeMixture
from learning.accuracy import log_last_stats_of_fold, post_cross_validation_logging
from learning.train import train_full, initialize_model
from model.point_net2 import PointNet2
from learning.kde_mixture import get_fitted_kde_mixture_from_dataset
# Re-seed after the star-imports in case any of them touched the RNG.
np.random.seed(42)
torch.cuda.empty_cache()
# Pretraining-specific CLI flags; DEV mode shrinks everything for smoke tests.
# fmt: off
parser = ArgumentParser(description="Pre-Training")
parser.add_argument("--n_epoch", default=200 if not args.mode == "DEV" else 2, type=int, help="Number of training epochs",)
parser.add_argument("--n_epoch_test", default=1 if not args.mode == "DEV" else 1, type=int, help="We evaluate every -th epoch, and every epoch after epoch_to_start_early_stop",)
parser.add_argument("--epoch_to_start_early_stop", default=1 if not args.mode == "DEV" else 1, type=int, help="Epoch from which to start early stopping process, after ups and down of training.",)
parser.add_argument("--patience_in_epochs", default=10 if not args.mode == "DEV" else 1, type=int, help="Epoch to wait for improvement of MAE_loss before early stopping. Set to np.inf to disable ES.",)
parser.add_argument("--lr", default=1e-3, type=float, help="Learning rate")
parser.add_argument("--step_size", default=1, type=int, help="After this number of steps we decrease learning rate. (Period of learning rate decay)",)
parser.add_argument("--lr_decay", default=0.75, type=float, help="We multiply learning rate by this value after certain number of steps (see --step_size). (Multiplicative factor of learning rate decay)",)
# fmt: on
# CLI flags override the shared config namespace from config.py.
args_local, _ = parser.parse_known_args()
args = update_namespace_with_another_namespace(args, args_local)
setup_experiment_folder(args, task="pretraining")
logger = create_a_logger(args)
experiment = launch_comet_experiment(args)
logger.info("Loading pretrained data...")
dataset = load_pseudo_labelled_datasets(args)
n_plots = len(dataset)
logger.info(f"Training on N={n_plots} pseudo-labeled plots.")
# KDE mixture fitted on the data is used by the loss functions downstream.
args.kde_mixture = get_fitted_kde_mixture_from_dataset(dataset, args)
# Hold out 20% of plots (capped at 100) for validation/testing.
N_PLOTS_IN_VAL_TEST = min(int(0.2 * n_plots), 100)
train_idx, val_idx = np.split(np.arange(n_plots), [n_plots - N_PLOTS_IN_VAL_TEST])
train_set, test_set = get_train_val_datasets(
    dataset, args, train_idx=train_idx, val_idx=val_idx
)
# Single "fold" (-1): pretraining does no cross-validation, but reuses the
# cross-validation logging utilities below.
all_folds_loss_train_dicts = []
all_folds_loss_test_dicts = []
cloud_info_list_by_fold = {}
args.current_fold_id = -1
(
    model,
    all_epochs_train_loss_dict,
    all_epochs_test_loss_dict,
    cloud_info_list,
) = train_full(
    train_set,
    test_set,
    args,
)
model.save_state(args)
cloud_info_list_by_fold[args.current_fold_id] = cloud_info_list
log_last_stats_of_fold(
    all_epochs_train_loss_dict,
    all_epochs_test_loss_dict,
    args,
)
all_folds_loss_train_dicts.append(all_epochs_train_loss_dict)
all_folds_loss_test_dicts.append(all_epochs_test_loss_dict)
post_cross_validation_logging(
    all_folds_loss_train_dicts, all_folds_loss_test_dicts, cloud_info_list_by_fold, args
)
| [
"argparse.ArgumentParser",
"learning.accuracy.log_last_stats_of_fold",
"learning.accuracy.post_cross_validation_logging",
"learning.train.train_full",
"utils.load_data.load_pseudo_labelled_datasets",
"learning.kde_mixture.get_fitted_kde_mixture_from_dataset",
"numpy.random.seed",
"warnings.simplefilte... | [((166, 204), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""'}), "(action='ignore')\n", (187, 204), False, 'import warnings\n'), ((440, 458), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (454, 458), True, 'import numpy as np\n'), ((459, 483), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (481, 483), False, 'import torch\n'), ((987, 1005), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1001, 1005), True, 'import numpy as np\n'), ((1006, 1030), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1028, 1030), False, 'import torch\n'), ((1051, 1093), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Pre-Training"""'}), "(description='Pre-Training')\n", (1065, 1093), False, 'from argparse import ArgumentParser\n'), ((2523, 2558), 'utils.load_data.load_pseudo_labelled_datasets', 'load_pseudo_labelled_datasets', (['args'], {}), '(args)\n', (2552, 2558), False, 'from utils.load_data import load_pseudo_labelled_datasets\n'), ((2664, 2714), 'learning.kde_mixture.get_fitted_kde_mixture_from_dataset', 'get_fitted_kde_mixture_from_dataset', (['dataset', 'args'], {}), '(dataset, args)\n', (2699, 2714), False, 'from learning.kde_mixture import get_fitted_kde_mixture_from_dataset\n'), ((3174, 3211), 'learning.train.train_full', 'train_full', (['train_set', 'test_set', 'args'], {}), '(train_set, test_set, args)\n', (3184, 3211), False, 'from learning.train import train_full, initialize_model\n'), ((3315, 3402), 'learning.accuracy.log_last_stats_of_fold', 'log_last_stats_of_fold', (['all_epochs_train_loss_dict', 'all_epochs_test_loss_dict', 'args'], {}), '(all_epochs_train_loss_dict,\n all_epochs_test_loss_dict, args)\n', (3337, 3402), False, 'from learning.accuracy import log_last_stats_of_fold, post_cross_validation_logging\n'), ((3536, 3655), 'learning.accuracy.post_cross_validation_logging', 
'post_cross_validation_logging', (['all_folds_loss_train_dicts', 'all_folds_loss_test_dicts', 'cloud_info_list_by_fold', 'args'], {}), '(all_folds_loss_train_dicts,\n all_folds_loss_test_dicts, cloud_info_list_by_fold, args)\n', (3565, 3655), False, 'from learning.accuracy import log_last_stats_of_fold, post_cross_validation_logging\n'), ((2797, 2815), 'numpy.arange', 'np.arange', (['n_plots'], {}), '(n_plots)\n', (2806, 2815), True, 'import numpy as np\n')] |
"""
Code ideas from https://github.com/Newmu/dcgan and tensorflow mnist dataset reader
"""
import numpy as np
#import scipy.misc as misc
from PIL import Image
import os
import glob
from random import shuffle, randint
class seg_dataset_reader:
    """Batch reader for the DeepScores semantic-segmentation dataset.

    Loads image/annotation page pairs from ``<path>/images_png`` and
    ``<path>/pix_annotations_png``, splits them into a train and a test set
    and serves sequential or random mini-batches as numpy arrays of shape
    (N, H, W, 1).
    """

    def __init__(self, deepscores_path, max_pages=40, crop=True,
                 crop_size=(1000, 1000), test_size=20):
        """
        :param deepscores_path: root directory of the DeepScores dataset
        :param max_pages: total number of pages to load (train + test);
            None loads every available page
        :param crop: if True, a random patch of ``crop_size`` is taken per page
        :param crop_size: (height, width) of the random crop
        :param test_size: number of pages reserved for the test set
        """
        print("Initializing DeepScores Classification Batch Dataset Reader...")
        self.path = deepscores_path
        self.max_pages = max_pages
        self.crop = crop
        self.crop_size = crop_size
        self.test_size = test_size
        # Containers are created per instance; the previous class-level lists
        # were shared (and mutated) across all instances of this class.
        self.images = []
        self.annotations = []
        self.test_images = []
        self.test_annotations = []
        self.batch_offset = 0
        self.epochs_completed = 0

        images_list = glob.glob(os.path.join(self.path, "images_png", "*.png"))
        # shuffle so the train/test split is random
        shuffle(images_list)
        if max_pages is None:
            # use the whole dataset (the old code exited here by mistake)
            max_pages = len(images_list)
        if max_pages > len(images_list):
            print("Not enough data, only " + str(len(images_list)) + " available")
        if test_size >= max_pages:
            print("Test set too big (" + str(test_size) + "), max_pages is: " + str(max_pages))
            import sys
            sys.exit(1)
        print("Splitting dataset, train: " + str(max_pages - test_size) +
              " images, test: " + str(test_size) + " images")
        test_image_list = images_list[0:test_size]
        train_image_list = images_list[test_size:max_pages]
        self._read_images(test_image_list, train_image_list)

    def _read_images(self, test_image_list, train_image_list):
        """Load and stack the train and test splits into 4-D numpy arrays."""
        self.images, self.annotations = self._load_split(train_image_list)
        print("Training set done")
        self.test_images, self.test_annotations = self._load_split(test_image_list)
        print("Test set done")

    def _load_split(self, image_list):
        """Return (images, annotations) for one split as (N, H, W, 1) arrays."""
        pairs = [self._transform(filename) for filename in image_list]
        images = np.expand_dims(np.array([p[0] for p in pairs]), -1)
        annotations = np.expand_dims(np.array([p[1] for p in pairs]), -1)
        return images, annotations

    def _transform(self, filename):
        """Load one image/annotation pair and (optionally) random-crop it.

        Returns a list [image, annotation] of 2-D numpy arrays.
        """
        image = Image.open(filename)
        annotation = Image.open(filename.replace("/images_png/", "/pix_annotations_png/"))
        if image.size != annotation.size:
            print("input and annotation have different sizes!")
            import sys
            sys.exit(1)
        image = np.asarray(image)
        annotation = np.asarray(annotation)
        if image.ndim == 3:
            # take mean over color channels, image BW anyways
            image = np.mean(image, -1)
        if self.crop:
            # Random top-left corner such that the crop stays inside the page.
            # The previous code passed four scalars to PIL's crop(), which
            # expects a single box tuple, and did so on a numpy array.
            height, width = image.shape[0], image.shape[1]
            row = randint(0, height - self.crop_size[0])
            col = randint(0, width - self.crop_size[1])
            image = image[row:row + self.crop_size[0], col:col + self.crop_size[1]]
            annotation = annotation[row:row + self.crop_size[0], col:col + self.crop_size[1]]
        return [image, annotation]

    def get_records(self):
        """Return the full training set (images, annotations)."""
        return self.images, self.annotations

    def reset_batch_offset(self, offset=0):
        """Reset the sequential batch cursor (default: back to the start)."""
        self.batch_offset = offset

    def get_test_records(self):
        """Return the full test set (images, annotations)."""
        return self.test_images, self.test_annotations

    def next_batch(self, batch_size):
        """Return the next sequential batch; reshuffles at each epoch end."""
        start = self.batch_offset
        self.batch_offset += batch_size
        if self.batch_offset > self.images.shape[0]:
            # Finished epoch
            self.epochs_completed += 1
            print("****************** Epochs completed: " + str(self.epochs_completed) + "******************")
            # Shuffle the data
            perm = np.arange(self.images.shape[0])
            np.random.shuffle(perm)
            self.images = self.images[perm]
            self.annotations = self.annotations[perm]
            # Start next epoch
            start = 0
            self.batch_offset = batch_size
        end = self.batch_offset
        return self.images[start:end], self.annotations[start:end]

    def get_random_batch(self, batch_size):
        """Return a batch sampled uniformly at random (with replacement)."""
        indexes = np.random.randint(0, self.images.shape[0], size=[batch_size]).tolist()
        return self.images[indexes], self.annotations[indexes]
if __name__ == "__main__":
    # Ad-hoc smoke test: load the dataset from a hard-coded local path.
    data_reader = seg_dataset_reader("/Users/tugg/datasets/DeepScores")
| [
"numpy.mean",
"PIL.Image.open",
"random.shuffle",
"numpy.arange",
"os.path.join",
"numpy.array",
"numpy.random.randint",
"numpy.expand_dims",
"sys.exit",
"pdb.set_trace",
"random.randint",
"glob.glob",
"numpy.random.shuffle"
] | [((1059, 1110), 'os.path.join', 'os.path.join', (['self.path', '"""images_png"""', "('*.' + 'png')"], {}), "(self.path, 'images_png', '*.' + 'png')\n", (1071, 1110), False, 'import os\n'), ((1199, 1219), 'random.shuffle', 'shuffle', (['images_list'], {}), '(images_list)\n', (1206, 1219), False, 'from random import shuffle, randint\n'), ((2463, 2484), 'numpy.array', 'np.array', (['self.images'], {}), '(self.images)\n', (2471, 2484), True, 'import numpy as np\n'), ((2507, 2538), 'numpy.expand_dims', 'np.expand_dims', (['self.images', '(-1)'], {}), '(self.images, -1)\n', (2521, 2538), True, 'import numpy as np\n'), ((2567, 2593), 'numpy.array', 'np.array', (['self.annotations'], {}), '(self.annotations)\n', (2575, 2593), True, 'import numpy as np\n'), ((2621, 2657), 'numpy.expand_dims', 'np.expand_dims', (['self.annotations', '(-1)'], {}), '(self.annotations, -1)\n', (2635, 2657), True, 'import numpy as np\n'), ((2922, 2948), 'numpy.array', 'np.array', (['self.test_images'], {}), '(self.test_images)\n', (2930, 2948), True, 'import numpy as np\n'), ((2976, 3012), 'numpy.expand_dims', 'np.expand_dims', (['self.test_images', '(-1)'], {}), '(self.test_images, -1)\n', (2990, 3012), True, 'import numpy as np\n'), ((3046, 3077), 'numpy.array', 'np.array', (['self.test_annotations'], {}), '(self.test_annotations)\n', (3054, 3077), True, 'import numpy as np\n'), ((3110, 3151), 'numpy.expand_dims', 'np.expand_dims', (['self.test_annotations', '(-1)'], {}), '(self.test_annotations, -1)\n', (3124, 3151), True, 'import numpy as np\n'), ((3237, 3257), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (3247, 3257), False, 'from PIL import Image\n'), ((1138, 1160), 'glob.glob', 'glob.glob', (['images_glob'], {}), '(images_glob)\n', (1147, 1160), False, 'import glob\n'), ((1327, 1338), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1335, 1338), False, 'import sys\n'), ((1625, 1636), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1633, 1636), False, 'import 
sys\n'), ((3598, 3613), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (3611, 3613), False, 'import pdb\n'), ((3626, 3637), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3634, 3637), False, 'import sys\n'), ((3781, 3799), 'numpy.mean', 'np.mean', (['image', '(-1)'], {}), '(image, -1)\n', (3788, 3799), True, 'import numpy as np\n'), ((3943, 3980), 'random.randint', 'randint', (['(0)', '(width - self.crop_size[0])'], {}), '(0, width - self.crop_size[0])\n', (3950, 3980), False, 'from random import shuffle, randint\n'), ((4005, 4043), 'random.randint', 'randint', (['(0)', '(height - self.crop_size[1])'], {}), '(0, height - self.crop_size[1])\n', (4012, 4043), False, 'from random import shuffle, randint\n'), ((5197, 5228), 'numpy.arange', 'np.arange', (['self.images.shape[0]'], {}), '(self.images.shape[0])\n', (5206, 5228), True, 'import numpy as np\n'), ((5241, 5264), 'numpy.random.shuffle', 'np.random.shuffle', (['perm'], {}), '(perm)\n', (5258, 5264), True, 'import numpy as np\n'), ((5622, 5683), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.images.shape[0]'], {'size': '[batch_size]'}), '(0, self.images.shape[0], size=[batch_size])\n', (5639, 5683), True, 'import numpy as np\n'), ((3404, 3418), 'random.randint', 'randint', (['(0)', '(10)'], {}), '(0, 10)\n', (3411, 3418), False, 'from random import shuffle, randint\n')] |
import time
import analysis.event
import analysis.beamline
import analysis.background
import analysis.pixel_detector
import ipc
import random
import numpy
# Re-seed numpy's RNG from OS entropy so every run produces different "data".
numpy.random.seed()
# Backend configuration for a dummy facility that emits random detector
# images, a random TOF trace and a random pulse energy at 10 Hz.
# NOTE(review): consumed via module-level imports (ipc, analysis.*) by the
# host framework -- confirm the expected schema against its documentation.
state = {
    'Facility': 'dummy',
    'squareImage' : True,
    'Dummy': {
        'Repetition Rate' : 10,  # events per second
        'Data Sources': {
            # each source: a zero-arg `data` callable plus unit/type metadata
            'CCD': {
                'data': lambda: numpy.random.rand(256,128),
                'unit': 'ADU',
                'type': 'photonPixelDetectors'
            },
            'CCD1': {
                'data': lambda: numpy.random.rand(64,64),
                'unit': 'ADU',
                'type': 'photonPixelDetectors'
            },
            'tof': {
                'data': lambda: numpy.random.rand(2,256),
                'unit': 'mJ',  # NOTE(review): 'mJ' for a TOF trace looks copy-pasted -- confirm
                'type': 'ionTOFs'
            },
            'pulseEnergy1': {
                'data': lambda: random.random(),
                'unit': 'mJ',
                'type': 'pulseEnergies'
            }
        }
    }
}
def onEvent(evt):
    """Per-event hook: report throughput, publish the TOF trace to the GUI,
    and occasionally stall to emulate a slow data source."""
    analysis.event.printProcessingRate()
    tof_record = evt["ionTOFs"]["tof"]
    ipc.new_data("TOF", tof_record.data)
    # roughly one event in a hundred sleeps for a second
    slow_event = numpy.random.randint(100) == 0
    if slow_event:
        time.sleep(1)
| [
"numpy.random.rand",
"ipc.new_data",
"time.sleep",
"numpy.random.randint",
"numpy.random.seed",
"random.random"
] | [((156, 175), 'numpy.random.seed', 'numpy.random.seed', ([], {}), '()\n', (173, 175), False, 'import numpy\n'), ((1080, 1127), 'ipc.new_data', 'ipc.new_data', (['"""TOF"""', "evt['ionTOFs']['tof'].data"], {}), "('TOF', evt['ionTOFs']['tof'].data)\n", (1092, 1127), False, 'import ipc\n'), ((1135, 1160), 'numpy.random.randint', 'numpy.random.randint', (['(100)'], {}), '(100)\n', (1155, 1160), False, 'import numpy\n'), ((1175, 1188), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1185, 1188), False, 'import time\n'), ((364, 391), 'numpy.random.rand', 'numpy.random.rand', (['(256)', '(128)'], {}), '(256, 128)\n', (381, 391), False, 'import numpy\n'), ((544, 569), 'numpy.random.rand', 'numpy.random.rand', (['(64)', '(64)'], {}), '(64, 64)\n', (561, 569), False, 'import numpy\n'), ((721, 746), 'numpy.random.rand', 'numpy.random.rand', (['(2)', '(256)'], {}), '(2, 256)\n', (738, 746), False, 'import numpy\n'), ((888, 903), 'random.random', 'random.random', ([], {}), '()\n', (901, 903), False, 'import random\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""hydrological methods powered by pyFlwDir"""
import warnings
import logging
import numpy as np
import xarray as xr
import geopandas as gpd
import pyflwdir
from typing import Tuple, Union, Optional
from . import gis_utils
# Module-level logger; handler/level configuration is left to the application.
logger = logging.getLogger(__name__)
# Names exported on star-import; keep in sync with the public functions below.
__all__ = [
    "flwdir_from_da",
    "d8_from_dem",
    "reproject_hydrography_like",
    "stream_map",
    "basin_map",
    "outlet_map",
    "clip_basins",
    "upscale_flwdir",
    "dem_adjust",
]
### FLWDIR METHODS ###
def flwdir_from_da(
    da: xr.DataArray,
    ftype: str = "infer",
    check_ftype: bool = True,
    mask: Union[xr.DataArray, bool, None] = None,
    logger=logger,
):
    """Create a pyflwdir.FlwdirRaster from a flow-direction DataArray.

    Parameters
    ----------
    da : xarray.DataArray
        DataArray containing the flow direction raster; must carry a CRS.
    ftype : {'d8', 'ldd', 'nextxy', 'nextidx', 'infer'}, optional
        Flow direction type; inferred from the data when 'infer' (default).
    check_ftype : bool, optional
        Validate the raster against `ftype` when it is not 'infer', by default True.
    mask : xr.DataArray, bool, optional
        Mask of valid cells, by default None. Passing True (re-)uses the
        'mask' coordinate of `da` for backwards compatibility.

    Returns
    -------
    flwdir : pyflwdir.FlwdirRaster
        Flow direction raster object.
    """
    if not isinstance(da, xr.DataArray):
        raise TypeError("da should be an instance of xarray.DataArray")
    crs = da.raster.crs
    if crs is None:
        raise ValueError("da is missing CRS property, set using `da.raster.set_crs`")
    latlon = crs.is_geographic
    _crs, _unit = ("geographic", "degree") if latlon else ("projected", "meter")
    logger.debug(f"Initializing flwdir with {_crs} CRS with unit {_unit}.")
    # normalize `mask` to a plain numpy array or None
    if isinstance(mask, xr.DataArray):
        mask = mask.values
    elif isinstance(mask, bool):
        # backwards compatibility: mask=True reads the 'mask' coordinate of da
        mask = da["mask"].values if (mask and "mask" in da.coords) else None
    elif not isinstance(mask, np.ndarray):
        mask = None
    return pyflwdir.from_array(
        data=da.squeeze().values,
        ftype=ftype,
        check_ftype=check_ftype,
        mask=mask,
        transform=da.raster.transform,
        latlon=latlon,
    )
def d8_from_dem(
    da_elv: xr.DataArray,
    gdf_stream: Optional[gpd.GeoDataFrame] = None,
    max_depth: float = -1.0,
    outlets: str = "edge",
    idxs_pit: Optional[np.ndarray] = None,
) -> xr.DataArray:
    """Derive D8 flow directions grid from an elevation grid.
    Outlets occur at the edge of valid data or at user defined cells (if `idxs_pit` is provided).
    A local depressions is filled based on its lowest pour point level if the pour point
    depth is smaller than the maximum pour point depth `max_depth`, otherwise the lowest
    elevation in the depression becomes a pit.
    Parameters
    ----------
    da_elv: 2D xarray.DataArray
        elevation raster
    gdf_stream: geopandas.GeoDataArray, optional
        stream vector layer with 'uparea' [km2] column which is used to burn
        the river in the elevation data.
    max_depth: float, optional
        Maximum pour point depth. Depressions with a larger pour point
        depth are set as pit. A negative value (default) equals an infinitely
        large pour point depth causing all depressions to be filled.
    outlets: {'edge', 'min'}
        Position for basin outlet(s) at the all valid elevation edge cell ('edge')
        or only the minimum elevation edge cell ('min')
    idxs_pit: 1D array of int
        Linear indices of outlet cells.
    Returns
    -------
    da_flw: xarray.DataArray
        D8 flow direction grid
    See Also
    --------
    pyflwdir.dem.fill_depressions
    """
    nodata = da_elv.raster.nodata
    crs = da_elv.raster.crs
    # grid must be oriented north->south (negative y resolution) and carry a
    # finite nodata value
    assert da_elv.raster.res[1] < 0
    assert nodata is not None and ~np.isnan(nodata)
    # burn in rivers if a stream vector layer with an 'uparea' column is given
    if gdf_stream is not None and "uparea" in gdf_stream.columns:
        gdf_stream = gdf_stream.sort_values(by="uparea")
        dst_rivupa = da_elv.raster.rasterize(gdf_stream, col_name="uparea", nodata=0)
        # make sure the rivers have a slope and are below all other elevation cells.
        # river elevation = min(elv) - log10(uparea[m2]) from rasterized river uparea.
        # NOTE(review): `uparea` is documented as km2; `* 1e3` does not convert
        # km2 to m2 (that would be 1e6) -- confirm the intended scaling.
        elvmin = da_elv.where(da_elv != nodata).min()
        elvriv = elvmin - np.log10(np.maximum(1.0, dst_rivupa * 1e3))
        # synthetic elevation with river burned in
        da_elv = elvriv.where(np.logical_and(da_elv != nodata, dst_rivupa > 0), da_elv)
        da_elv.raster.set_nodata(nodata)
        da_elv.raster.set_crs(crs)
    # derive new flow directions from (synthetic) elevation;
    # fill_depressions returns (filled dem, d8) -- keep the flow directions only
    d8 = pyflwdir.dem.fill_depressions(
        da_elv.values.astype(np.float32),
        max_depth=max_depth,
        nodata=da_elv.raster.nodata,
        outlets=outlets,
        idxs_pit=idxs_pit,
    )[1]
    # return xarray data array
    da_flw = xr.DataArray(
        dims=da_elv.raster.dims,
        coords=da_elv.raster.coords,
        data=d8,
        name="flwdir",
    )
    da_flw.raster.set_nodata(247)  # 247 marks nodata in the D8 output
    da_flw.raster.set_crs(crs)
    return da_flw
def upscale_flwdir(
    ds: xr.Dataset,
    flwdir: pyflwdir.FlwdirRaster,
    scale_ratio: int,
    method: str = "com2",
    uparea_name: Optional[str] = None,
    flwdir_name: str = "flwdir",
    logger=logger,
    **kwargs,
) -> Tuple[xr.DataArray, pyflwdir.FlwdirRaster]:
    """Upscale flow direction network to lower resolution.
    Parameters
    ----------
    ds : xarray.Dataset
        Dataset flow direction.
    flwdir : pyflwdir.FlwdirRaster
        Flow direction raster object.
    scale_ratio: int
        Size of upscaled (coarse) grid cells.
    uparea_name : str, optional
        Name of upstream area DataArray, by default None and derived on the fly.
    flwdir_name : str, optional
        Name of upscaled flow direction raster DataArray, by default "flwdir"
    method : {'com2', 'com', 'eam', 'dmm'}
        Upscaling method for flow direction data, by default 'com2'.
    Returns
    -------
    da_flwdir = xarray.DataArray
        Upscaled D8 flow direction grid.
    flwdir_out : pyflwdir.FlwdirRaster
        Upscaled pyflwdir flow direction raster object.
    See Also
    --------
    pyflwdir.FlwdirRaster.upscale
    """
    if not np.all(flwdir.shape == ds.raster.shape):
        raise ValueError("Flwdir and ds dimensions do not match.")
    # optionally pass an upstream area grid to steer the upscaling
    uparea = None
    if uparea_name is not None:
        if uparea_name in ds.data_vars:
            uparea = ds[uparea_name].values
        else:
            logger.warning(f'Upstream area map "{uparea_name}" not in dataset.')
    flwdir_out, idxs_out = flwdir.upscale(
        scale_ratio, method=method, uparea=uparea, **kwargs
    )
    # setup output DataArray
    ftype = flwdir.ftype
    dims = ds.raster.dims
    # coordinates of the coarse grid derived from the upscaled transform/shape
    xs, ys = gis_utils.affine_to_coords(flwdir_out.transform, flwdir_out.shape)
    coords = {ds.raster.y_dim: ys, ds.raster.x_dim: xs}
    # NOTE(review): relies on private pyflwdir attributes (_core._mv, _mv) as
    # fill values -- may break on a pyflwdir upgrade.
    da_flwdir = xr.DataArray(
        name=flwdir_name,
        data=flwdir_out.to_array(ftype),
        coords=coords,
        dims=dims,
        attrs=dict(long_name=f"{ftype} flow direction", _FillValue=flwdir._core._mv),
    )
    # translate outlet indices to global x,y coordinates
    x_out, y_out = ds.raster.idx_to_xy(idxs_out, mask=idxs_out != flwdir._mv)
    da_flwdir.coords["x_out"] = xr.Variable(
        dims=dims,
        data=x_out,
        attrs=dict(long_name="subgrid outlet x coordinate", _FillValue=np.nan),
    )
    da_flwdir.coords["y_out"] = xr.Variable(
        dims=dims,
        data=y_out,
        attrs=dict(long_name="subgrid outlet y coordinate", _FillValue=np.nan),
    )
    # outlet indices (linear, into the original high-resolution grid)
    da_flwdir.coords["idx_out"] = xr.DataArray(
        data=idxs_out,
        dims=dims,
        attrs=dict(long_name="subgrid outlet index", _FillValue=flwdir._mv),
    )
    return da_flwdir, flwdir_out
def reproject_hydrography_like(
    ds_hydro: xr.Dataset,
    da_elv: xr.DataArray,
    river_upa: float = 5.0,
    river_len: float = 1e3,
    uparea_name: str = "uparea",
    flwdir_name: str = "flwdir",
    logger=logger,
    **kwargs,
) -> xr.Dataset:
    """Reproject flow direction and upstream area data to the `da_elv` crs and grid.
    Flow directions are derived from a reprojected grid of synthetic elevation,
    based on the log10 upstream area [m2]. For regions without upstream area, the original
    elevation is used assuming these elevation values are <= 0 (i.e. offshore bathymetry).
    The upstream area on the reprojected grid is based on the new flow directions and
    rivers entering the domain, defined by the minimum upstream area `river_upa` [km2]
    and a distance from river outlets `river_len` [m]. The latter is to avoid setting
    boundary conditions at the downstream end / outflow of a river.
    NOTE: the resolution of `ds_hydro` should be similar or smaller than the resolution
    of `da_elv` for good results.
    NOTE: this method is still experimental and might change in the future!
    Parameters
    ----------
    ds_hydro: xarray.Dataset
        Dataset with gridded flow directions named `flwdir_name` and upstream area
        named `uparea_name` [km2].
    da_elv: xarray.DataArray
        DataArray with elevation on destination grid.
    river_upa: float, optional
        Minimum upstream area threshold [km2] for inflowing rivers, by default 5 km2
    river_len: float, optional
        Mimimum distance from river outlet for inflowing river location, by default 1000 m.
    uparea_name, flwdir_name : str, optional
        Name of upstream area (default "uparea") and flow direction ("flwdir") variables
        in `ds_hydro`.
    kwargs: key-word arguments
        key-word arguments are passed to `d8_from_dem`
    Returns
    -------
    xarray.Dataset
        Reprojected gridded dataset with flow direction and upstream area variables.
    See Also
    --------
    d8_from_dem
    """
    # check N->S orientation
    assert da_elv.raster.res[1] < 0
    assert ds_hydro.raster.res[1] < 0
    for name in [uparea_name, flwdir_name]:
        if name not in ds_hydro:
            raise ValueError(f"{name} variable not found in ds_hydro")
    crs = da_elv.raster.crs
    da_upa = ds_hydro[uparea_name]
    nodata = da_upa.raster.nodata
    upa_mask = da_upa != nodata
    rivmask = da_upa > river_upa
    # synthetic elevation -> max(log10(uparea[m2])) - log10(uparea[m2])
    # NOTE(review): `uparea` is in km2, so `* 1e3` is not a km2->m2 conversion
    # (that would be 1e6) -- confirm the intended scaling.
    elvsyn = np.log10(np.maximum(1.0, da_upa * 1e3))
    elvsyn = da_upa.where(~upa_mask, elvsyn.max() - elvsyn)
    # take minimum with rank to ensure pits of main rivers have zero syn. elevation
    if np.any(rivmask):
        flwdir_src = flwdir_from_da(ds_hydro[flwdir_name], mask=rivmask)
        elvsyn = elvsyn.where(flwdir_src.rank < 0, np.minimum(flwdir_src.rank, elvsyn))
    # reproject with 'min' to preserve rivers
    elv_mask = da_elv != da_elv.raster.nodata
    elvsyn_reproj = elvsyn.raster.reproject_like(da_elv, method="min")
    # in regions without uparea use elevation, assuming the elevation < 0 (i.e. offshore bathymetry)
    elvsyn_reproj = elvsyn_reproj.where(
        np.logical_or(elvsyn_reproj != nodata, ~elv_mask),
        da_elv - da_elv.where(elvsyn_reproj == nodata).max() - 0.1,  # make sure < 0
    )
    elvsyn_reproj = elvsyn_reproj.where(da_elv != da_elv.raster.nodata, nodata)
    elvsyn_reproj.raster.set_crs(crs)
    elvsyn_reproj.raster.set_nodata(nodata)
    # get flow directions based on reprojected synthetic elevation
    # initiate new flow direction at edge with syn elv <= 0 + inland pits if no kwargs given
    _edge = pyflwdir.gis_utils.get_edge(elv_mask.values)
    if not kwargs:
        _msk = np.logical_and(_edge, elvsyn_reproj <= 0)
        _msk = np.logical_or(_msk, elvsyn_reproj == 0)
        if np.any(_msk):  # False if all pits outside domain
            kwargs.update(idxs_pit=np.where(_msk.values.ravel())[0])
    # NOTE(review): log message has a typo ("synthethic"); it is runtime output
    # so it is left unchanged here.
    logger.info(f"Deriving flow direction from reprojected synthethic elevation.")
    da_flw1 = d8_from_dem(elvsyn_reproj, **kwargs)
    flwdir = flwdir_from_da(da_flw1, ftype="d8", mask=elv_mask)
    # find source river cells outside destination grid bbox
    outside_dst = da_upa.raster.geometry_mask(da_elv.raster.box, invert=True)
    area = flwdir.area / 1e6  # area [km2]
    # If any river cell outside the destination grid, vectorize and reproject river segments(!) uparea
    # to set as boundary condition to the upstream area map.
    nriv = 0
    if np.any(np.logical_and(rivmask, outside_dst)):
        # `flwdir_src` exists here: this branch implies np.any(rivmask) above
        feats = flwdir_src.streams(uparea=da_upa.values, mask=rivmask)
        gdf_stream = gpd.GeoDataFrame.from_features(feats, crs=ds_hydro.raster.crs)
        gdf_stream = gdf_stream.sort_values(by="uparea")
        # calculate upstream area with uparea from inflowing rivers at edge
        # get edge river cells indices
        rivupa = da_flw1.raster.rasterize(gdf_stream, col_name="uparea", nodata=0)
        rivmsk = np.logical_and(flwdir.distnc > river_len, rivupa > 0).values
        inflow_idxs = np.where(np.logical_and(rivmsk, _edge).ravel())[0]
        if inflow_idxs.size > 0:
            # map nearest segment to each river edge cell;
            # keep cell which longest distance to outlet per river segment to avoid duplicating uparea
            gdf0 = gpd.GeoDataFrame(
                index=inflow_idxs,
                geometry=gpd.points_from_xy(*flwdir.xy(inflow_idxs)),
                crs=crs,
            )
            gdf0["distnc"] = flwdir.distnc.flat[inflow_idxs]
            gdf0["idx2"], gdf0["dst2"] = gis_utils.nearest(gdf0, gdf_stream)
            gdf0 = gdf0.sort_values("distnc", ascending=False).drop_duplicates("idx2")
            gdf0["uparea"] = gdf_stream.loc[gdf0["idx2"].values, "uparea"].values
            # set stream uparea to selected inflow cells and calculate total uparea
            nriv = gdf0.index.size
            area.flat[gdf0.index.values] = gdf0["uparea"].values
    logger.info(f"Calculating upstream area with {nriv} river inflows.")
    # accumulate the (boundary-conditioned) cell areas along the new network
    da_upa1 = xr.DataArray(
        dims=da_flw1.raster.dims,
        coords=da_flw1.raster.coords,
        data=flwdir.accuflux(area).astype(np.float32),
        name="uparea",
        attrs=dict(units="km2", _FillValue=-9999),
    ).where(da_elv != nodata, -9999)
    da_upa1.raster.set_crs(crs)
    # at DEBUG level (<= 10), compare new vs original max upstream area
    if logger.getEffectiveLevel() <= 10:
        upa_reproj_max = da_upa.raster.reproject_like(da_elv, method="max")
        max_upa = upa_reproj_max.where(elv_mask).max().values
        max_upa1 = da_upa1.max().values
        logger.debug(f"New/org max upstream area: {max_upa1:.2f}/{max_upa:.2f} km2")
    return xr.merge([da_flw1, da_upa1])
### hydrography maps ###
def gaugemap(
    ds: xr.Dataset,
    idxs: Optional[np.ndarray] = None,
    xy: Optional[Tuple] = None,
    ids: Optional[np.ndarray] = None,
    mask: Optional[xr.DataArray] = None,
    flwdir: Optional[pyflwdir.FlwdirRaster] = None,
    logger=logger,
) -> xr.DataArray:
    """Deprecated alias of :py:meth:`~hydromt.flw.gauge_map`."""
    warnings.warn(
        'The "gaugemap" method is deprecated, use "hydromt.flw.gauge_map" instead.',
        DeprecationWarning,
    )
    # the legacy `mask` argument maps onto gauge_map's `stream` argument
    return gauge_map(
        ds=ds, idxs=idxs, xy=xy, ids=ids, stream=mask, flwdir=flwdir, logger=logger
    )
def gauge_map(
    ds: Union[xr.Dataset, xr.DataArray],
    idxs: Optional[np.ndarray] = None,
    xy: Optional[Tuple] = None,
    ids: Optional[np.ndarray] = None,
    stream: Optional[xr.DataArray] = None,
    flwdir: Optional[pyflwdir.FlwdirRaster] = None,
    max_dist: float = 10e3,
    logger=logger,
) -> Tuple[xr.DataArray, np.ndarray, np.ndarray]:
    """Return a raster with one unique ID per gauge, plus the gauge indices and IDs.

    Gauge locations are given either as linear indices (`idxs`) or x/y
    coordinates (`xy`). When `ids` is omitted the gauges are numbered 1..n.
    If both `stream` (boolean mask) and `flwdir` are provided, gauges are
    snapped to the nearest downstream stream cell; gauges that move farther
    than `max_dist` meters trigger a warning.

    Parameters
    ----------
    ds : xarray.Dataset or xarray.DataArray
        Destination grid definition.
    idxs : 1D array or int, optional
        Linear indices of gauges.
    xy : tuple of 1D array of float, optional
        x, y coordinates of gauges.
    ids : 1D array of int32, optional
        Gauge IDs; values must be larger than zero.
    stream : 2D array of bool, optional
        Mask of stream cells used to snap gauges to.
    flwdir : pyflwdir.FlwdirRaster, optional
        Flow direction raster object (required for snapping).
    max_dist : float, optional
        Maximum snapping distance in meters before a warning, by default 10 km.

    Returns
    -------
    da_gauges : xarray.DataArray
        Map with unique gauge IDs (0 = no gauge).
    idxs : 1D array of int
        Linear indices of the (possibly snapped) gauges.
    ids : 1D array of int
        Gauge IDs.
    """
    # resolve gauge locations to linear indices on the ds grid
    if xy is not None:
        idxs = ds.raster.xy_to_idx(xs=xy[0], ys=xy[1])
    elif idxs is None:
        raise ValueError("Either idxs or xy required")
    if ids is None:
        # number the gauges 1..n
        idxs = np.atleast_1d(idxs)
        ids = np.arange(1, idxs.size + 1, dtype=np.int32)
    # optionally snap the gauges downstream onto the stream mask
    # TODO: should we do the snapping similar to basin_map ??
    if stream is not None and flwdir is not None:
        idxs, dist = flwdir.snap(idxs=idxs, mask=stream, unit="m")
        if np.any(dist > max_dist):
            n_far = int(np.sum(dist > max_dist))
            msg = f"Snapping distance of {n_far} gauge(s) exceeds {max_dist} m"
            warnings.warn(msg, UserWarning)
            logger.warning(msg)
    # rasterize: zero everywhere except at the gauge cells
    id_grid = np.zeros(ds.raster.shape, dtype=np.int32)
    id_grid.flat[idxs] = ids
    da_gauges = xr.DataArray(
        data=id_grid,
        dims=ds.raster.dims,
        coords=ds.raster.coords,
        attrs=dict(_FillValue=0),
    )
    return da_gauges, idxs, ids
def outlet_map(da_flw: xr.DataArray, ftype: str = "infer") -> xr.DataArray:
    """Return a boolean mask marking basin outlets/pits in a flow direction raster.

    Parameters
    ----------
    da_flw : xr.DataArray
        Flow direction data array.
    ftype : {'d8', 'ldd', 'nextxy', 'nextidx', 'infer'}, optional
        Flow direction type; inferred from the data when 'infer' (default).

    Returns
    -------
    xarray.DataArray of bool
        True at outlet/pit cells.
    """
    if ftype == "infer":
        ftype = pyflwdir.pyflwdir._infer_ftype(da_flw.values)
    elif ftype not in pyflwdir.pyflwdir.FTYPES:
        raise ValueError(f"Unknown pyflwdir ftype: {ftype}")
    # cells whose value equals one of the ftype's pit codes are outlets
    pit_codes = pyflwdir.pyflwdir.FTYPES[ftype]._pv
    is_pit = np.isin(da_flw.values, pit_codes)
    return xr.DataArray(is_pit, dims=da_flw.raster.dims, coords=da_flw.raster.coords)
def stream_map(ds, stream=None, **stream_kwargs):
    """Return a boolean stream mask DataArray.

    Parameters
    ----------
    ds : xarray.Dataset
        Dataset holding the maps referenced by `stream_kwargs`.
    stream : 2D array of bool, optional
        Initial stream mask; combined with the threshold criteria using a
        logical AND operation.
    stream_kwargs : dict, optional
        Pairs of variable name and minimum threshold defining stream cells;
        multiple criteria are combined with a logical AND operation.

    Returns
    -------
    xarray.DataArray of bool
        Stream mask.
    """
    # wrap a plain array (or no mask at all) into a boolean DataArray
    if stream is None or isinstance(stream, np.ndarray):
        if stream is None:
            data = np.full(ds.raster.shape, True, dtype=bool)
        else:
            data = stream
        stream = xr.DataArray(
            coords=ds.raster.coords, dims=ds.raster.dims, data=data, name="mask"
        )
    # apply each threshold criterion; nodata cells never qualify as stream
    for name, value in stream_kwargs.items():
        valid = ds[name] != ds[name].raster.nodata
        stream = stream.where(np.logical_and(valid, ds[name] >= value), False)
    if not np.any(stream):
        raise ValueError("Stream criteria resulted in invalid mask.")
    return stream
def basin_map(
    ds: xr.Dataset,
    flwdir: pyflwdir.FlwdirRaster,
    xy: Optional[Tuple] = None,
    idxs: Optional[np.ndarray] = None,
    outlets: bool = False,
    ids: Optional[np.ndarray] = None,
    stream: Optional[xr.DataArray] = None,
    **stream_kwargs,
) -> Union[xr.DataArray, Tuple]:
    """Return a (sub)basin map, with unique non-zero IDs for each subbasin.
    Parameters
    ----------
    ds : xarray.Dataset
        Dataset used for output grid definition and containing `stream_kwargs` variables.
    flwdir : pyflwdir.FlwdirRaster
        Flow direction raster object
    idxs : 1D array or int, optional
        linear indices of sub(basin) outlets, by default is None.
    xy : tuple of 1D array of float, optional
        x, y coordinates of sub(basin) outlets, by default is None.
    outlets : bool, optional
        If True and xy and idxs are None, the basin map is derived for basin outlets
        only, excluding pits at the edge of the domain of incomplete basins.
    ids : 1D array of int32, optional
        IDs of (sub)basins, must be larger than zero, by default None
    stream: 2D array of bool, optional
        Mask of stream cells used to snap outlets to, by default None
    stream_kwargs : dict, optional
        Parameter-threshold pairs to define streams. Multiple thresholds will be
        combined using a logical_and operation. If a stream is provided, it is
        combined with the threshold based map as well.
    Returns
    -------
    da_basin : xarray.DataArray of int32
        basin ID map
    xy : tuple of array_like of float
        snapped x, y coordinates of sub(basin) outlets
    See Also
    --------
    stream_map
    outlet_map
    """
    if not np.all(flwdir.shape == ds.raster.shape):
        raise ValueError("Flwdir and ds dimensions do not match")
    # get stream map
    locs = xy is not None or idxs is not None
    if locs and (stream is not None or len(stream_kwargs) > 0):
        # snap provided xy/idxs to streams
        stream = stream_map(ds, stream=stream, **stream_kwargs)
        idxs = flwdir.snap(xy=xy, idxs=idxs, mask=stream.values)[0]
        xy = None
    elif not locs and outlets:
        # get idxs from real outlets excluding pits at the domain edge
        idxs = flwdir.idxs_outlet
        if idxs is None or len(idxs) == 0:
            # fixed: the two literals previously concatenated without a space
            # and rendered as "domain.Provide ..."
            raise ValueError(
                "No basin outlets found in domain. "
                "Provide 'xy' or 'idxs' outlet locations or set 'outlets=False'"
            )
    da_basins = xr.DataArray(
        data=flwdir.basins(idxs=idxs, xy=xy, ids=ids).astype(np.int32),
        dims=ds.raster.dims,
        coords=ds.raster.coords,
    )
    da_basins.raster.set_nodata(0)
    # return the (possibly snapped) outlet coordinates alongside the map
    if idxs is not None:
        xy = flwdir.xy(idxs)
    return da_basins, xy
def basin_shape(
    ds: xr.Dataset,
    flwdir: pyflwdir.FlwdirRaster,
    basin_name: str = "basins",
    mask: bool = True,
    **kwargs,
) -> gpd.GeoDataFrame:
    """Deprecated: vectorize a (sub)basin map into a GeoDataFrame.

    Use :py:meth:`~hydromt.flw.basin_map` together with
    :py:meth:`~hydromt.raster.RasterDataArray.vectorize` instead.
    """
    warnings.warn(
        "basin_shape is deprecated, use a combination of hydromt.flw.basin_map"
        " and hydromt.raster.RasterDataArray.vectorize instead.",
        DeprecationWarning,
    )
    # compute the basin map on the fly when it is missing from the dataset
    if basin_name not in ds:
        ds[basin_name] = basin_map(ds, flwdir, **kwargs)[0]
    else:
        if not np.all(flwdir.shape == ds.raster.shape):
            raise ValueError("flwdir and ds dimensions do not match")
    da_basins = ds[basin_name]
    nodata = da_basins.raster.nodata
    # apply the "mask" coordinate (when present) by writing nodata outside it
    if mask and "mask" in da_basins.coords and nodata is not None:
        da_basins = da_basins.where(da_basins.coords["mask"] != 0, nodata)
        da_basins.raster.set_nodata(nodata)
    basins_gdf = da_basins.raster.vectorize().set_index("value").sort_index()
    basins_gdf.index.name = basin_name
    return basins_gdf
def clip_basins(
    ds: xr.Dataset,
    flwdir: pyflwdir.FlwdirRaster,
    xy: Optional[Tuple],
    flwdir_name: str = "flwdir",
    **kwargs,
) -> xr.Dataset:
    """Clip a dataset to the (sub)basin(s) draining to ``xy``.

    Parameters
    ----------
    ds : xarray.Dataset
        Dataset to be clipped, containing flow direction (`flwdir_name`) data
    flwdir : pyflwdir.FlwdirRaster
        Flow direction raster object
    xy : tuple of array_like of float, optional
        x, y coordinates of (sub)basin outlet locations
    flwdir_name : str, optional
        name of flow direction DataArray, by default 'flwdir'
    kwargs : key-word arguments
        Keyword arguments based to the :py:meth:`~hydromt.flw.basin_map` method.

    Returns
    -------
    xarray.Dataset
        clipped dataset

    See Also
    --------
    basin_map
    hydromt.RasterDataArray.clip_mask
    """
    basins, xy = basin_map(ds, flwdir, xy, **kwargs)
    outlet_idxs = flwdir.index(*xy)
    # marker value used by the flow direction convention for pit cells
    # NOTE(review): relies on the private pyflwdir attribute _core._pv
    pit_value = flwdir._core._pv
    if isinstance(pit_value, np.ndarray):
        pit_value = pit_value[0]
    # insert pits at the outlet cells so the clipped flwdir stays valid
    dir_data = ds[flwdir_name].values.copy()
    dir_data.flat[outlet_idxs] = pit_value
    dir_attrs = ds[flwdir_name].attrs.copy()
    ds[flwdir_name] = xr.Variable(dims=ds.raster.dims, data=dir_data, attrs=dir_attrs)
    # clip data to the basin mask
    ds.coords["mask"] = basins
    return ds.raster.clip_mask(basins)
def dem_adjust(
    da_elevtn: xr.DataArray,
    da_flwdir: xr.DataArray,
    da_rivmsk: Optional[xr.DataArray] = None,
    flwdir: Optional[pyflwdir.FlwdirRaster] = None,
    connectivity: int = 4,
    river_d8: bool = False,
    logger=logger,
) -> xr.DataArray:
    """Returns hydrologically conditioned elevation.
    The elevation is conditioned to D4 (`connectivity=4`) or D8 (`connectivity=8`)
    flow directions based on the algorithm described in Yamazaki et al. [1]_
    The method assumes the original flow directions are in D8. Therefore, if
    `connectivity=4`, an intermediate D4 conditioned elevation raster is derived
    first, based on which new D4 flow directions are obtained used to condition the
    original elevation.
    Parameters
    ----------
    da_elevtn, da_flwdir, da_rivmsk : xr.DataArray
        elevation [m+REF]
        D8 flow directions [-]
        binary river mask [-], optional
    flwdir : pyflwdir.FlwdirRaster, optional
        D8 flow direction raster object. If None it is derived on the fly from `da_flwdir`.
    connectivity: {4, 8}
        D4 or D8 flow connectivity.
    river_d8 : bool
        If True and `connectivity==4`, additionally condition river cells to D8.
        Requires `da_rivmsk`.
    Returns
    -------
    xr.Dataset
        Dataset with hydrologically adjusted elevation ('elevtn') [m+REF]
    References
    ----------
    .. [1] Yamazaki et al. (2012). Adjustment of a spaceborne DEM for use in floodplain hydrodynamic modeling. Journal of Hydrology, 436-437, 81–91. https://doi.org/10.1016/j.jhydrol.2012.02.045
    See Also
    --------
    pyflwdir.FlwdirRaster.dem_adjust
    pyflwdir.FlwdirRaster.dem_dig_d4
    """
    # get flow directions for entire domain and for rivers
    if flwdir is None:
        flwdir = flwdir_from_da(da_flwdir, mask=False)
    # river_d8 conditioning needs the river mask (used below as <rivmsk>)
    if connectivity == 4 and river_d8 and da_rivmsk is None:
        raise ValueError('Provide "da_rivmsk" in combination with "river_d8"')
    # NOTE(review): <elevtn> aliases the DataArray buffer; assumes the
    # pyflwdir calls below return new arrays rather than mutating -- confirm
    elevtn = da_elevtn.values
    nodata = da_elevtn.raster.nodata
    logger.info(f"Condition elevation to D{connectivity} flow directions.")
    # get D8 conditioned elevation
    elevtn = flwdir.dem_adjust(elevtn)
    # get D4 conditioned elevation (based on D8 conditioned!)
    if connectivity == 4:
        rivmsk = da_rivmsk.values == 1 if da_rivmsk is not None else None
        # derive D4 flow directions with forced pits at original locations
        # ([1] of fill_depressions output is the D8-encoded direction grid)
        d4 = pyflwdir.dem.fill_depressions(
            elevtn=flwdir.dem_dig_d4(elevtn, rivmsk=rivmsk, nodata=nodata),
            nodata=nodata,
            connectivity=connectivity,
            idxs_pit=flwdir.idxs_pit,
        )[1]
        # condition the DEM to the new D4 flow dirs
        flwdir_d4 = pyflwdir.from_array(
            d4, ftype="d8", transform=flwdir.transform, latlon=flwdir.latlon
        )
        elevtn = flwdir_d4.dem_adjust(elevtn)
        # condition river cells to D8
        if river_d8:
            flwdir_river = flwdir_from_da(da_flwdir, mask=rivmsk)
            elevtn = flwdir_river.dem_adjust(elevtn)
    # assert np.all((elv2 - flwdir_d4.downstream(elv2))>=0)
    # save to dataarray
    da_out = xr.DataArray(
        data=elevtn,
        coords=da_elevtn.raster.coords,
        dims=da_elevtn.raster.dims,
    )
    da_out.raster.set_nodata(nodata)
    da_out.raster.set_crs(da_elevtn.raster.crs)
    return da_out
| [
"logging.getLogger",
"xarray.Variable",
"numpy.isin",
"numpy.arange",
"xarray.merge",
"pyflwdir.gis_utils.get_edge",
"warnings.warn",
"numpy.maximum",
"pyflwdir.pyflwdir._infer_ftype",
"numpy.any",
"numpy.isnan",
"numpy.atleast_1d",
"geopandas.GeoDataFrame.from_features",
"numpy.logical_an... | [((281, 308), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (298, 308), False, 'import logging\n'), ((5148, 5242), 'xarray.DataArray', 'xr.DataArray', ([], {'dims': 'da_elv.raster.dims', 'coords': 'da_elv.raster.coords', 'data': 'd8', 'name': '"""flwdir"""'}), "(dims=da_elv.raster.dims, coords=da_elv.raster.coords, data=d8,\n name='flwdir')\n", (5160, 5242), True, 'import xarray as xr\n'), ((10875, 10890), 'numpy.any', 'np.any', (['rivmask'], {}), '(rivmask)\n', (10881, 10890), True, 'import numpy as np\n'), ((11842, 11886), 'pyflwdir.gis_utils.get_edge', 'pyflwdir.gis_utils.get_edge', (['elv_mask.values'], {}), '(elv_mask.values)\n', (11869, 11886), False, 'import pyflwdir\n'), ((14871, 14899), 'xarray.merge', 'xr.merge', (['[da_flw1, da_upa1]'], {}), '([da_flw1, da_upa1])\n', (14879, 14899), True, 'import xarray as xr\n'), ((15282, 15403), 'warnings.warn', 'warnings.warn', (['"""The "gaugemap" method is deprecated, use "hydromt.flw.gauge_map" instead."""', 'DeprecationWarning'], {}), '(\n \'The "gaugemap" method is deprecated, use "hydromt.flw.gauge_map" instead.\'\n , DeprecationWarning)\n', (15295, 15403), False, 'import warnings\n'), ((18130, 18171), 'numpy.zeros', 'np.zeros', (['ds.raster.shape'], {'dtype': 'np.int32'}), '(ds.raster.shape, dtype=np.int32)\n', (18138, 18171), True, 'import numpy as np\n'), ((19148, 19182), 'numpy.isin', 'np.isin', (['da_flw.values', 'pit_values'], {}), '(da_flw.values, pit_values)\n', (19155, 19182), True, 'import numpy as np\n'), ((19194, 19266), 'xarray.DataArray', 'xr.DataArray', (['mask'], {'dims': 'da_flw.raster.dims', 'coords': 'da_flw.raster.coords'}), '(mask, dims=da_flw.raster.dims, coords=da_flw.raster.coords)\n', (19206, 19266), True, 'import xarray as xr\n'), ((23624, 23794), 'warnings.warn', 'warnings.warn', (['"""basin_shape is deprecated, use a combination of hydromt.flw.basin_map and hydromt.raster.RasterDataArray.vectorize instead."""', 
'DeprecationWarning'], {}), "(\n 'basin_shape is deprecated, use a combination of hydromt.flw.basin_map and hydromt.raster.RasterDataArray.vectorize instead.'\n , DeprecationWarning)\n", (23637, 23794), False, 'import warnings\n'), ((25648, 25707), 'xarray.Variable', 'xr.Variable', ([], {'dims': 'ds.raster.dims', 'data': 'dir_arr', 'attrs': 'attrs'}), '(dims=ds.raster.dims, data=dir_arr, attrs=attrs)\n', (25659, 25707), True, 'import xarray as xr\n'), ((28981, 29071), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'elevtn', 'coords': 'da_elevtn.raster.coords', 'dims': 'da_elevtn.raster.dims'}), '(data=elevtn, coords=da_elevtn.raster.coords, dims=da_elevtn.\n raster.dims)\n', (28993, 29071), True, 'import xarray as xr\n'), ((6535, 6574), 'numpy.all', 'np.all', (['(flwdir.shape == ds.raster.shape)'], {}), '(flwdir.shape == ds.raster.shape)\n', (6541, 6574), True, 'import numpy as np\n'), ((10693, 10725), 'numpy.maximum', 'np.maximum', (['(1.0)', '(da_upa * 1000.0)'], {}), '(1.0, da_upa * 1000.0)\n', (10703, 10725), True, 'import numpy as np\n'), ((11366, 11415), 'numpy.logical_or', 'np.logical_or', (['(elvsyn_reproj != nodata)', '(~elv_mask)'], {}), '(elvsyn_reproj != nodata, ~elv_mask)\n', (11379, 11415), True, 'import numpy as np\n'), ((11921, 11962), 'numpy.logical_and', 'np.logical_and', (['_edge', '(elvsyn_reproj <= 0)'], {}), '(_edge, elvsyn_reproj <= 0)\n', (11935, 11962), True, 'import numpy as np\n'), ((11978, 12017), 'numpy.logical_or', 'np.logical_or', (['_msk', '(elvsyn_reproj == 0)'], {}), '(_msk, elvsyn_reproj == 0)\n', (11991, 12017), True, 'import numpy as np\n'), ((12029, 12041), 'numpy.any', 'np.any', (['_msk'], {}), '(_msk)\n', (12035, 12041), True, 'import numpy as np\n'), ((12718, 12754), 'numpy.logical_and', 'np.logical_and', (['rivmask', 'outside_dst'], {}), '(rivmask, outside_dst)\n', (12732, 12754), True, 'import numpy as np\n'), ((12849, 12911), 'geopandas.GeoDataFrame.from_features', 'gpd.GeoDataFrame.from_features', (['feats'], {'crs': 
'ds_hydro.raster.crs'}), '(feats, crs=ds_hydro.raster.crs)\n', (12879, 12911), True, 'import geopandas as gpd\n'), ((17610, 17629), 'numpy.atleast_1d', 'np.atleast_1d', (['idxs'], {}), '(idxs)\n', (17623, 17629), True, 'import numpy as np\n'), ((17644, 17687), 'numpy.arange', 'np.arange', (['(1)', '(idxs.size + 1)'], {'dtype': 'np.int32'}), '(1, idxs.size + 1, dtype=np.int32)\n', (17653, 17687), True, 'import numpy as np\n'), ((17893, 17916), 'numpy.any', 'np.any', (['(dist > max_dist)'], {}), '(dist > max_dist)\n', (17899, 17916), True, 'import numpy as np\n'), ((18929, 18974), 'pyflwdir.pyflwdir._infer_ftype', 'pyflwdir.pyflwdir._infer_ftype', (['da_flw.values'], {}), '(da_flw.values)\n', (18959, 18974), False, 'import pyflwdir\n'), ((20078, 20165), 'xarray.DataArray', 'xr.DataArray', ([], {'coords': 'ds.raster.coords', 'dims': 'ds.raster.dims', 'data': 'data', 'name': '"""mask"""'}), "(coords=ds.raster.coords, dims=ds.raster.dims, data=data, name=\n 'mask')\n", (20090, 20165), True, 'import xarray as xr\n'), ((20382, 20396), 'numpy.any', 'np.any', (['stream'], {}), '(stream)\n', (20388, 20396), True, 'import numpy as np\n'), ((22214, 22253), 'numpy.all', 'np.all', (['(flwdir.shape == ds.raster.shape)'], {}), '(flwdir.shape == ds.raster.shape)\n', (22220, 22253), True, 'import numpy as np\n'), ((28547, 28637), 'pyflwdir.from_array', 'pyflwdir.from_array', (['d4'], {'ftype': '"""d8"""', 'transform': 'flwdir.transform', 'latlon': 'flwdir.latlon'}), "(d4, ftype='d8', transform=flwdir.transform, latlon=\n flwdir.latlon)\n", (28566, 28637), False, 'import pyflwdir\n'), ((4075, 4091), 'numpy.isnan', 'np.isnan', (['nodata'], {}), '(nodata)\n', (4083, 4091), True, 'import numpy as np\n'), ((4701, 4749), 'numpy.logical_and', 'np.logical_and', (['(da_elv != nodata)', '(dst_rivupa > 0)'], {}), '(da_elv != nodata, dst_rivupa > 0)\n', (4715, 4749), True, 'import numpy as np\n'), ((11016, 11051), 'numpy.minimum', 'np.minimum', (['flwdir_src.rank', 'elvsyn'], {}), 
'(flwdir_src.rank, elvsyn)\n', (11026, 11051), True, 'import numpy as np\n'), ((13184, 13237), 'numpy.logical_and', 'np.logical_and', (['(flwdir.distnc > river_len)', '(rivupa > 0)'], {}), '(flwdir.distnc > river_len, rivupa > 0)\n', (13198, 13237), True, 'import numpy as np\n'), ((18053, 18084), 'warnings.warn', 'warnings.warn', (['msg', 'UserWarning'], {}), '(msg, UserWarning)\n', (18066, 18084), False, 'import warnings\n'), ((19988, 20030), 'numpy.full', 'np.full', (['ds.raster.shape', '(True)'], {'dtype': 'bool'}), '(ds.raster.shape, True, dtype=bool)\n', (19995, 20030), True, 'import numpy as np\n'), ((20284, 20353), 'numpy.logical_and', 'np.logical_and', (['(ds[name] != ds[name].raster.nodata)', '(ds[name] >= value)'], {}), '(ds[name] != ds[name].raster.nodata, ds[name] >= value)\n', (20298, 20353), True, 'import numpy as np\n'), ((23921, 23960), 'numpy.all', 'np.all', (['(flwdir.shape == ds.raster.shape)'], {}), '(flwdir.shape == ds.raster.shape)\n', (23927, 23960), True, 'import numpy as np\n'), ((4585, 4621), 'numpy.maximum', 'np.maximum', (['(1.0)', '(dst_rivupa * 1000.0)'], {}), '(1.0, dst_rivupa * 1000.0)\n', (4595, 4621), True, 'import numpy as np\n'), ((13276, 13305), 'numpy.logical_and', 'np.logical_and', (['rivmsk', '_edge'], {}), '(rivmsk, _edge)\n', (13290, 13305), True, 'import numpy as np\n')] |
'''
Implementation of Classifier Training, partly described inside Fanello et al.
'''
import sys
import signal
import errno
import glob
import numpy as np
import class_objects as co
import action_recognition_alg as ara
import cv2
import os.path
import cPickle as pickle
import logging
import yaml
import time
from OptGridSearchCV import optGridSearchCV
# pylint: disable=no-member,R0902,too-many-public-methods,too-many-arguments
# pylint: disable=too-many-locals, too-many-branches, too-many-statements
def timeit(func):
    '''
    Decorator to measure how long a method call takes.

    The elapsed wall-clock time (in seconds) of every call is appended to
    <self.time> on the method's owner, and only the 5000 most recent
    measurements are kept so the list does not grow without bound. The
    wrapped method's return value is passed through unchanged.
    '''
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name and docstring
    def wrapper(self, *arg, **kw):
        t1 = time.time()
        res = func(self, *arg, **kw)
        t2 = time.time()
        self.time.append(t2 - t1)
        # keep only the last 5000 timings to bound memory use
        del self.time[:-5000]
        return res
    return wrapper
class Classifier(object):
'''
Class to hold all Classifier specific methods.
<descriptors>:['pca','ghog','3dhof']
<action_type>:True if no buffers are used
<sparsecoding_level> is True if sparse coding is used
Classifier Parameters, for example <AdaBoost_n_estimators> or
<RDF_n_estimators> or <kernel> can be
a list, which will be reduced using optimized grid search with cross
validation.
'''
    def __init__(self, log_lev='INFO',
                 visualize=False, masks_needed=True,
                 buffer_size=co.CONST['buffer_size'],
                 sparse_dim_rat=co.CONST['sparse_dim_rat'],
                 descriptors='',
                 ptpca=False,
                 ptpca_components=None,
                 action_type='Dynamic',
                 classifiers_used='SVM', num_of_cores=4, name='',
                 svm_c=None,
                 AdaBoost_n_estimators=None,
                 RDF_n_estimators=None,
                 add_info=None,
                 sparsecoding_level=None,
                 kernel=None,
                 save_all_steps=False,
                 post_scores_processing_method=None,
                 hardcore=False,
                 for_app=False):
        '''
        Configure the classifier and attempt to load a pretrained model.

        sparsecoding_level = [Buffer, Features, None]

        <descriptors> may be a single descriptor name or a list of names.
        If <hardcore> is True, inconsistent parameter combinations raise
        instead of being silently corrected. <for_app> switches the
        instance to application mode: no test folders are scanned and a
        classifier cached inside <AppData> may be loaded as fallback.
        '''
        # make sure the application data folder exists
        if not os.path.isdir(co.CONST['AppData']):
            os.makedirs(co.CONST['AppData'])
        self.app_dir = co.CONST['AppData']
        self.for_app = for_app
        self.time = []  # rolling log of per-call times (see timeit decorator)
        self.classifiers_ids = None
        self.test_ind = None
        # General configuration
        if not isinstance(descriptors, list):
            descriptors = [descriptors]
        descriptors = sorted(descriptors)
        ###
        # collect per-descriptor feature/coder parameters from co.CONST,
        # keyed by the CONST entries whose names start with the descriptor
        features_params = {}
        coders_params = {}
        for descriptor in descriptors:
            features_params[descriptor] = {}
            features_params[descriptor]['params'] = {attrib.replace(descriptor, ''):
                                                     co.CONST[attrib] for
                                                     attrib in co.CONST if
                                                     attrib.startswith(descriptor)}
            features_params[descriptor]['sparsecoded'] = sparsecoding_level
            features_params[descriptor]['action_type'] = action_type
            coders_params[descriptor] = {}
            if not sparsecoding_level:
                features_params[descriptor]['sparse_params'] = None
            else:
                # sparse coding enabled: pull the 'sparse*' CONST entries
                features_params[descriptor]['sparse_params'] = {
                    attrib.replace('sparse', ''):
                    co.CONST[attrib] for
                    attrib in co.CONST if
                    attrib.startswith('sparse')}
                coders_params[descriptor] = {
                    attrib.replace('sparse', ''):
                    co.CONST[attrib] for
                    attrib in co.CONST if
                    attrib.startswith('sparse') and
                    'fss' not in attrib}
        self.test_name = None
        self.kernel = kernel
        self.svm_c = svm_c
        self.RDF_n_estimators = RDF_n_estimators
        self.AdaBoost_n_estimators = AdaBoost_n_estimators
        self.sparse_dim_rat = sparse_dim_rat
        # sanity-check / default the classifier hyperparameters; with
        # <hardcore> any inconsistency raises instead of being corrected
        if 'SVM' in classifiers_used and kernel is None:
            self.kernel = 'linear'
        if 'SVM' in classifiers_used:
            if svm_c is None:
                self.svm_c = co.CONST['SVM_C']
            if post_scores_processing_method == 'CProb':
                LOG.warning('Invalid post_scores_processing_method for SVM')
                if hardcore:
                    raise Exception
                else:
                    LOG.warning('Changing method to CSTD')
                    post_scores_processing_method = 'CSTD'
        if 'RDF' in classifiers_used or 'AdaBoost' in classifiers_used:
            if svm_c is not None:
                LOG.warning(
                    'svm_c is not None for RDF or AdaBoost experimentation')
                if hardcore:
                    raise Exception
        if post_scores_processing_method is None:
            # CProb (class probabilities) is used for classifiers exposing
            # predict_proba; SVM defaults to CSTD instead
            if 'RDF' in classifiers_used or 'AdaBoost' in classifiers_used:
                post_scores_processing_method = 'CProb'
            else:
                post_scores_processing_method = 'CSTD'
        classifier_params = {}
        if 'RDF' in classifiers_used and RDF_n_estimators is None:
            self.RDF_n_estimators = co.CONST['RDF_trees']
        if 'AdaBoost' in classifiers_used and AdaBoost_n_estimators is None:
            self.AdaBoost_n_estimators = co.CONST['AdaBoost_Estimators']
        if 'SVM' in classifiers_used:
            classifier_params['SVM_kernel'] = self.kernel
            classifier_params['SVM_C'] = self.svm_c
        if 'RDF' in classifiers_used:
            classifier_params['RDF_n_estimators'] = self.RDF_n_estimators
        if 'AdaBoost' in classifiers_used:
            classifier_params['AdaBoost_n_estimators'] = self.AdaBoost_n_estimators
        # passive actions are classified frame-by-frame (buffer of 1)
        if action_type != 'Passive':
            dynamic_params = {'buffer_size': buffer_size,
                              'buffer_confidence_tol': co.CONST['buffer_confidence_tol'],
                              'filter_window_size':
                              co.CONST['STD_big_filt_window']}
        else:
            dynamic_params = {'buffer_size': 1}
        if ptpca and ptpca_components is None:
            ptpca_components = co.CONST['PTPCA_components']
        ptpca_params = {'PTPCA_components': ptpca_components}
        for descriptor in descriptors:
            features_params[descriptor]['dynamic_params'] = dynamic_params
        if sparsecoding_level:
            # one sparse dimension ratio per descriptor
            if not isinstance(sparse_dim_rat, list):
                sparse_dim_rat = [sparse_dim_rat] * len(descriptors)
            if len(list(sparse_dim_rat)) != len(descriptors):
                raise Exception('<sparse_dim_rat> should be either an integer/None or' +
                                ' a list with same length with <descriptors>')
            sparse_params = dict(zip(descriptors, sparse_dim_rat))
            sparse_params['fss_max_iter'] = co.CONST['sparse_fss_max_iter']
        else:
            sparse_params = None
        testing_params = {'online': None}
        testing_params['post_scores_processing_method'] = \
            post_scores_processing_method
        # read passive/dynamic action names from 'gestures_type.csv',
        # formatted as '<Passive|Dynamic>:<name>,<name>,...' per line
        fil = os.path.join(co.CONST['rosbag_location'],
                           'gestures_type.csv')
        self.passive_actions = None
        self.dynamic_actions = None
        if os.path.exists(fil):
            with open(fil, 'r') as inp:
                for line in inp:
                    if line.split(':')[0] == 'Passive':
                        self.passive_actions = line.split(
                            ':')[1].rstrip('\n').split(',')
                    elif line.split(':')[0] == 'Dynamic':
                        self.dynamic_actions = line.split(
                            ':')[1].rstrip('\n').split(',')
        action_params = {'Passive': self.passive_actions,
                         'Dynamic': self.dynamic_actions}
        LOG.debug('Extracting: ' + str(descriptors))
        # central parameters dictionary, shared with the feature extraction
        # and action recognition modules
        self.parameters = {'classifier': classifiers_used,
                           'descriptors': descriptors,
                           'features_params': features_params,
                           'coders_params': coders_params,
                           'dynamic_params': dynamic_params,
                           'classifier_params': classifier_params,
                           'sparse_params': sparse_params,
                           'action_type': action_type,
                           'sparsecoded': sparsecoding_level,
                           'testing': False,
                           'testing_params': testing_params,
                           'actions_params': action_params,
                           'PTPCA': ptpca,
                           'PTPCA_params': ptpca_params}
        # subset of <parameters> that identifies a trained model on disk
        self.training_parameters = {k: self.parameters[k] for k in
                                    ('classifier', 'descriptors',
                                     'features_params',
                                     'dynamic_params',
                                     'classifier_params',
                                     'sparse_params',
                                     'action_type',
                                     'sparsecoded',
                                     'PTPCA',
                                     'PTPCA_params') if k in
                                    self.parameters}
        self.descriptors = descriptors
        self.add_info = add_info
        self.log_lev = log_lev
        self.visualize = visualize
        self.buffer_size = buffer_size
        self.masks_needed = masks_needed
        self.action_type = action_type
        self.classifiers_used = classifiers_used
        self.num_of_cores = num_of_cores
        self.name = name
        self.ptpca = ptpca
        self.action_recog = ara.ActionRecognition(
            self.parameters,
            log_lev=log_lev)
        if not self.for_app:
            self.available_tests = sorted(os.listdir(co.CONST['test_save_path']))
        else:
            self.available_tests = []
        self.update_experiment_info()
        # instantiate the underlying scikit-learn estimator (imported lazily
        # so only the needed sklearn submodule is loaded)
        if 'SVM' in self.classifiers_used:
            from sklearn.svm import LinearSVC
            self.classifier_type = LinearSVC(
                class_weight='balanced', C=self.svm_c,
                multi_class='ovr',
                dual=False)
        elif 'RDF' in self.classifiers_used:
            from sklearn.ensemble import RandomForestClassifier
            self.classifier_type =\
                RandomForestClassifier(self.RDF_n_estimators)
        elif 'AdaBoost' in self.classifiers_used:
            from sklearn.ensemble import AdaBoostClassifier
            self.classifier_type =\
                AdaBoostClassifier(n_estimators=self.AdaBoost_n_estimators)
        self.unified_classifier = None
        if sparsecoding_level:
            if not(sparsecoding_level == 'Features' or sparsecoding_level == 'Buffer'):
                raise Exception('Invalid sparsecoding_level, its value shoud be '
                                + 'None/False/Buffer/Features')
        self.sparsecoded = sparsecoding_level
        self.decide = None
        # Training variables
        self.training_data = None
        self.train_ground_truth = None  # is loaded from memory after training
        self.train_classes = None  # is loaded from memory after training
        # Testing general variables
        self.accuracy = None
        self.f1_scores = None
        self.confusion_matrix = None
        self.scores_savepath = None
        self.scores_std = []
        self.scores_std_mean = []
        self.scores = None
        self.scores_filter_shape = None
        self.std_big_filter_shape = None
        self.std_small_filter_shape = None
        self.recognized_classes = []
        self.crossings = None
        self.testname = ''
        self.save_fold = None
        self.online = False
        # Testing offline variables
        self.testdataname = ''
        self.test_instances = None
        # Testing online variables
        self.count_prev = None
        self.buffer_exists = None
        self.scores_exist = None
        self.img_count = -1
        self._buffer = []
        self.scores_running_mean_vec = []
        self.big_std_running_mean_vec = []
        self.small_std_running_mean_vec = []
        self.saved_buffers_scores = []
        self.new_action_starts_count = 0
        self.test_ground_truth = None
        self.mean_from = -1
        self.on_action = False
        self.act_inds = []
        self.max_filtered_score = 0
        self.less_filtered_scores_std = None
        self.high_filtered_scores_std = None
        self.classifier_folder = None
        self.testing_initialized = False
        self.classifiers_list = {}
        self.classifier_savename = 'trained_'
        self.classifier_savename += self.full_info.replace(' ', '_').lower()
        # try to load a previously trained classifier matching this setup;
        # on success it is also cached inside the application data folder
        try:
            [self.unified_classifier,
             info] = co.file_oper.load_labeled_data(
                ['Classifier'] + self.classifier_id)
            co.file_oper.save_labeled_data(['Classifier'],
                                           [self.unified_classifier,
                                            self.training_parameters],
                                           name=self.app_dir)
            if isinstance(info, tuple):
                self.training_params = info[0]
                self.additional_params = info[1:]
            else:
                self.training_params = info
            self.loaded_classifier = True
            LOG.info('Loaded Classifier')
        except TypeError:
            # load_labeled_data found no cached classifier for this id
            if self.for_app:
                # in application mode fall back to the AppData cache
                [self.unified_classifier,
                 info] = co.file_oper.load_labeled_data(
                    ['Classifier'],
                    name=self.app_dir)
                self.loaded_classifier = True
            else:
                self.loaded_classifier = False
                LOG.info('Classifier not Loaded')
        self.load_tests()
        try:
            self.classifier_folder = str(self.classifiers_list[
                self.classifier_savename])
        except KeyError:
            # unknown classifier: assign the next free folder index
            self.classifier_folder = str(len(self.classifiers_list))
        self.coders_to_train = []
        # parameters bound variables
        self.frames_preproc = ara.FramesPreprocessing(self.parameters)
        available_descriptors =\
            ara.Actions(self.parameters).available_descriptors
        try:
            self.features_extractors = [available_descriptors[nam](
                self.parameters, self.frames_preproc)
                for nam in self.parameters['descriptors']]
            self.buffer_operators = [
                ara.BufferOperations(self.parameters)
                for nam in self.parameters['descriptors']]
            if self.sparsecoded:
                [self.action_recog.
                 actions.load_sparse_coder(ind) for ind in range(
                     len(self.parameters['descriptors']))]
        except BaseException: pass  # best effort: extractors/coders may not exist yet
def load_tests(self, reset=True):
if reset:
self.testdata = [None] * len(self.available_tests)
self.fscores = [None] * len(self.available_tests)
self.accuracies = [None] * len(self.available_tests)
self.results = [None] * len(self.available_tests)
self.conf_mats = [None] * len(self.available_tests)
self.test_times = [None] * len(self.available_tests)
for count, test in enumerate(self.available_tests):
if (self.testdata[count] is None or
self.testdata[count]['Accuracy'] is None):
self.testdata[count] = co.file_oper.load_labeled_data(
['Testing'] + self.tests_ids[count])
if (self.testdata[count] is not None and
self.testdata[count]['Accuracy'] is not None):
self.accuracies[count] = self.testdata[count]['Accuracy']
self.fscores[count] = self.testdata[count]['FScores']
self.results[count] = self.testdata[count]['Results']
self.conf_mats[count] = self.testdata[count]['ConfMat']
self.test_times[count] = self.testdata[count]['TestTime']
try:
self.partial_accuracies[count] = self.testdata[count][
'PartialAccuracies']
except BaseException: pass
else:
self.testdata[count] = {}
self.testdata[count]['Accuracy'] = {}
self.testdata[count]['FScores'] = {}
self.testdata[count]['Results'] = {}
self.testdata[count]['ConfMat'] = {}
self.testdata[count]['TestTime'] = {}
self.testdata[count]['Labels'] = {}
try:
self.testdata[count]['PartialAccuracies'] = {}
except BaseException: pass
def update_experiment_info(self):
if self.parameters['action_type'] == 'Passive':
info = 'passive '
else:
info = 'dynamic '
info = info + self.name + ' ' + self.classifiers_used + ' '
info += 'using'
if self.parameters['sparsecoded']:
info += ' sparsecoded'
for feature in self.parameters['descriptors']:
info += ' ' + feature
info += ' descriptors '
if 'SVM' in self.parameters['classifier']:
info += 'with ' + self.parameters[
'classifier_params']['SVM_kernel'] + ' kernel'
elif 'RDF' in self.parameters['classifier']:
info += ('with ' + str(self.parameters['classifier_params'][
'RDF_n_estimators']) + ' estimators')
elif 'AdaBoost' in self.parameters['classifier']:
info += ('with ' + str(self.parameters['classifier_params'][
'AdaBoost_n_estimators']) + ' estimators')
if self.parameters['action_type'] == 'Dynamic':
info += ' with buffer size ' + str(self.buffer_size)
if self.parameters['sparsecoded']:
info += ' with sparsecoding by ratio of ' + \
str(self.sparse_dim_rat)
if self.ptpca:
info += (' with ' +
str(self.parameters['PTPCA_params']['PTPCA_components']) +
' post-time-pca components')
self.full_info = info.title()
if self.add_info:
info += self.add_info
self.classifier_savename = 'trained_'
self.classifier_savename += self.full_info.replace(' ', '_').lower()
self.update_classifier_id()
self.update_tests_ids()
def update_classifier_id(self):
self.features_file_id = []
self.features_id = []
for count in range(len(self.parameters['descriptors'])):
_id, file_id = self.action_recog.actions.retrieve_descriptor_possible_ids(count,
assume_existence=True)
self.features_id.append(_id)
self.features_file_id.append(file_id)
self.classifier_id = [co.dict_oper.create_sorted_dict_view(
{'Classifier': str(self.classifiers_used)}),
co.dict_oper.create_sorted_dict_view(
{'ClassifierParams': str(co.dict_oper.create_sorted_dict_view(
self.parameters['classifier_params']))}),
co.dict_oper.create_sorted_dict_view(
{'ActionsType': str(self.action_type)}),
co.dict_oper.create_sorted_dict_view(
{'FeaturesParams': str(self.features_file_id)})]
def update_tests_ids(self):
self.tests_ids = []
for count, test in enumerate(self.available_tests):
self.tests_ids.append([co.dict_oper.create_sorted_dict_view({'Test': str(test)}),
co.dict_oper.create_sorted_dict_view(
{'TestingParams': str(co.dict_oper.create_sorted_dict_view(
self.parameters['testing_params']))})]
+ [self.classifier_id])
def initialize_classifier(self, classifier):
'''
Add type to classifier and set methods
'''
self.unified_classifier = classifier
if 'SVM' in self.classifiers_used:
self.unified_classifier.decide = self.unified_classifier.decision_function
self.unified_classifier.predict = self.unified_classifier.predict
elif 'RDF' in self.classifiers_used or 'AdaBoost' in self.classifiers_used:
self.unified_classifier.decide = self.unified_classifier.predict_proba
self.unified_classifier.predict = self.unified_classifier.predict
co.file_oper.save_labeled_data(['Classifier'] + self.classifier_id,
[self.unified_classifier,
self.training_parameters])
co.file_oper.save_labeled_data(['Classifier'],
[self.unified_classifier,
self.training_parameters],
name=self.app_dir)
def reset_offline_test(self):
'''
Reset offline testing variables
'''
# Testing general variables
self.scores_std = []
self.scores_std_mean = []
self.scores = None
self.recognized_classes = []
self.crossings = None
self.save_fold = None
self.testing_initialized = True
# Testing offline variables
def reset_online_test(self):
'''
Reset online testing variables
'''
# Testing general variables
self.scores_std = []
self.scores_std_mean = []
self.scores = []
self.recognized_classes = []
self.crossings = []
self.save_fold = None
# Testing online variables
self.count_prev = None
self.buffer_exists = []
self.scores_exist = []
self.img_count = -1
self._buffer = []
self.scores_running_mean_vec = []
self.big_std_running_mean_vec = []
self.small_std_running_mean_vec = []
self.saved_buffers_scores = []
self.new_action_starts_count = 0
self.test_ground_truth = None
self.mean_from = -1
self.on_action = False
self.act_inds = []
self.max_filtered_score = 0
self.less_filtered_scores_std = None
self.high_filtered_scores_std = None
self.testing_initialized = True
def add_train_classes(self, training_datapath):
'''
Set the training classes of the classifier
'''
try:
self.train_classes = [name for name in os.listdir(training_datapath)
if os.path.isdir(os.path.join(training_datapath, name))][::-1]
except:
if self.for_app:
with open(os.path.join(self.app_dir,
'train_classes'),'r') as inp:
self.train_classes = pickle.load(inp)
else:
raise
self.all_actions = ['Undefined'] + self.train_classes
# Compare actions in memory with actions in file 'gestures_type.csv'
if self.passive_actions is not None:
passive_actions = [clas for clas in
(self.passive_actions) if clas
in self.train_classes]
if self.dynamic_actions is not None:
dynamic_actions = [clas for clas in
(self.dynamic_actions) if clas
in self.train_classes]
if (self.dynamic_actions is not None and
self.passive_actions is not None):
if 'Sync' in self.classifiers_used:
self.train_classes = {'Passive': passive_actions,
'Dynamic': dynamic_actions}
else:
classes = []
if self.action_type == 'Dynamic' or self.action_type == 'All':
classes += dynamic_actions
if self.action_type == 'Passive' or self.action_type == 'All':
classes += passive_actions
self.train_classes = classes
with open(os.path.join(self.app_dir,
'train_classes'),'w') as out:
pickle.dump(self.train_classes, out)
    def run_training(self, coders_retrain=False,
                     classifiers_retrain=False,
                     training_datapath=None, classifier_savename=None,
                     num_of_cores=4, classifier_save=True,
                     max_act_samples=None,
                     min_dict_iterations=5,
                     visualize_feat=False, just_sparse=False,
                     init_sc_traindata_num=200,
                     train_all=False):
        '''
        Run the whole training pipeline: class discovery, feature
        extraction, (optional) sparse coder training and classifier fit.

        <Arguments>
        For coders training:
            Do not train coders if coder already exists or <coders_retrain>
            is False. <min_dict_iterations> denote the minimum training iterations to
            take place after the whole data has been processed from the trainer
            of the coder. <init_sc_traindata_num> denotes how many samples
            will be used in the first iteration of the sparse coder training.
            If <just_sparse> is True, the method returns right after the
            training data (and coders) are prepared, without fitting the
            classifier.
        For svm training:
            Train ClassifierS with <num_of_cores>.
            Save them if <classifier_save> is True to <classifiers_savepath>. Do not train
            if <classifiers_savepath> already exists and <classifiers_retrain> is False.
        '''
        self.train_all = train_all
        self.parameters['testing'] = False  # switch to training mode
        LOG.info(self.full_info + ':')
        if classifier_savename is not None:
            self.classifier_savename = classifier_savename
        if training_datapath is None:
            training_datapath = co.CONST['actions_path']
        self.add_train_classes(training_datapath)
        # retrain if no pretrained classifier was loaded in __init__
        if self.unified_classifier is None:
            LOG.info('Missing trained classifier:' +
                     self.full_info)
            LOG.info('Classifier will be retrained')
            classifiers_retrain = True
        else:
            # classifier exists; without sparse coding nothing else to do
            if not self.sparsecoded:
                return
        self.prepare_training_data(training_datapath, max_act_samples,
                                   visualize_feat=visualize_feat)
        if just_sparse:
            return
        if self.sparsecoded and self.coders_to_train and classifiers_retrain:
            # Enters only if coders were not initially trained or had to be
            # retrained. Otherwise, sparse descriptors are computed when
            #<Action.add_features> is called
            LOG.info('Trained' + str([self.parameters['descriptors'][coder] for coder in
                                      self.coders_to_train]))
            LOG.info('Making Sparse Features..')
            # re-extract the training data with the freshly trained coders
            self.action_recog = ara.ActionRecognition(
                self.parameters,
                log_lev=self.log_lev,
                feat_filename=os.path.join(co.CONST['feat_save_path'],
                                           'saved'))
            self.prepare_training_data(training_datapath, max_act_samples,
                                       visualize_feat=visualize_feat)
        self.process_training(num_of_cores, classifiers_retrain,
                              self.classifier_savename, classifier_save)
    def prepare_training_data(self, path=None, max_act_samples=None,
                              visualize_feat=False):
        '''
        Read actions from the <path> and name them according to their parent
        folder name.

        <max_act_samples> caps the number of samples loaded per action and
        <visualize_feat> is forwarded to descriptor extraction. Fills
        <self.training_data> with the vertically stacked descriptors of all
        actions and <self.train_ground_truth> with the class index of every
        sample; rows containing non-finite descriptor values are dropped.
        '''
        LOG.info('Adding actions..')
        # Repeat the whole pass until no sparse coder is missing: when an
        # untrained coder is detected the dictionaries are trained and the
        # data preparation restarts from scratch.
        while True:
            self.training_data = []
            self.training_samples_inds = []
            for act_count, action in enumerate(self.train_classes):
                LOG.info('Action:' + action)
                descriptors, samples_indices, mean_depths, _, trained_coders, _ = self.add_action(name=action,
                                                                                                  data=os.path.join(
                                                                                                      path, action),
                                                                                                  use_dexter=False,
                                                                                                  action_type=self.action_type,
                                                                                                  max_act_samples=max_act_samples)
                if not(self.sparsecoded and None in trained_coders):
                    # All needed coders exist: keep only rows whose
                    # descriptors are entirely finite.
                    descriptors = np.hstack(tuple(descriptors))
                    fmask = np.prod(np.isfinite(
                        descriptors), axis=1).astype(bool)
                    descriptors = descriptors[fmask]
                    LOG.info('Action \'' + action + '\' has ' +
                             'descriptors of shape ' + str(descriptors.shape))
                    self.training_data.append(descriptors)
                    self.training_samples_inds.append(
                        np.array(samples_indices)[fmask])
                else:
                    # A coder is missing: discard everything collected so
                    # far; the outer loop will retrain and retry.
                    self.training_samples_inds = []
                    self.training_data = []
                    self.train_ground_truth = []
            if self.training_data:
                if self.action_type == 'Dynamic':
                    # Balance the number of samples per class before
                    # constructing the ground truth.
                    self.training_data = co.preproc_oper.equalize_samples(
                        samples=self.training_data,
                        utterance_indices=self.training_samples_inds,
                        mode='random')
                self.train_ground_truth = []
                for act_count, clas in enumerate(self.training_data):
                    self.train_ground_truth += clas.shape[0] * [act_count]
                self.training_data = np.vstack((self.training_data))
            if None in trained_coders and self.sparsecoded:
                self.action_recog.actions.train_sparse_dictionary()
            else:
                break
        # Final safety net: remove any remaining non-finite rows from the
        # stacked matrix and keep the ground truth aligned with it.
        finite_samples = np.prod(
            np.isfinite(
                self.training_data),
            axis=1).astype(bool)
        self.train_ground_truth = np.array(
            self.train_ground_truth)[finite_samples]
        self.training_data = self.training_data[finite_samples, :]
        LOG.info('Total Training Data has shape:'
                 + str(self.training_data.shape))
def process_training(self, num_of_cores=4, retrain=False,
savepath=None, save=True):
'''
Train (or load trained) Classifiers with number of cores num_of_cores, with buffer size (stride
is 1) <self.buffer_size>. If <retrain> is True, Classifiers are retrained, even if
<save_path> exists.
'''
loaded = 0
if save and savepath is None:
raise Exception('savepath needed')
if retrain or self.unified_classifier is None:
if retrain and self.unified_classifier is not None:
LOG.info('retrain switch is True, so the Classifier ' +
'is retrained')
classifier_params = {elem.replace(self.classifiers_used + '_', ''):
self.parameters['classifier_params'][elem]
for elem in
self.parameters['classifier_params']
if elem.startswith(self.classifiers_used)}
if any([isinstance(classifier_params[elem], list)
for elem in classifier_params]):
grid_search_params = classifier_params.copy()
from sklearn.multiclass import OneVsRestClassifier
if isinstance(self.classifier_type, OneVsRestClassifier):
grid_search_params = {('estimator__' + key): classifier_params[key]
for key in classifier_params}
grid_search_params = {key: (grid_search_params[key] if
isinstance(
grid_search_params[key], list)
else [
grid_search_params[key]]) for key in
classifier_params}
best_params, best_scores, best_estimators = optGridSearchCV(
self.classifier_type, self.training_data,
self.train_ground_truth, grid_search_params, n_jobs=4,
fold_num=3)
best_params = best_params[-1]
best_scores = best_scores[-1]
best_estimator = best_estimators[-1]
if isinstance(self.classifier_type, OneVsRestClassifier):
best_params = {key.replace('estimator__', ''):
classifier_params[
key.replace('estimator__', '')]
for key in best_params}
classifier_params = {self.classifiers_used + '_' + key: best_params[key] for key
in best_params}
self.parameters['classifier_params'].update(classifier_params)
self.training_parameters['classifier_params'].update(
classifier_params)
self.classifier_type = best_estimator
self.update_experiment_info()
savepath = self.classifier_savename
self.initialize_classifier(self.classifier_type.fit(self.training_data,
self.train_ground_truth))
    def compute_testing_time(self, testname):
        '''
        Compute the mean testing times for test <testname>.

        Returns a dict with keys 'Classification' and 'Features Extraction'
        holding mean elapsed times, or None when the saved per-descriptor
        timing data cannot be loaded (offline mode only).
        '''
        testing_time = {}
        features_extraction_time = 0
        if not self.online:
            # Sum mean extraction times over every descriptor, loading the
            # saved timing records of each one.
            for count in range(len(self.parameters['descriptors'])):
                try:
                    loaded = co.file_oper.load_labeled_data(
                        [str(self.features_id[count][-1])] +
                        self.features_file_id[count] +
                        [str(testname)])
                    (_, _, _, feat_times) = loaded
                except BaseException:
                    # Missing/corrupt timing data: caller gets None and can
                    # skip the report.
                    return None
                for key in feat_times:
                    LOG.info('Time:' + str(key) + ':' +
                             str(np.mean(feat_times[key])))
                    features_extraction_time += np.mean(feat_times[key])
            try:
                # Average the last recorded classification time over the
                # number of classified samples.
                testing_time['Classification'] = self.time[
                    -1] / float(self.scores.shape[0])
            except IndexError:
                # No in-memory timing: fall back to the saved test record.
                testing_time['Classification'] = (
                    co.file_oper.load_labeled_data(
                        ['Testing'] + self.tests_ids[
                            self.available_tests.index(
                                testname)])['TestTime'][
                                    'Classification'])
        else:
            testing_time['Classification'] = np.mean(self.time)
        testing_time['Features Extraction'] = features_extraction_time
        return testing_time
def add_action(self, name=None, data=None, visualize=False, offline_vis=False,
to_visualize=[], exit_after_visualization=False,
use_dexter=False,
action_type=None,
max_act_samples=None):
return self.action_recog.add_action(
name=name,
use_dexter=use_dexter,
action_type=self.action_type,
max_act_samples=max_act_samples,
data=data,
offline_vis=offline_vis,
to_visualize=to_visualize,
exit_after_visualization=exit_after_visualization)
def offline_testdata_processing(self, datapath):
'''
Offline testing data processing, using data in <datapath>.
'''
LOG.info('Processing test data..')
LOG.info('Extracting descriptors..')
(descriptors, _, mean_depths, test_name, _, _) = self.add_action(
name=None, data=datapath)
testdata = np.hstack(tuple(descriptors))
self.parameters['testing_params'][test_name] = test_name
self.parameters['testing_params']['current'] = test_name
return testdata
    def save_plot(self, fig, lgd=None, display_all=False, info=None):
        '''
        <fig>: figure
        <lgd>: legend of figure
        <display_all>: whether to save as Total plot
        <info>: optional extra string appended to the saved file name
        Saves plot if the action resides in self.available_tests
        '''
        filename = None
        if display_all:
            testname = self.action_type.lower()
            filename = os.path.join(*self.save_fold.split(os.sep)[:-1] +
                                    ['Total', testname + '.pdf'])
        else:
            if self.test_name is None:
                self.test_name = (self.name + ' ' + self.classifiers_used).title()
            if self.test_name in self.available_tests:
                if self.save_fold is None:
                    # Resolve the destination folder: offline runs look it
                    # up in the catalog of saved tests, online runs save
                    # under 'Online'.
                    if not self.online:
                        fold_name = co.file_oper.load_labeled_data(['Testing'],
                                                                  just_catalog=True,
                                                                  include_all_catalog=True)[
                            str(self.tests_ids[
                                self.available_tests.
                                index(self.test_name)])]
                    else:
                        fold_name = 'Online'
                    self.save_fold = os.path.join(
                        co.CONST['results_fold'], 'Classification', fold_name,
                        self.test_name)
                    if self.add_info is not None:
                        self.save_fold = os.path.join(
                            self.save_fold, self.add_info.replace(' ', '_').lower())
                    co.makedir(self.save_fold)
                LOG.info('Saving to ' + self.save_fold)
                if info is not None:
                    filename = os.path.join(
                        self.save_fold, (self.testname + ' ' + info +
                                         '.pdf').replace(' ','_'))
                else:
                    filename = os.path.join(
                        self.save_fold, self.testname.replace(' ','_') + '.pdf')
            else:
                LOG.warning('Requested figure to plot belongs to an' +
                            ' action that does not reside in <self.'+
                            'available_tests> .Skipping..')
                filename = None
        import matplotlib.pyplot as plt
        if filename is not None:
            if lgd is None:
                plt.savefig(filename)
            else:
                # Include the legend in the bounding box so it is not
                # clipped when placed outside the axes.
                plt.savefig(filename,
                            bbox_extra_artists=(lgd,), bbox_inches='tight')
        plt.close()
    def plot_result(self, data, info=None, save=True, xlabel='Frames', ylabel='',
                    labels=None, colors=None, linewidths=None, alphas=None,
                    xticks_names=None, yticks_names=None, xticks_locs=None,
                    yticks_locs=None, markers=None, markers_sizes=None, zorders=None, ylim=None, xlim=None,
                    display_all=False, title=False):
        '''
        <data> is a numpy array dims (n_points, n_plots),
        <labels> is a string list of dimension (n_plots)
        <colors> ditto

        The remaining keyword arguments tune the per-curve matplotlib
        styling; any left as None gets a default. If <save> is True the
        figure is written through <self.save_plot>. Returns
        (fig, lgd, axes).
        '''
        import matplotlib
        from matplotlib import pyplot as plt
        #matplotlib.rcParams['text.usetex'] = True
        #matplotlib.rcParams['text.latex.unicode'] = True
        # plt.style.classifiers_used('seaborn-ticks')
        if len(data.shape) == 1:
            # Normalize 1D input to a single-column (n_points, 1) array.
            data = np.atleast_2d(data).T
        fig, axes = plt.subplots()
        if xticks_locs is not None:
            axes.set_xticks(xticks_locs, minor=True)
            axes.xaxis.grid(True, which='minor')
        if yticks_locs is not None:
            axes.set_yticks(yticks_locs, minor=True)
            axes.yaxis.grid(True, which='minor')
        if xticks_names is not None:
            plt.xticks(range(len(xticks_names)), xticks_names)
        if yticks_names is not None:
            plt.yticks(range(len(yticks_names)), yticks_names)
        if markers is None:
            markers = [','] * data.shape[1]
        if markers_sizes is None:
            markers_sizes = [10] * data.shape[1]
        if colors is None:
            colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
        if alphas is None:
            alphas = data.shape[1] * [1]
        if zorders is None:
            zorders = data.shape[1] * [0]
        # If there are more curves than preset colors, pad with random ones.
        while len(colors) < data.shape[1]:
            colors += [tuple(np.random.random(3))]
        if linewidths is None:
            linewidths = [1] * data.shape[1]
        lgd = None
        for count in range(data.shape[1]):
            if labels is not None:
                axes.plot(data[:, count], label='%s' % labels[count],
                          color=colors[count],
                          linewidth=linewidths[count],
                          marker=markers[count], alpha=alphas[count],
                          zorder=zorders[count],
                          markersize=markers_sizes[count])
                # Place the legend outside the axes so it never hides data.
                lgd = co.plot_oper.put_legend_outside_plot(axes,
                                                           already_reshaped=True)
            else:
                axes.plot(data[:, count],
                          color=colors[count],
                          linewidth=linewidths[count],
                          marker=markers[count], alpha=alphas[count],
                          zorder=zorders[count],
                          markersize=markers_sizes[count])
        if title:
            if info is not None:
                plt.title(self.testname +
                          '\n Dataset: ' + self.testdataname +
                          '\n' + info.title())
            else:
                plt.title(self.testname +
                          '\n Dataset ' + self.testdataname)
                info = ''
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        if ylim is not None:
            plt.ylim(ylim)
        if xlim is not None:
            plt.xlim(xlim)
        if save:
            self.save_plot(fig, lgd, display_all=display_all, info=info)
        return fig, lgd, axes
    def init_testing(self, data=None, online=True, save=True, load=True,
                     testname=None, scores_savepath=None,
                     scores_filter_shape=5,
                     std_small_filter_shape=co.CONST['STD_small_filt_window'],
                     std_big_filter_shape=co.CONST['STD_big_filt_window'],
                     testdatapath=None, save_results=True):
        '''
        Initializes paths and names used in testing to save, load and visualize
        data.
        Built as a convenience method, in case <self.run_testing> gets overriden.

        <data>: testing data path (used offline to derive the data name).
        <testname>: overrides the automatically derived test title.
        The *_filter_shape arguments set the smoothing windows used during
        classification.
        NOTE(review): <save_results> is accepted but not referenced inside
        this method body - confirm whether it is consumed by an override.
        '''
        self.parameters['testing'] = True
        self.parameters['testing_params']['online'] = online
        if online:
            self.reset_online_test()
        else:
            self.reset_offline_test()
        self.scores_filter_shape = scores_filter_shape
        self.std_small_filter_shape = std_small_filter_shape
        self.std_big_filter_shape = std_big_filter_shape
        self.online = online
        if testname is not None:
            self.testname = testname.title()
        else:
            self.testname = (self.name + ' ' + self.classifiers_used).title()
        if self.add_info is not None:
            self.testname += ' ' + self.add_info.title()
        self.parameters['testing_params']['current'] = self.testname
        if online:
            # A pseudo-online run supplies <testdatapath>, which is recorded
            # in the data name for traceability.
            if testdatapath is not None:
                self.testdataname = ('online (using '
                                     + os.path.basename(testdatapath) + ')')
            else:
                self.testdataname = 'online'
        else:
            self.testdataname = os.path.basename(data)
        if not self.online:
            if self.test_ind is not None:
                # Derive the results subfolder from the catalog of already
                # saved tests; new tests get the next free index.
                available_tests_ids = co.file_oper.load_labeled_data(['Testing'],
                                                                     just_catalog=True,
                                                                     include_all_catalog=True)
                if available_tests_ids is None:
                    fold_name = '0'
                else:
                    curr_test_id = self.tests_ids[self.available_tests.
                                                  index(self.test_name)]
                    if str(curr_test_id) in available_tests_ids:
                        fold_name = str(available_tests_ids[str(curr_test_id)])
                    else:
                        fold_name = str(len(available_tests_ids))
        else:
            self.test_name = 'Online'
            try:
                fold_name = os.path.join(*[co.CONST['results_fold'],
                                           'Classification', 'Online'])
            except OSError:
                fold_name = '0'
        if self.test_ind is not None:
            self.save_fold = os.path.join(
                co.CONST['results_fold'], 'Classification', self.test_name,
                fold_name)
            co.makedir(self.save_fold)
        if save or load:
            fold_name = self.classifier_folder
            if scores_savepath is None:
                # Build a default scores file name from the test data name
                # and the full classifier description.
                self.scores_savepath = self.testdataname + '_scores_for_'
                self.scores_savepath += self.full_info.replace(' ',
                                                               '_').lower()
                self.scores_savepath += '.pkl'
            else:
                self.scores_savepath = scores_savepath
        return True
def run_testing(self, data=None, derot_angle=None, derot_center=None,
online=True,
scores_filter_shape=5,
std_small_filter_shape=co.CONST['STD_small_filt_window'],
std_big_filter_shape=co.CONST['STD_big_filt_window'],
ground_truth_type=None,
img_count=None, save=True, scores_savepath=None,
load=False, testname=None, display_scores=True,
construct_gt=True, just_scores=False, testdatapath=None,
compute_perform=True,
save_results=True):
'''
Test Classifiers using data (.png files) located in <data>. If <online>, the
testing is online, with <data> being a numpy array, which has been
firstly processed by <hand_segmentation_alg>. The scores retrieved from
testing are filtered using a box filter of shape <box_filter_shape>.
The running mean along a buffer
of the data is computed with a running window of length
<mean_filter_shape>. The ground truth for the testing data is given by
<ground_truth_type> (for further info about the variable refer to
<co.gd_oper.construct_ground_truth>). If the training is online, the count of
the frame is passed by <img_count>. If <save> is True,
testing results are saved to <scores_savepath>, or a path constructed
by the configuration. <testname> overrides the first line of the plots.
If <load> is True and <scores_save_path> exists, testing is bypassed and all the
necessary results are loaded from memory. If <just_scores> is True, the
classification stage is not done and only scores are computed. If
<testdatapath> is not <None> and <online> is True, then it will be
assumed that a pseudoonline testing is taking place
'''
loaded = False
if not online:
LOG.info('Testing:' + data)
try:
self.test_ind = self.available_tests.index(data)
self.test_name = data
except BaseException:
if data.split(os.sep)[-1] in self.available_tests:
self.test_ind = (
self.available_tests.index(data.split(os.sep)[-1]))
self.test_name = data.split(os.sep)[-1]
elif data in self.dynamic_actions or data in self.passive_actions:
self.test_ind = None
elif data.split(os.sep)[-1] in self.dynamic_actions or \
data.split(os.sep)[-1] in self.passive_actions:
self.test_ind = None
else:
raise Exception('test data must be inside test_save_path,' +
' check config.yaml')
if construct_gt and ground_truth_type is None:
ground_truth_type =os.path.join(
co.CONST['ground_truth_fold'],
self.test_name + '.csv')
elif isinstance(data, tuple):
derot_angle = data[1]
derot_center = data[2]
data = data[0]
if not self.testing_initialized or not online:
if not self.init_testing(data=data,
online=online,
save=save,
load=load,
testname=testname,
scores_savepath=scores_savepath,
scores_filter_shape=scores_filter_shape,
std_small_filter_shape=std_small_filter_shape,
std_big_filter_shape=std_big_filter_shape,
testdatapath=testdatapath,
save_results=save_results):
return False
if not online:
if self.test_ind is not None and (
load and self.accuracies[self.available_tests.index(self.test_name)]
is not None):
LOG.info('Tests already performed, loaded data')
try:
self.scores = self.results['Scores']
loaded = True
except:
pass
if not loaded:
if self.test_ind is not None:
testdata = self.offline_testdata_processing(
os.path.join(co.CONST['test_save_path'],
self.test_name))
else:
testdata = self.offline_testdata_processing(
data)
try:
self.test_ind = self.available_tests.index(data)
except BaseException: self.test_ind = None
LOG.info(self.full_info + ':')
LOG.info('Testing Classifiers using testdata with size: '
+ str(testdata.shape))
fmask = np.prod(np.isfinite(testdata), axis=1).astype(bool)
fin_scores = self.unified_classifier.decide(
testdata[fmask, :])
self.scores = np.zeros(
(testdata.shape[0], fin_scores.shape[1]))
self.scores[:] = None
self.scores[fmask] = fin_scores
if self.test_ind is not None:
self.testdata[self.test_ind]['Results']['Scores'] = self.scores
if construct_gt:
LOG.info('Constructing ground truth vector..')
self.test_ground_truth, self.test_breakpoints = co.gd_oper.construct_ground_truth(
os.path.join(co.CONST['test_save_path'], self.test_name),
classes_namespace=self.train_classes,
length=self.scores.shape[0],
ground_truth_type=ground_truth_type,
ret_breakpoints=True)
utterances_inds = co.gd_oper.merge_utterances_vectors(
co.gd_oper.create_utterances_vectors(
self.test_breakpoints, len(self.test_ground_truth)),
self.train_classes)
if not just_scores:
self.classify_offline(save=save, display=display_scores,
compute_perform=compute_perform,
extraction_method=
self.parameters[
'testing_params']['post_scores_processing_method'])
self.correlate_with_ground_truth(save=save,
display=display_scores,
compute_perform=compute_perform,
utterances_inds=utterances_inds)
self.display_scores_and_time(save=save)
if self.test_ind is not None:
co.file_oper.save_labeled_data(['Testing'] +self.tests_ids[
self.test_ind], self.testdata[self.test_ind])
if not just_scores:
if display_scores:
if self.parameters['testing_params'][
'post_scores_processing_method'] == 'CSTD':
self.plot_result(np.concatenate((
self.less_filtered_scores_std[:, None],
self.high_filtered_scores_std[:, None]), axis=1),
info='Scores Statistics',
xlabel='Frames',
labels=['STD', 'STD Mean'],
colors=['r', 'g'],
save=save)
mean_diff = (np.array(self.high_filtered_scores_std) -
np.array(self.less_filtered_scores_std))
mean_diff = (mean_diff) / float(np.max(np.abs(mean_diff[
np.isfinite(mean_diff)])))
plots = [mean_diff]
labels = ['ScoresSTD - ScoresSTDMean']
if self.test_ground_truth is not None:
plots += [((self.test_ground_truth - np.mean(self.test_ground_truth[
np.isfinite(self.test_ground_truth)])) / float(
np.max(self.test_ground_truth[
np.isfinite(self.test_ground_truth)])))[:, None]]
labels += ['Ground Truth']
self.plot_result(np.concatenate(plots, axis=1), labels=labels,
info='Metric of actions starting and ending ' +
'points', xlabel='Frames', save=save)
if display_scores:
self.plot_result(self.scores,
labels=self.train_classes,
info='Scores',
xlabel='Frames',
save=save,
)
return True, self.scores
else:
'''
input is processed from hand_segmentation_alg (any data
processed in such way, that the result is the same with my processing,
is acceptable, eg. Dexter)
There must be a continuous data streaming (method called in every
loop), even if the result of the previous algorithm is None
'''
scores_exist, score = self.process_online_data(data, img_count,
derot_angle,
derot_center,
just_scores=just_scores)
return scores_exist, score
def visualize_action(self, action, save=True,
save_name=None, *args, **kwargs):
'''
Visualizes action or a testing dataset using predefined locations in
config.yaml and the method co.draw_oper.plot_utterances
'''
dataset_loc = '/media/vassilis/Thesis/Datasets/PersonalFarm/'
results_loc = '/home/vassilis/Thesis/KinectPainting/Results/DataVisualization'
ground_truth, breakpoints, labels = co.gd_oper.load_ground_truth(action, ret_labs=True,
ret_breakpoints=True)
testing =True
images_base_loc = os.path.join(dataset_loc, 'actions',
'sets' if not testing else 'whole_result')
images_loc = os.path.join(
images_base_loc, action.replace(
'_', ' ').title())
imgs, masks, sync, angles, centers, samples_indices = co.imfold_oper.load_frames_data(
images_loc, masks_needed=True)
import cv2
masks_centers = []
xdim = 0
ydim = 0
conts = []
tmp = []
for mask, img in zip(masks, imgs):
conts = cv2.findContours(
mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[1]
conts_areas = [cv2.contourArea(cont) for cont in conts]
tmp.append(np.sum(mask*img >0))
if np.sum(mask*img >0) < 500:
masks_centers.append(None)
else:
cont = conts[np.argmax(conts_areas)]
x, y, w, h = cv2.boundingRect(cont)
if w == 0 or h == 0:
masks_centers.append(None)
else:
masks_centers.append([y +h/2, x+w/2])
xdim = max(w, xdim)
ydim = max(h, ydim)
cropped_imgs = []
for img, center in zip(imgs, masks_centers):
if center is not None:
cropped_img = img[max(0, center[0]-ydim/2)
:min(img.shape[0], center[0]+ydim/2),
max(0, center[1]-xdim/2)
:min(img.shape[0], center[1]+xdim/2)]
inp_img = np.zeros((ydim, xdim))
inp_img[:cropped_img.shape[0],
:cropped_img.shape[1]] = cropped_img
cropped_imgs.append(inp_img)
else:
cropped_imgs.append(None)
fig = co.draw_oper.plot_utterances(frames=cropped_imgs,
frames_sync=sync,
ground_truth=ground_truth,
breakpoints= breakpoints,
labels=labels,
dataset_name=action,
*args, **kwargs)
if save:
if save_name is None:
save_name = 'Full' + action
fig.savefig(os.path.join(results_loc,
save_name + '.pdf'))
return fig
'''
categories_to_zoom=None,
#categories_to_zoom = self.dynamic_actions,
show_breaks=True, show_occ_tab=False,
show_zoomed_occ=True, show_im_examples=False,
show_fig_title=True,
examples_num=15
'''
def apply_to_training(
self, method, excluded_actions=None, *args, **kwargs):
'''
Apply a method to training data
'''
prev_root = ''
prev_action = ''
res = []
actions = (self.passive_actions +
self.dynamic_actions)
if excluded_actions is not None:
for action in excluded_actions:
actions.remove(action)
paths = os.listdir(co.CONST['actions_path'])
for action in actions:
if action not in paths:
actions.remove(action)
if not actions:
raise Exception('Badly given actions_path in config.yaml')
dirs = [os.path.join(co.CONST['actions_path'], action) for action in
actions]
for direc in dirs:
for root, dirs, _ in os.walk(direc):
separated_root = os.path.normpath(
root).split(
os.path.sep)
if root != prev_root and str.isdigit(
separated_root[-1]) and separated_root[
-2] != co.CONST['hnd_mk_fold_name']:
prev_root = root
if separated_root[-2] == co.CONST['mv_obj_fold_name']:
action = separated_root[-3]
action_path = (os.path.sep).join(separated_root[:-2])
else:
action = separated_root[-2]
if excluded_actions is not None:
if action in excluded_actions:
continue
if action != prev_action:
LOG.info('Processing action: ' + action)
res.append(method(action_path=action_path,
action_name=action,
*args, **kwargs))
prev_action = action
try:
return map(list, zip(*res))
except TypeError:
return res
def display_scores_and_time(self, save=False):
'''
Displays scores and elapsed time
'''
if self.online:
self.plot_result(np.array(self.scores),
labels=self.train_classes,
xlabel='Frames',
save=save)
LOG.info(self.name.title())
if self.test_ind is not None:
test_times = self.compute_testing_time(self.available_tests[
self.test_ind])
LOG.info('Mean Testing Times:\n\t' +str(test_times))
if not self.online:
self.testdata[self.test_ind]['TestTime'] = test_times
    def process_single_sample(self, data, img_count,
                              derot_angle=None, derot_center=None):
        '''
        Extract descriptors from the single frame <data> and classify it.

        Returns (valid, scores): <valid> is False when the frame cannot be
        processed (empty input, failed preprocessing or a not-yet-full
        buffer), in which case <scores> is filled with NaNs; otherwise
        <scores> is a (1, n_classes) array of classifier decisions.
        '''
        if data is None:
            self.scores.append(None)
            return False, np.array([[None] *
                                    len(self.train_classes)]).astype(
                                        np.float64)
        if not self.frames_preproc.update(data,
                                          angle=derot_angle,
                                          center=derot_center,
                                          masks_needed=True,
                                          img_count=self.img_count,
                                          isderotated=False):
            return False, np.array([None] * len(self.train_classes))
        descriptors = [descriptor.extract() for
                       descriptor in self.features_extractors]
        if not any([desc is None for desc in descriptors]):
            for count in range(len(descriptors)):
                # Sparse-code the raw descriptors before buffering, when
                # configured at the 'Features' stage.
                if self.sparsecoded == 'Features':
                    descriptors[count] = (self.action_recog.actions.
                                          coders[count].code(descriptors[count]))
                self.buffer_operators[count].update_buffer_info(
                    self.img_count, samples=descriptors[count])
                self.buffer_operators[count].add_buffer()
                descriptors[count] = self.buffer_operators[count].buffer
                if descriptors[count] is None:
                    # Buffer not yet filled: no decision for this frame.
                    return False, np.array([[None] *
                                            len(self.train_classes)]).astype(
                                                np.float64)
                # Alternatively, sparse-code the whole buffer when
                # configured at the 'Buffer' stage.
                if self.sparsecoded == 'Buffer':
                    descriptors[count] = (self.action_recog.actions.
                                          coders[count].code(descriptors[count]))
                if self.ptpca:
                    # Optional post-time PCA on the buffered descriptors.
                    descriptors[count] = self.buffer_operators[
                        count].perform_post_time_pca(
                            descriptors[count])
        else:
            return False, np.array([[None] *
                                    len(self.train_classes)]).astype(
                                        np.float64)
        inp = np.hstack(tuple(descriptors))
        try:
            score = (self.unified_classifier.decide(inp))
        except Exception as e:
            raise
        return True, np.array(score).reshape(1, -1)
    def process_online_data(self, data, img_count=None,
                            derot_angle=None, derot_center=None,
                            just_scores=False):
        '''
        <data> is the frame with frame number <img_count> or increasing by one
        relatively to the previous frame. Scores are filtered with a filter of
        length <self.scores_filter_shape>. <self.std_small_filter_shape> is the shape
        of the filter used to remove the temporal noise from the scores std.
        <self.std_big_filter_shape> is the shape of the filter to compute the mean
        of the scores std. Returns True if scores have been computed
        '''
        self.img_count += 1
        self.mean_from += 1
        # self.buffer_exists = self.buffer_exists[
        #    -self.std_big_filter_shape:]
        if not self.action_type == 'Passive':
            # Reset all online bookkeeping at the start of a stream
            # (first frame, or caller restarts the count at 0).
            if not self.img_count or (img_count == 0):
                self._buffer = []
                self.mean_from = 0
                self.buffer_exists = []
                self.scores = []
                self.scores_std_mean = []
                self.scores_std = []
                self.small_std_running_mean_vec = []
                self.big_std_running_mean_vec = []
                self.scores_running_mean_vec = []
                self.act_inds = []
                self.crossings = []
                self.count_prev = self.img_count - 1
            if img_count is not None:
                # The caller skipped frames: pad the scores with None so
                # indices stay aligned with frame numbers.
                self.scores += [None] * (img_count - self.img_count - 1)
                self.mean_from = img_count - self.img_count + self.mean_from
                self.img_count = img_count
        elif not self.img_count:
            self.scores = []
        valid, score = self.process_single_sample(data, img_count,
                                                  derot_angle,
                                                  derot_center)
        self.scores.append(score)
        if not valid:
            return valid, score
        if not just_scores:
            self.classify_online(score, self.img_count,
                                 self.mean_from)
        return True, np.array(score).reshape(1, -1)
    @timeit
    def classify_online(self, score, img_count, mean_from):
        '''
        To be used after scores from <online_processing_data> have been
        computed. It is a convenience function to allow the modification of
        the scores, if this is wanted, before performing classification.

        For 'Passive' actions a simple thresholded argmax over the
        mean-filtered scores is used; otherwise action boundaries are
        detected where the scores' std crosses its running mean.
        '''
        if self.action_type == 'Passive':
            self.scores_filter_shape = 3
        # Running mean window over the raw scores.
        if len(self.scores_running_mean_vec) < self.scores_filter_shape:
            self.scores_running_mean_vec.append(score.ravel())
        else:
            self.scores_running_mean_vec = (self.scores_running_mean_vec[1:]
                                            + [score.ravel()])
        # filter scores using a mean window
        self.scores[-1] = np.mean(np.array(self.scores_running_mean_vec), axis=0)
        if not self.action_type == 'Passive':
            start_from = min(self.scores_filter_shape, mean_from)
            score_std = np.std(self.scores[-1])
            # Small window: denoised std of the current scores.
            if len(self.small_std_running_mean_vec) < self.std_small_filter_shape:
                self.small_std_running_mean_vec.append(score_std)
            else:
                self.small_std_running_mean_vec = (
                    self.small_std_running_mean_vec[1:] +
                    [score_std])
            filtered_score_std = np.mean(self.small_std_running_mean_vec)
            self.scores_std.append(filtered_score_std)
            # Big window: running mean of the denoised std.
            if len(self.big_std_running_mean_vec) < self.std_big_filter_shape:
                self.big_std_running_mean_vec.append(filtered_score_std)
            else:
                self.big_std_running_mean_vec = (self.big_std_running_mean_vec[1:]
                                                 + [filtered_score_std])
            if mean_from >= self.std_big_filter_shape:
                start_from = 0
            else:
                start_from = - mean_from
            self.scores_std_mean.append(
                np.mean(self.big_std_running_mean_vec[-start_from:]))
            std_mean_diff = self.scores_std_mean[
                -1] - self.scores_std[-1]
            # A positive std-mean/std gap marks the boundary of a new
            # action utterance.
            if (np.min(std_mean_diff) > co.CONST['action_separation_thres'] and not
                    self.on_action) or not self.recognized_classes:
                self.crossings.append(img_count)
                self.on_action = True
                if self.recognized_classes is not None:
                    # Close the previous utterance, recording its length.
                    self.recognized_classes.add(length=img_count -
                                                self.new_action_starts_count +
                                                1)
                    LOG.info('Frame ' + str(img_count) + ': ' +
                             self.recognized_classes.name +
                             ', starting from frame ' +
                             str(self.recognized_classes.start) +
                             ' with length ' +
                             str(self.recognized_classes.length))
                else:
                    self.recognized_classes = RecognitionVectorizer(
                        self.train_classes)
                index = np.argmax(self.scores[-1])
                self.max_filtered_score = self.scores[-1][index]
                self.act_inds = [index]
                self.new_action_starts_count = img_count
                LOG.info('Frame ' + str(img_count) + ': ' +
                         self.recognized_classes.name)
                self.saved_buffers_scores = []
                return None
            else:
                # Inside an utterance: keep the label whose filtered score
                # peaked so far.
                if len(self.recognized_classes) > 0:
                    _arg = np.argmax(self.scores[-1])
                    if self.max_filtered_score < self.scores[
                            -1][_arg]:
                        self.max_filtered_score = self.scores[
                            -1][_arg]
                        self.recognized_classes[-1].add(index=_arg)
                        LOG.info('Frame ' + str(img_count) + ': ' +
                                 self.recognized_classes[-1].name)
                if std_mean_diff < co.CONST['action_separation_thres']:
                    self.on_action = False
                self.saved_buffers_scores.append(score)
                return None
            else:
                return None
        else:
            # Passive actions: keep the argmax only when confident enough,
            # otherwise repeat the previous decision.
            if np.max(self.scores[-1]) >= 0.6 or len(
                    self.recognized_classes) == 0:
                self.recognized_classes.append(
                    self.scores[-1].argmax())
            else:
                self.recognized_classes.append(
                    self.recognized_classes[-1])
            LOG.info('Pose detected:'
                     + self.train_classes[self.recognized_classes[-1]])
            return self.train_classes[self.recognized_classes[-1]]
    def extract_actions(self, scores, method='CProb', tol=0.7,
                        filterr=True):
        '''
        Convert classifier <scores> into a per-frame sequence of recognized
        action indices.

        <method>: 'CProb' keeps the argmax whenever the maximum score
        reaches <tol>, otherwise repeats the previous decision; 'CSTD'
        segments the sequence where the scores' std crosses below its
        running mean and labels each segment with its mean-score argmax.
        <filterr>: pre-smooth the scores with a masked mean filter of
        length <self.scores_filter_shape>.
        Returns (extracted_actions, more), where <more> is None for 'CProb'
        and (less_filtered_scores_std, high_filtered_scores_std, crossings)
        for 'CSTD'. Frames with non-finite scores map to NaN.
        '''
        if filterr:
            scores = co.noise_proc.masked_filter(scores,
                                                 self.scores_filter_shape)
        extracted_actions = []
        if method == 'CProb':
            for count, score in enumerate(scores):
                if not np.prod(np.isfinite(score)).astype(bool):
                    extracted_actions.append(np.nan)
                    continue
                # Accept the argmax when confident, at the start, or right
                # after an undefined stretch; else repeat last decision.
                if (np.max(score) >= tol or len(
                        extracted_actions) == 0 or
                        np.isnan(extracted_actions[-1])):
                    extracted_actions.append(score.argmax())
                else:
                    extracted_actions.append(
                        extracted_actions[-1])
            return extracted_actions, None
        elif method == 'CSTD':
            fmask = np.prod(np.isfinite(scores), axis=1).astype(bool)
            scores_std = np.zeros(scores.shape[0])
            scores_std[:] = None
            scores_std[fmask] = np.std(scores[fmask, :],
                                       axis=1)
            # Two smoothing scales of the per-frame std: a short window
            # (denoised std) and a long one (its running mean).
            less_filtered_scores_std = co.noise_proc.masked_filter(
                scores_std, self.std_small_filter_shape)
            high_filtered_scores_std = co.noise_proc.masked_filter(
                scores_std,
                self.std_big_filter_shape)
            positive = np.zeros(scores.shape[0])
            positive[:] = None
            positive[fmask] = ((high_filtered_scores_std -
                                less_filtered_scores_std).ravel()[fmask] > 0).astype(int)
            # We are interested only in finding negative to positive zero crossings,
            # because this is where std falls below its mean
            neg_to_pos_zero_crossings = np.where(positive[1:] -
                                                 positive[:-1] ==
                                                 -1)[0]
            crossings = neg_to_pos_zero_crossings
            # Segment boundaries: sequence start, every crossing, sequence end.
            interesting_crossings = np.concatenate((np.array([0]),
                                                    neg_to_pos_zero_crossings,
                                                    np.array([scores.shape[0]])),
                                                   axis=0)
            for cross1, cross2 in zip(interesting_crossings[
                    :-1], interesting_crossings[1:]):
                act_scores = scores[cross1:cross2, :]
                mask = fmask[cross1:cross2]
                if not np.any(mask):
                    # Entirely non-finite segment: label every frame NaN.
                    act = np.zeros(cross2 - cross1)
                    act[:] = None
                    extracted_actions.append(act)
                    continue
                # One label per segment: argmax of the mean finite scores.
                index = np.mean(
                    act_scores[mask, :], axis=0).argmax()
                '''
                index = np.median(
                    act_scores[mask, :], axis=0).argmax()
                '''
                act = index * np.ones(cross2 - cross1)
                act[np.logical_not(mask)] = None
                extracted_actions.append(act)
            extracted_actions = np.concatenate(
                tuple(extracted_actions), axis=0)
            return extracted_actions, (less_filtered_scores_std,
                                       high_filtered_scores_std,
                                       crossings)
    @timeit
    def classify_offline(self, display=True,
                         save=True, compute_perform=True,
                         extraction_method=None, tol=0.7):
        '''
        To be used after offline have been computed. It is a convenience
        function to allow the modification of the scores, if this is wanted,
        before performing classification.
        Process scores using stds as proposed by the paper.

        Sets and returns <self.recognized_classes>: a dict of per-key action
        sequences for 'Sync' classifiers, otherwise a single sequence. For
        the 'CSTD' method the intermediate std curves and crossings are
        also stored in <self.testdata> (when a registered test is running).
        '''
        if 'Sync' in self.classifiers_used:
            if extraction_method is None:
                extraction_method = 'CProb'
            # Broadcast scalar method/tolerance to one entry per score set.
            if not isinstance(extraction_method, list):
                extraction_method = [extraction_method] * len(self.scores)
            if not isinstance(tol, list):
                tol = [tol] * len(self.scores)
            self.recognized_classes = {}
            for count, key in enumerate(self.scores):
                (extracted_actions, more) = self.extract_actions(
                    self.scores[key], method=extraction_method[count],
                    tol=tol[count])
                self.recognized_classes[key] = extracted_actions
        else:
            (self.recognized_classes,
             more) = self.extract_actions(
                 self.scores, method=extraction_method)
            if extraction_method == 'CSTD':
                # Keep the std diagnostics for plotting and persistence.
                if self.test_ind is not None:
                    self.testdata[self.test_ind][
                        'Results'][
                            'LessFilteredScoresSTD'] = more[0]
                    self.testdata[self.test_ind][
                        'Results'][
                            'HighFilteredScoresSTD'] = more[1]
                    self.testdata[self.test_ind][
                        'Results'][
                            'Crossings'] = more[2]
                self.less_filtered_scores_std = more[0]
                self.high_filtered_scores_std = more[1]
                self.crossings = more[2]
        if self.test_ind is not None:
            self.testdata[self.test_ind]['Results'][
                'Actions'] = self.recognized_classes
        return self.recognized_classes
    def compute_performance_measures(
            self, recognized_classes, ground_truths, act_namespaces,
            utterances_annotation=None, save=True):
        '''
        Extract confusion matrix, accuracy and f scores from the given
        <recognized_classes> predicted classes and <ground_truths> actual
        classes. If <utterances_annotation> is given, metrics are
        calculated (utterance level), else micro metrics are computed (frames
        level)

        Results are logged and, when not online, stored inside
        self.testdata[self.test_ind] under 'Accuracy', 'PartialAccuracies',
        'FScores', 'ConfMat' and 'Labels', keyed by 'Macro'/'Micro'.
        NOTE(review): the <save> parameter is accepted but never used here.
        '''
        from sklearn import metrics
        from scipy.linalg import block_diag
        LOG.info('Computing performance measures for ' +
                 self.classifier_savename + ' with dataset:' +
                 self.testdataname)
        y_trues = []
        y_preds = []
        utters = []
        weights = []
        confusion_matrices = []
        f1_scores = []
        existing_classes = []
        accuracies = []
        # Non-Sync classifiers provide flat inputs; wrap them into
        # single-entry dictionaries so one code path handles both cases.
        if not 'Sync' in self.classifiers_used:
            recognized_classes = {self.action_type: recognized_classes}
            ground_truths = {self.action_type: ground_truths}
            act_namespaces = {self.action_type: act_namespaces}
            if utterances_annotation is not None:
                utterances_annotation = {
                    self.action_type: utterances_annotation}
        if utterances_annotation is not None:
            prefix = 'macro'
        else:
            prefix = 'micro'
        # NOTE(review): undef_exists is never set to True anywhere below,
        # so labels_w_undef always equals existing_classes — confirm intent.
        undef_exists = False
        undef_vec = []
        for act_type in recognized_classes:
            ground_truth = np.array(ground_truths[act_type]).ravel()
            recognized_actions = np.array(recognized_classes[act_type])
            act_names = act_namespaces[act_type]
            # NaN frames (no annotation / no prediction) become class -1.
            fmask = np.isnan(ground_truth)
            ground_truth[fmask] = -1
            y_trues.append(ground_truth.astype(int))
            recognized_actions[np.isnan(recognized_actions)] = -1
            y_preds.append(np.array(recognized_actions))
            y_preds[-1][np.isnan(y_preds[-1])] = -1
            if utterances_annotation is not None:
                # Collapse frame-level vectors to utterance-level ones.
                utters.append(np.array(utterances_annotation[act_type]))
                y_trues[-1], y_preds[-1] = co.macro_metrics.construct_vectors(
                    y_trues[-1], y_preds[-1], utters[-1])
            # Weight each action type by its number of distinct classes.
            weights.append(len(np.unique(ground_truth)))
            # Drop frames with undefined ground truth from the evaluation.
            fmask = y_trues[-1] != -1
            y_trues[-1] = y_trues[-1][fmask]
            y_preds[-1] = y_preds[-1][fmask]
            # Predictions of classes absent from the ground truth are
            # remapped to the 'undefined' class -1.
            flag_to_set_undef = np.sum(
                y_preds[-1][:, None]
                == np.unique(y_trues[-1])[None, :], axis=1) == 0
            y_preds[-1][flag_to_set_undef] = -1
            fsc = metrics.f1_score(y_trues[-1],
                                   y_preds[-1],
                                   average=None)
            if -1 in y_preds[-1]:
                # First score corresponds to the -1 label; discard it.
                fsc = fsc[1:]
            f1_scores.append(np.atleast_2d(fsc))
            accuracies.append(metrics.accuracy_score(y_trues[-1],
                                                     y_preds[-1]))
            # now clean undefined from predicted too
            conf_mat = metrics.confusion_matrix(
                y_trues[-1], y_preds[-1])
            if -1 in y_preds[-1]:
                conf_mat = conf_mat[1:, :]
                undef_vec.append(conf_mat[:,0])
                conf_mat = conf_mat[:, 1:]
            else:
                undef_vec.append(np.zeros(conf_mat.shape[0]))
            confusion_matrices.append(conf_mat)
            classes = set(y_trues[-1].tolist() +
                          y_preds[-1].tolist())
            classes.discard(-1)
            classes = np.array(list(classes)).astype(int)
            existing_classes += (np.array(
                act_names)[classes]).tolist()
        labels = existing_classes
        labels_w_undef = (['Undefined'] + existing_classes if undef_exists
                          else existing_classes)
        f1_scores = np.concatenate(f1_scores, axis=1)
        actions_id = []
        for clas in labels_w_undef:
            actions_id.append(self.all_actions.index(clas) -1)
        # Flatten the per-action-type undefined-prediction counts.
        undef_vec = [vec for vec_list in undef_vec for vec in vec_list ]
        # Block-diagonal join of per-action-type confusion matrices, with
        # the undefined-prediction counts appended as a last column.
        confusion_matrix = block_diag(*tuple(confusion_matrices))
        confusion_matrix = np.concatenate((confusion_matrix,
                                           np.array(undef_vec).reshape(-1, 1)),
                                          axis=1).astype(int)
        # Weighted mean of the partial accuracies.
        accuracy = sum([accuracy * weight for accuracy, weight in
                        zip(accuracies, weights)]) / float(sum(weights))
        if not self.online:
            self.testdata[self.test_ind]['Accuracy'][
                prefix.title()] = accuracy
            self.testdata[self.test_ind]['PartialAccuracies'][
                prefix.title()] = accuracies
            self.testdata[self.test_ind]['FScores'][
                prefix.title()] = [f1_scores, actions_id]
            self.testdata[self.test_ind][
                'ConfMat'][prefix.title()] = [confusion_matrix,
                                              actions_id]
            self.testdata[self.test_ind][
                'Labels'][prefix.title()] = labels
        LOG.info(prefix.title() + ' F1 Scores: \n' +
                 np.array2string(f1_scores))
        LOG.info(
            prefix.title() + ' Confusion Matrix: \n' +
            np.array2string(
                confusion_matrix))
        if 'Sync' in self.classifiers_used:
            LOG.info(prefix.title() + ' Partial Accuracies:' + str(accuracies))
        LOG.info(prefix.title() + ' Accuracy: ' + str(accuracy))
        LOG.info('Labels of actions:' + str(labels))
    def write_metrics_to_file(self):
        '''
        Write the metrics of the current test (accuracies, F1 scores and
        confusion matrices from self.testdata[self.test_ind]) as LaTeX
        tables inside self.save_fold, using co.latex.array_transcribe.
        '''
        accuracies = self.testdata[self.test_ind]['Accuracy']
        partial_accuracies = self.testdata[
            self.test_ind]['PartialAccuracies']
        if 'Sync' in self.classifiers_used:
            # One column per sub-classifier plus a total-mean column.
            acc_labels = (['Class.' + str(clas) for clas in
                           self.parameters['sub_classifiers']] +
                          ['Total Mean'])
            for cnt,acc_label in enumerate(acc_labels):
                # Strip matplotlib mathtext wrappers from the labels,
                # keeping only the inner text.
                if 'mathregular' in acc_label:
                    import re
                    acc_labels[cnt] = re.sub('\\\\mathregular{(.*)}',
                                            lambda x: x.group(1), acc_label)
            ylabels = []
            data = []
            extra_locs = []
            for key in partial_accuracies:
                ylabels.append(key)
                if not data:
                    data += [partial_accuracies[key], accuracies[key]]
                    extra_locs = ['right']
                else:
                    data += [np.hstack((partial_accuracies[key],
                                        accuracies[key]))]
                    extra_locs.append('bot')
            with open(os.path.join(self.save_fold, 'sync_accuracy.tex'), 'w') as out:
                out.write(co.latex.array_transcribe(data,
                                                    xlabels=acc_labels,
                                                    ylabels=ylabels,
                                                    sup_x_label='Accuracy',
                                                    extra_locs=extra_locs))
        f1_scores = self.testdata[self.test_ind]['FScores']
        # One F1-score table per metrics level ('macro'/'micro').
        for typ in f1_scores:
            with open(os.path.join(self.save_fold, typ.title() +'_F1_Scores.tex'), 'w') as out:
                out.write(co.latex.array_transcribe([f1_scores[typ][0],
                                                     np.atleast_2d(accuracies[typ])],
                                                    xlabels=np.concatenate((self.testdata[
                                                        self.test_ind]['Labels'][typ],
                                                        ['Accuracy']), axis=0),
                                                    sup_x_label=typ.title() +
                                                    ' Metrics',
                                                    extra_locs=['right']))
        confmats = self.testdata[self.test_ind]['ConfMat']
        for typ in confmats:
            with open(os.path.join(self.save_fold,
                                   typ.title()+'_Confusion_Matrix.tex'), 'w') as out:
                ylabels = self.testdata[self.test_ind]['Labels'][typ]
                xlabels = ylabels[:]
                # A non-square confusion matrix has the extra 'undefined
                # prediction' column appended by compute_performance_measures.
                if confmats[typ][0].shape[0] != confmats[typ][0].shape[1]:
                    xlabels += ['Undefined']
                out.write(co.latex.array_transcribe(confmats[typ][0],
                                                    ylabels=ylabels,
                                                    xlabels=xlabels,
                                                    sup_x_label='Predicted',
                                                    sup_y_label='Actual',
                                                    title=typ.title() +
                                                    ' Metrics',
                                                    wrap=False))
def construct_classifiers_matrix(self):
'''
Constructs a table which shows most parameters of the trained
classifiers and saves it as a pdf inside Total Results folder
'''
from textwrap import TextWrapper
wrapper = TextWrapper(width=15, break_long_words=False,
break_on_hyphens=False, replace_whitespace=False)
all_parameters = [(self.trained_classifiers[name][2], self.classifiers_list[name])
for name in self.trained_classifiers if name in
self.classifiers_list]
all_parameters = sorted(all_parameters, key=lambda pair: pair[1])
params_rows = []
for parameters in all_parameters:
row = []
row.append(parameters[1])
row.append(parameters[0]['classifier'])
row.append('\n'.join(
parameters[0]['descriptors']))
row.append(parameters[0]['sparsecoded'])
if parameters[0]['sparsecoded']:
row.append('\n'.join(
['%d' % parameters[0]['sparse_params'][feature]
for feature in parameters[0]['descriptors']]))
else:
row.append('')
row.append(parameters[0]['passive'])
if (not parameters[0]['passive'] or
'Sync' in parameters[0]['classifier']):
'''
if parameters[0]['classifier']=='Double':
row.append('%d'%parameters[0]['classifier_params'][
'RDF_n_estimators'])
else:
row.append('')
'''
row.append('')
row.append(
str(parameters[0]['classifier_params']['SVM_kernel']))
try:
row.append('%d' % parameters[0]['dynamic_params'][
'buffer_size'])
except BaseException: row.append('')
try:
if parameters['testing_params'][
'post_scores_processing_method'] == 'CSTD':
row.append('%d' % parameters[0]['dynamic_params'][
'filter_window_size'])
except BaseException: row.append('')
try:
row.append(str(parameters[0]['PTPCA']))
except BaseException: row.append('')
if row[-1] != '' and row[-1] == 'True':
try:
row.append('%d' % parameters[0]['PTPCA_params'][
'PTPCA_components'])
except BaseException: row.append('')
else:
row.append('')
else:
row.append('%d' % parameters[0]['classifier_params'][
'RDF_n_estimators'])
[row.append('') for count in range(5)]
params_rows.append(row)
params_valids = np.tile(
(np.array(params_rows) != '')[..., None], (1, 1, 3))
params_valids = params_valids * 0.5
params_valids += 0.5
params_rows = [[wrapper.fill(el) if
isinstance(el, basestring) else el
for el in row] for row in params_rows]
params_col_names = ('Classifier', 'Type', 'Features', 'Sparse',
'Sparse Features\ndimension',
'Actions', 'Estimators', 'Kernel',
'Buffer size', 'Filter size', 'post PCA',
'post PCA\ncomponents')
all_results = [(self.classified_dict[item],
self.classifiers_list[item])
for item in self.classified_dict if item in
self.classifiers_list]
results_rows = []
for results in all_results:
row = []
row.append(str(results[1]))
mean = 0
for test in self.available_tests:
try:
row.append('%1.2f' % results[0][test][1][0])
except KeyError:
row.append('')
mean += results[0][test][1][0]
row.append('%1.2f' % (mean / float(len(results[0]))))
results_rows.append(row)
if results_rows:
results_col_names = ['Classifier'] + \
self.available_tests + ['Mean']
results_valids = np.tile(
(np.array(results_rows) != '')[..., None], (1, 1, 3))
results_rows = sorted(results_rows, key=lambda pair:
float(pair[-1]), reverse=True)
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot as plt
import datetime
#matplotlib.rcParams.update({'font.size': 22})
if self.save_fold is None:
save_fold = os.path.join(
co.CONST['results_fold'], 'Classification', 'Total')
co.makedir(save_fold)
filename = os.path.join(save_fold,
'matrices.pdf')
else:
filename = os.path.join(*self.save_fold.split(os.sep)[:-1] +
['Total', 'matrices.pdf'])
with PdfPages(filename) as pdf:
fig = plt.figure()
axes = fig.add_subplot(111)
params_table = co.table_oper.construct(axes, cellText=params_rows,
colLabels=params_col_names,
cellColours=params_valids,
cellLoc='center',
loc='center', usetex=True)
# params_table.auto_set_font_size(False)
params_table.scale(2, 2)
# params_table.set_fontsize(10)
co.table_oper.fit_cells_to_content(
fig, params_table, equal_height=True)
plt.axis('off')
pdf.savefig(
bbox_extra_artists=(
params_table,
),
bbox_inches='tight')
plt.close()
if results_rows:
fig = plt.figure()
axes = fig.add_subplot(111)
results_table = co.table_oper.construct(
axes,
cellText=results_rows,
colLabels=results_col_names,
cellColours=results_valids,
cellLoc='center',
loc='center')
co.table_oper.fit_cells_to_content(fig,
results_table,
equal_height=True)
results_table.scale(2, 2)
plt.axis('off')
pdf.savefig(
bbox_extra_artists=(
results_table,
),
bbox_inches='tight')
plt.close()
idd = pdf.infodict()
idd['Author'] = u'<NAME>'
idd['Subject'] = 'Unified Comparative View'
idd['Keywords'] = 'PdfPages multipage keywords author title subject'
idd['CreationDate'] = datetime.datetime.today()
def load_tests_mapping(self):
tests_catalog = co.file_oper.load_labeled_data(['Testing'],
fold_lev=1, just_catalog=True, include_all_catalog=
True)
return tests_catalog
def return_description(self, catalog, value):
try:
return catalog.keys()[catalog.values().index(value)]
except BaseException: LOG.warning('No such value inside catalog')
def load_all_test_instances(self, test_ind):
available_test_instances = {}
test_name = self.available_tests[test_ind]
import ast
loaded_instances, keys_list = co.file_oper.load_labeled_data(['Testing'],
fold_lev=1, all_inside=True)
for _id in loaded_instances:
loaded_instance = loaded_instances[_id]
if dict(keys_list[_id][0])['Test'] == test_name:
available_test_instances[_id] = loaded_instance
return available_test_instances
def extract_test_results_instances(self, test_ind, key,*keys):
if self.test_instances is None:
self.test_instances = self.load_all_test_instances(test_ind)
res = []
for entry in co.dict_oper.create_sorted_dict_view(self.test_instances):
if entry[1] is None:
res.append(None)
else:
res.append(co.dict_oper.lookup(entry[0], key,*keys))
return res
    def correlate_with_ground_truth(self, save=True, display=False,
                                    display_all=False, compute_perform=True,
                                    utterances_inds=None):
        '''
        Plot results with title <title>
        <display_all> if a plot of all the classifiers results is wanted
        Do not classifiers_used this function if more than one classifiers are to be run in
        parallel

        Computes performance measures against self.test_ground_truth (when
        available), optionally writes them to file, and plots the
        recognized actions together with crossings and breakpoints.
        Returns True on success, False if required test data is missing.
        '''
        if self.parameters['testing_params']['online']:
            # NOTE(review): this local is never read afterwards.
            recognized_classes = self.recognized_classes.recognition_vector
        if isinstance(self.crossings, list):
            self.crossings = np.array(self.crossings)
        if self.test_ground_truth is not None and compute_perform:
            # Micro (frame-level) metrics.
            self.compute_performance_measures(
                self.recognized_classes,
                ground_truths=self.test_ground_truth,
                act_namespaces=self.train_classes,
                save=save)
            if utterances_inds is not None:
                # Macro (utterance-level) metrics.
                self.compute_performance_measures(
                    self.recognized_classes,
                    ground_truths=self.test_ground_truth,
                    utterances_annotation= utterances_inds,
                    act_namespaces=self.train_classes,
                    save=save)
            else:
                LOG.warning('Utterances Indices not passed to' +
                            ' function. MacroMetrics won\'t be' +
                            ' computed.')
            if save and self.save_fold is not None:
                self.write_metrics_to_file()
        if save:
            display = True
        if display:
            if display_all:
                self.construct_classifiers_matrix()
                iterat = []
                iterat_name = []
                # Gather recognized actions of every classifier tested on
                # the same dataset and action type.
                for name in self.classifiers_list:
                    parameters = self.classified_dict[name][
                        self.testdataname][2]
                    recognized_actions = self.classified_dict[name][
                        self.testdataname][0]
                    if (parameters['action_type'] == self.action_type):
                        if 'Sync' in parameters['classifier']:
                            iterat.append(recognized_actions[self.action_type])
                        else:
                            iterat.append(recognized_actions)
                        iterat_name.append(int(self.classifiers_list[name]))
                # sort iterat based on the index of classifier inside
                # classifiers_list
                iterat = [x for (_, x) in sorted(zip(iterat_name, iterat),
                                                 key=lambda pair: pair[0])]
                # provide argsort using accuracy measures, to alter line width
                # NOTE(review): this lambda receives an int index, so
                # l[1][0] raises TypeError when executed — looks like a
                # leftover from sorting (accuracy, actions) pairs; verify.
                higher_acc = sorted(range(len(iterat)), key=lambda
                                    l: l[1][0], reverse=True)
            else:
                try:
                    iterat = [self.testdata[self.test_ind][
                        'Results']['Actions']]
                    available_ids = co.file_oper.load_labeled_data(
                        ['Classifier'], just_catalog=True
                    )
                    if available_ids is not None:
                        try:
                            iterat_name = available_ids[str(
                                self.classifier_id)]
                        except KeyError:
                            iterat_name = str(len(available_ids))
                    else:
                        iterat_name = str(0)
                    higher_acc = [0]
                    if 'Sync' in self.classifiers_used:
                        # Stack the sub-classifiers' actions, offsetting
                        # each one's indices by the previous class counts.
                        new_iter = []
                        ref = 0
                        for key in iterat[0]:
                            new_iter.append(iterat[0][key])
                            new_iter[-1] = [item + ref for
                                            item in new_iter[-1]]
                            ref += len(self.parameters['actions_params'][key])
                        iterat = new_iter
                        iterat_name = self.parameters['sub_classifiers']
                        higher_acc.append(1)
                except KeyError as err:
                    LOG.warning(
                        str(err) + ' is missing from the tested datasets')
                    return False
            plots = []
            linewidths = []
            labels = []
            markers = []
            markers_sizes = []
            alphas = []
            zorders = []
            colors = []
            xticks = None
            # Successive plots get thinner lines, down to min_q.
            width = 1
            dec_q = 0.3
            min_q = 0.2
            from matplotlib.cm import get_cmap
            available_colors = get_cmap('tab20b')
            if self.crossings is not None:
                # Mark predicted utterance breakpoints at y = -1.
                xticks = self.crossings
                expanded_xticks = np.zeros_like(self.test_ground_truth)
                expanded_xticks[:] = None
                expanded_xticks[xticks] = -1
                plots.append(expanded_xticks)
                alphas.append(1)
                markers.append('o')
                markers_sizes.append(5)
                if 'Sync' in self.classifiers_used:
                    labels.append('Utterances\n'+
                                  'Predicted\n'+
                                  'break-\npoints\n'+
                                  'by Cl_${dyn}$')
                else:
                    labels.append('Utterances\n'+
                                  'Predicted\n'+
                                  'break-\npoints')
                colors.append('green')
                linewidths.append(1)
                zorders.append(2)
            for count in range(len(higher_acc)):
                syncplots = []
                plots.append(iterat[count])
                if len(higher_acc) == 1:
                    labels.append('Predictions')
                else:
                    labels.append('Class. ' +
                                  str(iterat_name[count]) +
                                  ' Predictions')
                markers.append(',')
                markers_sizes.append(10)
                linewidths.append(width)
                colors.append(available_colors(count))
                alphas.append(1)
                zorders.append(3)
                width -= dec_q
                width = max(min_q, width)
            if 'Sync' in self.classifiers_used:
                yticks = []
                for key in self.train_classes:
                    yticks += list(self.train_classes[key])
            else:
                yticks = self.train_classes
            ylim = (-1, len(yticks) + 1)
            fig, lgd, axes = self.plot_result(np.vstack(plots).T, labels=labels,
                                              xticks_locs=xticks, ylim=ylim,
                                              yticks_names=yticks,
                                              markers=markers,
                                              linewidths=linewidths,
                                              alphas=alphas,
                                              xlabel='Frames',
                                              zorders=zorders,
                                              markers_sizes= markers_sizes,
                                              info='Classification Results',
                                              save=False)
            if self.test_breakpoints is not None:
                # Overlay the colored ground-truth utterances.
                if 'Sync' in self.classifiers_used:
                    tg_ref = 0
                    for key in (self.test_ground_truth):
                        self.draw_breakpoints(axes,
                                              self.test_breakpoints[key],
                                              yticks)
                else:
                    self.draw_breakpoints(axes,
                                          self.test_breakpoints, yticks)
            if save:
                save_info = 'Classification Results'
                if self.crossings is not None:
                    save_info += ' with Crossings'
                self.save_plot(fig, lgd, info=save_info)
        return True
def draw_breakpoints(self, axes, breakpoints, yticks, lw=3,
zorder=2):
'''
Draws colored ground truth
'''
from matplotlib.cm import get_cmap
cmap = get_cmap('Spectral')
max_plotpoints_num = 0
for act in breakpoints:
max_plotpoints_num = max(max_plotpoints_num,
len(breakpoints[act][0]))
c_num = max_plotpoints_num
yticks = [ytick.lower() for ytick in yticks]
for act_cnt, act in enumerate(breakpoints):
drawn = 0
for cnt, (start, end) in enumerate(zip(breakpoints[act][0],
breakpoints[act][1])):
gest_dur = np.arange(int(start), int(end))
if act.lower() in yticks:
axes.plot(gest_dur, np.ones(gest_dur.size) *(
yticks.index(act.lower()) ),
color=cmap(cnt/float(c_num))
, linewidth=lw,
solid_capstyle="butt", zorder=zorder)
class RecognitionVectorizer(object):
    '''
    Class to hold classification classes

    Accumulates recognized class segments inside self.recognition_vector.
    '''

    def __init__(self, class_names):
        # Last recognized class name / index and segment geometry.
        self.name = ''
        self.index = 0
        self.start = 0
        self.length = 0
        self.names = class_names
        self.recognition_vector = []

    def add(self, index=None, start=None, length=None):
        '''
        Add Class with name, corresponding index in self.train_classes,
        starting from start frame, with length length frames

        Arguments left as None keep their previous values and skip the
        corresponding recognition_vector update.
        '''
        if index is not None:
            idx = int(index)
            self.index = idx
            self.name = self.names[idx]
        if start is not None:
            self.start = start
            # Pad the gap between the current vector end and <start>.
            gap = start - len(self.recognition_vector)
            self.recognition_vector.append([None] * gap)
        if length is not None:
            self.length = length
            self.recognition_vector.append([self.index] * self.length)
def fake_online_testing(classifier, testname='actions', path=None):
    '''
    Immitate online testing for performance testing and debugging reasons

    Feeds the frames found in <path> (defaults to the test dataset of
    <testname>) one by one to classifier.run_testing in online mode,
    then plots the resulting scores and their statistics.
    '''
    if path is None:
        path = os.path.join(co.CONST['test_' + testname], '0')
    filenames = glob.glob(os.path.join(path, '*.png'))
    # Frame counter parsed from each filename's digits.
    # NOTE(review): int(filter(...)) is Python 2 only; on Python 3 filter
    # returns an iterator and this would raise.
    sync = [int(filter(str.isdigit,
                       os.path.basename(filename)))
            for filename in filenames]
    # Sort frames and their filenames together by frame counter.
    [sync, filenames] = map(list, zip(*[[y, x] for (y, x) in
                                        sorted(zip(sync, filenames),
                                               key=lambda pair: pair[0])]))
    txts = glob.glob(os.path.join(path, '*.txt'))
    # Per-frame derotation angles and centers read from sidecar txt files.
    angles = []
    centers = []
    for txt in txts:
        if 'angles' in txt:
            with open(txt, 'r') as inpf:
                angles += map(float, inpf)
        elif 'centers' in txt:
            with open(txt, 'r') as inpf:
                for line in inpf:
                    center = [
                        float(num) for num
                        in line.split(' ')]
                    centers += [center]
    for count, filename in enumerate(filenames):
        img = cv2.imread(filename, -1)
        # DEBUGGING
        #cv2.imshow('test', (img % 255).astype(np.uint8))
        # cv2.waitKey(10)
        classifier.run_testing(
            data=img,
            img_count=sync[count],
            online=True,
            load=False,
            derot_angle=angles[count],
            derot_center=centers[count],
            testdatapath=co.CONST['test_' + testname])
    # Close the last recognized class at the final frame counter.
    classifier.recognized_classes[-1].add(length=max(sync))
    if len(classifier.scores_exist) > 1:
        classifier.test_ground_truth = co.gd_oper.construct_ground_truth(
            classifier.scores_exist, ground_truth_type=co.CONST['test_'
                                                                + testname
                                                                +
                                                                '_ground_truth'],
            classes_namespace=classifier.train_classes)
        # Expand the scores to frame resolution, with NaN rows for
        # frames where no scores were produced.
        classifier.scores = np.array(
            classifier.scores).squeeze()
        classifier.scores_exist = np.array(classifier.scores_exist)
        expanded_scores = np.zeros(
            (len(classifier.scores_exist), classifier.scores.shape[1]))
        expanded_scores[:] = np.NaN
        expanded_scores[
            classifier.scores_exist.astype(bool),
            :] = classifier.scores
    else:
        expanded_scores = np.array(classifier.scores)
    classifier.plot_result(expanded_scores,
                           labels=['%s' % classifier.train_classes[count]
                                   for count in
                                   range(expanded_scores.shape[1])],
                           info='Filtered Scores',
                           xlabel='Frames', save=True)
    classifier.plot_result(np.concatenate((
        classifier.filtered_scores_std,
        classifier.filtered_scores_std_mean), axis=0).T,
        colors=['r', 'g'],
        labels=['STD', 'STD Mean'],
        info='Filtered Scores Statistics',
        xlabel='Frames', save=True)
def signal_handler(sig, frame):
    '''
    Signal handler for CTRL-C interrupt (SIGINT)

    Displays scores and timing of any online classifiers currently
    running at module level, then exits the process.
    '''
    LOG.info('\nGot SIGINT')
    LOG.info('Exiting...')
    # BUGFIX: the original used locals(), which inside this handler only
    # contains <sig> and <frame>, so no classifier could ever be found.
    # The running Classifier instances live in the module namespace.
    namespace = globals()
    running_classifiers = [namespace[key] for key in namespace
                           if isinstance(namespace[key], Classifier)
                           and namespace[key].testing_initialized and
                           namespace[key].online]
    for classifier in running_classifiers:
        classifier.display_scores_and_time()
    if running_classifiers:
        from matplotlib import pyplot as plt
        plt.show()
    sys.exit(0)
def construct_dynamic_actions_classifier(testname='test2', train=False,
test=True, visualize=True,
coders_retrain=False,
name='actions', sparsecoding_level=False,
sparse_dim_rat=None, test_against_all=False,
visualize_feat=False, kernel=None,
descriptors='GHOG', ptpca=False,
ptpca_components=1,
just_sparse=False,
debug=False,
classifiers_used='SVM',
action_type='Dynamic',
post_scores_processing_method='CSTD'):
'''
Constructs an SVM classifier with input 3DHOF and GHOG descriptors
'''
if sparsecoding_level:
if sparse_dim_rat is None:
sparse_dim_rat = co.CONST['sparse_dim_rat']
classifier = Classifier('INFO', action_type=action_type,
name=name, sparse_dim_rat=sparse_dim_rat,
sparsecoding_level=sparsecoding_level,
descriptors=descriptors,
kernel=kernel, ptpca=ptpca,
ptpca_components=ptpca_components,
classifiers_used=classifiers_used, post_scores_processing_method= post_scores_processing_method)
if debug:
classifier.debug = True
classifier.run_training(classifiers_retrain=train,
coders_retrain=coders_retrain,
visualize_feat=visualize_feat,
just_sparse=just_sparse,
# init_sc_traindata_num=5000,
init_sc_traindata_num=15000,
min_dict_iterations=20)
if test or visualize:
if test_against_all:
iterat = classifier.available_tests
else:
iterat = [testname]
for name in iterat:
if test:
classifier.run_testing(name,
ground_truth_type=os.path.join(
co.CONST['ground_truth_fold'],
name + '.csv'),
online=False, load=False)
else:
classifier.run_testing(name,
ground_truth_type=os.path.join(
co.CONST['ground_truth_fold'],
name + '.csv'),
online=False, load=False)
return classifier
def construct_passive_actions_classifier(testname='test2',
train=True, test=True, visualize=True,
test_against_all=False,
descriptors='3DXYPCA',
post_scores_processing_method='CProb',
for_app=False):
'''
Constructs a random forests passive_actions classifier with input 3DXYPCA descriptors
'''
classifier = Classifier('INFO', action_type='Passive',
name='actions', classifiers_used='RDF',
sparsecoding_level=False,
descriptors=descriptors,
post_scores_processing_method=
post_scores_processing_method, for_app=for_app)
classifier.run_training(classifiers_retrain=train,
max_act_samples=2000)
if test or visualize:
if test_against_all:
iterat = classifier.available_tests
else:
iterat = [testname]
for name in iterat:
if test:
classifier.run_testing(name,
ground_truth_type=os.path.join(
co.CONST['ground_truth_fold'],
name + '.csv'),
online=False, load=False)
else:
classifier.run_testing(name,
ground_truth_type=os.path.join(
co.CONST['ground_truth_fold'],
name + '.csv'),
online=False, load=False)
return classifier
def construct_total_statistics():
    '''
    Construct unified plots for all classifiers, tested on all tests
    '''
    classifier = Classifier()
    classifier.action_type = 'Dynamic'
    classifier.scores_exist = None
    classifier.recognized_classes = None
    for test in classifier.available_tests:
        gt_path = os.path.join(co.CONST['test_save_path'], test)
        classifier.test_ground_truth = co.gd_oper.construct_ground_truth(
            ground_truth_type=gt_path,
            classes_namespace=classifier.dynamic_actions)
        classifier.correlate_with_ground_truth(save=True,
                                               display=True,
                                               display_all=True,
                                               compute_perform=True)
def main():
    '''
    Example Usage

    Builds one RDF dynamic-actions classifier and exits. The triple-quoted
    blocks below are disabled experiment configurations kept for
    reference; everything after the first exit() call is unreachable.
    '''
    from matplotlib import pyplot as plt
    testname = 'actions'
    # construct_passive_actions_classifier(test)
    # plt.show()
    '''
    TRAIN_ALL_SPARSE = construct_dynamic_actions_classifier(
        train=[0],descriptors=['GHOG', 'ZHOF', '3DHOF', '3DXYPCA'],
        sparsecoding_level=True,just_sparse=True, debug=False)
    '''
    '''
    POSES_CLASSIFIER = construct_passive_actions_classifier(train=True,
                                                    test=True,
                                                    visualize=True,
                                                    test_against_all=True)
    '''
    ACTIONS_CLASSIFIER_SIMPLE = construct_dynamic_actions_classifier(
        train=True,
        test=True,
        visualize=True,
        test_against_all=True,
        ptpca=False,
        classifiers_used='RDF',
        descriptors=['GHOG','ZHOF'],
        post_scores_processing_method='CSTD')
    # NOTE(review): execution stops here; all code below is unreachable.
    exit()
    '''
    ACTIONS_CLASSIFIER_SPARSE = construct_dynamic_actions_classifier(train=True,
                                                             coders_retrain=False,
                                                             test=True,
                                                             visualize=True,
                                                             test_against_all=True,
                                                             sparsecoding_level='Features',
                                                             classifiers_used='RDF')
    ACTIONS_CLASSIFIER_SIMPLE_POST_PCA = construct_dynamic_actions_classifier(
        train=True,
        test=True,
        visualize=True,
        test_against_all=True,
        ptpca=True,
        ptpca_components=2)
    construct_dynamic_actions_classifier(
        train=True, test=True, visualize=True, test_against_all=True,
        descriptors=['3DHOF'],ptpca=False,sparsecoding_level=False)
    '''
    '''
    construct_dynamic_actions_classifier(
        train=True, test=True, visualize=True, test_against_all=True,
        descriptors=['ZHOF'], ptpca=False, sparsecoding_level=False)
    '''
    '''
    ACTIONS_CLASSIFIER_SPARSE_WITH_3DHOF = construct_dynamic_actions_classifier(
        train=True, test=True, visualize=True, test_against_all=True,
        descriptors=['GHOG', '3DHOF'], coders_retrain=False, sparsecoding_level=True,
        kernel='linear')
    ACTIONS_CLASSIFIER_SIMPLE_WITH_3DHOF_POST_PCA = construct_dynamic_actions_classifier(
        train=True, test=True, visualize=True, test_against_all=True,
        descriptors=['GHOG', '3DHOF'], ptpca=True, ptpca_components=4)
    construct_dynamic_actions_classifier(
        #debugging, train=True, test=True,
        train=True, test=True,
        visualize=True, test_against_all=True,
        descriptors=['GHOG', '3DHOF'], ptpca=True, sparsecoding_level=True,
        ptpca_components=2)
    '''
    '''
    ACTIONS_CLASSIFIER_SIMPLE_WITH_3DHOF = construct_dynamic_actions_classifier(
        train=True, test=True, visualize=True, test_against_all=True,
        descriptors=['GHOG', '3DHOF'], kernel='linear')
    '''
    '''
    ACTIONS_CLASSIFIER_SIMPLE_WITH_ZHOF = construct_dynamic_actions_classifier(
        train=True, test=True, visualize=True, test_against_all=True,
        descriptors=['GHOG', 'ZHOF'])
    '''
    '''
    ACTIONS_CLASSIFIER_SIMPLE_WITH_ZHOF = construct_dynamic_actions_classifier(
        train=True, test=True, visualize=True, test_against_all=True,
        descriptors=['3DXYPCA','GHOG','3DHOF','ZHOF'], classifiers_used='SVM')
    '''
    # Let's try RDF for dynamic actions
    construct_dynamic_actions_classifier(
        train=True, test=True, visualize=True, test_against_all=True,
        descriptors=['GHOG', 'ZHOF'], ptpca=False, sparsecoding_level=False,
        classifiers_used='RDF')
    exit()
    # Let's try RDF with all descriptors for dynamic actions
    construct_dynamic_actions_classifier(
        train=True, test=True, visualize=True, test_against_all=True,
        descriptors=['GHOG', '3DHOF', '3DXYPCA'], ptpca=False, sparsecoding_level=False,
        classifiers_used='RDF')
    # Let's try RDF for all descriptors for all actions
    construct_dynamic_actions_classifier(
        train=True, test=True, visualize=True, test_against_all=True,
        descriptors=['GHOG', '3DHOF', '3DXYPCA'], action_type='All', ptpca=False, sparsecoding_level=False,
        classifiers_used='RDF')
    # Let's try RDF with all descriptors for dynamic actions
    construct_dynamic_actions_classifier(
        train=True, test=True, visualize=True, test_against_all=True,
        descriptors=['GHOG', 'ZHOF', '3DXYPCA'], action_type='Dynamic', ptpca=False, sparsecoding_level=False,
        classifiers_used='RDF')
    # Let's try RDF for all descriptors for all actions
    construct_dynamic_actions_classifier(
        train=True, test=True, visualize=True, test_against_all=True,
        descriptors=['GHOG', 'ZHOF', '3DXYPCA'], action_type='All', ptpca=False, sparsecoding_level=False,
        classifiers_used='RDF')
    ACTIONS_CLASSIFIER_SPARSE_WITH_ZHOF = construct_dynamic_actions_classifier(
        train=True,
        test=True, visualize=True, test_against_all=True,
        descriptors=['GHOG', 'ZHOF'], coders_retrain=False, sparsecoding_level=True,
        kernel='linear')
    ACTIONS_CLASSIFIER_SIMPLE_WITH_ZHOF_POST_PCA = construct_dynamic_actions_classifier(
        train=True, test=True, visualize=True, test_against_all=True,
        descriptors=['GHOG', 'ZHOF'], ptpca=True, ptpca_components=4)
    construct_dynamic_actions_classifier(
        train=True, test=True, visualize=True, test_against_all=True,
        descriptors=['GHOG', 'ZHOF'], ptpca=True,
        sparsecoding_level=True, coders_retrain=False, ptpca_components=4)
    '''
    ACTIONS_CLASSIFIER_SPARSE_WITH_ZHOF_RBF = construct_dynamic_actions_classifier(
        train=True, test=True, visualize=True, test_against_all=True,
        descriptors=['GHOG','ZHOF'], coders_retrain=False, sparsecoding_level=True,
        kernel='rbf')
    ACTIONS_CLASSIFIER_SIMPLE_WITH_ZHOF_RBF = construct_dynamic_actions_classifier(
        train=True, test=True, visualize=True, test_against_all=True,
        descriptors=['GHOG','ZHOF'], coders_retrain=False, sparsecoding_level=False,
        kernel='rbf')
    ACTIONS_CLASSIFIER_SIMPLE_RBF = construct_dynamic_actions_classifier(
        train=False,
        test=False,
        visualize=False,
        sparsecoding_level=False,
        test_against_all=True,
        kernel='rbf')
    '''
    # construct classifiers comparative table
    tmp = Classifier(descriptors=[''])
    tmp.construct_classifiers_matrix()
    sys.exit(0)
    # visualize_feat=True)
    # visualize_feat=['Fingerwave in'])
# Module-level logger setup: stream to stderr with a function/line format.
# BUGFIX: the original passed the literal string '__name__', creating a
# logger literally named "__name__"; use the actual module name.
LOG = logging.getLogger(__name__)
CH = logging.StreamHandler(sys.stderr)
CH.setFormatter(logging.Formatter(
    '%(funcName)20s()(%(lineno)s)-%(levelname)s:%(message)s'))
# Replace any inherited handlers so messages are emitted exactly once.
LOG.handlers = []
LOG.addHandler(CH)
LOG.setLevel(logging.INFO)
if __name__ == '__main__':
    # Install the SIGINT handler first so CTRL-C during main() shuts
    # down gracefully (displays running classifiers' scores, then exits).
    signal.signal(signal.SIGINT, signal_handler)
    main()
| [
"logging.getLogger",
"logging.StreamHandler",
"matplotlib.pyplot.ylabel",
"class_objects.macro_metrics.construct_vectors",
"numpy.array2string",
"sklearn.ensemble.AdaBoostClassifier",
"numpy.logical_not",
"action_recognition_alg.ActionRecognition",
"numpy.hstack",
"numpy.array",
"numpy.isfinite"... | [((121963, 121992), 'logging.getLogger', 'logging.getLogger', (['"""__name__"""'], {}), "('__name__')\n", (121980, 121992), False, 'import logging\n'), ((121998, 122031), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stderr'], {}), '(sys.stderr)\n', (122019, 122031), False, 'import logging\n'), ((109670, 109681), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (109678, 109681), False, 'import sys\n'), ((121869, 121880), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (121877, 121880), False, 'import sys\n'), ((122048, 122123), 'logging.Formatter', 'logging.Formatter', (['"""%(funcName)20s()(%(lineno)s)-%(levelname)s:%(message)s"""'], {}), "('%(funcName)20s()(%(lineno)s)-%(levelname)s:%(message)s')\n", (122065, 122123), False, 'import logging\n'), ((122225, 122269), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal_handler'], {}), '(signal.SIGINT, signal_handler)\n', (122238, 122269), False, 'import signal\n'), ((622, 633), 'time.time', 'time.time', ([], {}), '()\n', (631, 633), False, 'import time\n'), ((684, 695), 'time.time', 'time.time', ([], {}), '()\n', (693, 695), False, 'import time\n'), ((9776, 9831), 'action_recognition_alg.ActionRecognition', 'ara.ActionRecognition', (['self.parameters'], {'log_lev': 'log_lev'}), '(self.parameters, log_lev=log_lev)\n', (9797, 9831), True, 'import action_recognition_alg as ara\n'), ((14289, 14329), 'action_recognition_alg.FramesPreprocessing', 'ara.FramesPreprocessing', (['self.parameters'], {}), '(self.parameters)\n', (14312, 14329), True, 'import action_recognition_alg as ara\n'), ((20884, 21009), 'class_objects.file_oper.save_labeled_data', 'co.file_oper.save_labeled_data', (["(['Classifier'] + self.classifier_id)", '[self.unified_classifier, self.training_parameters]'], {}), "(['Classifier'] + self.classifier_id, [self.\n unified_classifier, self.training_parameters])\n", (20914, 21009), True, 'import class_objects as co\n'), ((21092, 21214), 
'class_objects.file_oper.save_labeled_data', 'co.file_oper.save_labeled_data', (["['Classifier']", '[self.unified_classifier, self.training_parameters]'], {'name': 'self.app_dir'}), "(['Classifier'], [self.unified_classifier,\n self.training_parameters], name=self.app_dir)\n", (21122, 21214), True, 'import class_objects as co\n'), ((39179, 39190), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (39188, 39190), True, 'from matplotlib import pyplot as plt\n'), ((40077, 40091), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (40089, 40091), True, 'from matplotlib import pyplot as plt\n'), ((42407, 42425), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (42417, 42425), True, 'from matplotlib import pyplot as plt\n'), ((42434, 42452), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (42444, 42452), True, 'from matplotlib import pyplot as plt\n'), ((56730, 56803), 'class_objects.gd_oper.load_ground_truth', 'co.gd_oper.load_ground_truth', (['action'], {'ret_labs': '(True)', 'ret_breakpoints': '(True)'}), '(action, ret_labs=True, ret_breakpoints=True)\n', (56758, 56803), True, 'import class_objects as co\n'), ((57221, 57283), 'class_objects.imfold_oper.load_frames_data', 'co.imfold_oper.load_frames_data', (['images_loc'], {'masks_needed': '(True)'}), '(images_loc, masks_needed=True)\n', (57252, 57283), True, 'import class_objects as co\n'), ((58760, 58940), 'class_objects.draw_oper.plot_utterances', 'co.draw_oper.plot_utterances', (['*args'], {'frames': 'cropped_imgs', 'frames_sync': 'sync', 'ground_truth': 'ground_truth', 'breakpoints': 'breakpoints', 'labels': 'labels', 'dataset_name': 'action'}), '(*args, frames=cropped_imgs, frames_sync=sync,\n ground_truth=ground_truth, breakpoints=breakpoints, labels=labels,\n dataset_name=action, **kwargs)\n', (58788, 58940), True, 'import class_objects as co\n'), ((81403, 81436), 'numpy.concatenate', 'np.concatenate', (['f1_scores'], {'axis': '(1)'}), 
'(f1_scores, axis=1)\n', (81417, 81436), True, 'import numpy as np\n'), ((86810, 86909), 'textwrap.TextWrapper', 'TextWrapper', ([], {'width': '(15)', 'break_long_words': '(False)', 'break_on_hyphens': '(False)', 'replace_whitespace': '(False)'}), '(width=15, break_long_words=False, break_on_hyphens=False,\n replace_whitespace=False)\n', (86821, 86909), False, 'from textwrap import TextWrapper\n'), ((94087, 94191), 'class_objects.file_oper.load_labeled_data', 'co.file_oper.load_labeled_data', (["['Testing']"], {'fold_lev': '(1)', 'just_catalog': '(True)', 'include_all_catalog': '(True)'}), "(['Testing'], fold_lev=1, just_catalog=True,\n include_all_catalog=True)\n", (94117, 94191), True, 'import class_objects as co\n'), ((94738, 94810), 'class_objects.file_oper.load_labeled_data', 'co.file_oper.load_labeled_data', (["['Testing']"], {'fold_lev': '(1)', 'all_inside': '(True)'}), "(['Testing'], fold_lev=1, all_inside=True)\n", (94768, 94810), True, 'import class_objects as co\n'), ((95354, 95411), 'class_objects.dict_oper.create_sorted_dict_view', 'co.dict_oper.create_sorted_dict_view', (['self.test_instances'], {}), '(self.test_instances)\n', (95390, 95411), True, 'import class_objects as co\n'), ((104061, 104081), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""Spectral"""'], {}), "('Spectral')\n", (104069, 104081), False, 'from matplotlib.cm import get_cmap\n'), ((107073, 107097), 'cv2.imread', 'cv2.imread', (['filename', '(-1)'], {}), '(filename, -1)\n', (107083, 107097), False, 'import cv2\n'), ((107615, 107791), 'class_objects.gd_oper.construct_ground_truth', 'co.gd_oper.construct_ground_truth', (['classifier.scores_exist'], {'ground_truth_type': "co.CONST['test_' + testname + '_ground_truth']", 'classes_namespace': 'classifier.train_classes'}), "(classifier.scores_exist,\n ground_truth_type=co.CONST['test_' + testname + '_ground_truth'],\n classes_namespace=classifier.train_classes)\n", (107648, 107791), True, 'import class_objects as co\n'), ((108114, 108147), 
'numpy.array', 'np.array', (['classifier.scores_exist'], {}), '(classifier.scores_exist)\n', (108122, 108147), True, 'import numpy as np\n'), ((108438, 108465), 'numpy.array', 'np.array', (['classifier.scores'], {}), '(classifier.scores)\n', (108446, 108465), True, 'import numpy as np\n'), ((109655, 109665), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (109663, 109665), True, 'from matplotlib import pyplot as plt\n'), ((10182, 10261), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'class_weight': '"""balanced"""', 'C': 'self.svm_c', 'multi_class': '"""ovr"""', 'dual': '(False)'}), "(class_weight='balanced', C=self.svm_c, multi_class='ovr', dual=False)\n", (10191, 10261), False, 'from sklearn.svm import LinearSVC\n'), ((12925, 12992), 'class_objects.file_oper.load_labeled_data', 'co.file_oper.load_labeled_data', (["(['Classifier'] + self.classifier_id)"], {}), "(['Classifier'] + self.classifier_id)\n", (12955, 12992), True, 'import class_objects as co\n'), ((13022, 13144), 'class_objects.file_oper.save_labeled_data', 'co.file_oper.save_labeled_data', (["['Classifier']", '[self.unified_classifier, self.training_parameters]'], {'name': 'self.app_dir'}), "(['Classifier'], [self.unified_classifier,\n self.training_parameters], name=self.app_dir)\n", (13052, 13144), True, 'import class_objects as co\n'), ((14375, 14403), 'action_recognition_alg.Actions', 'ara.Actions', (['self.parameters'], {}), '(self.parameters)\n', (14386, 14403), True, 'import action_recognition_alg as ara\n'), ((24620, 24656), 'cPickle.dump', 'pickle.dump', (['self.train_classes', 'out'], {}), '(self.train_classes, out)\n', (24631, 24656), True, 'import cPickle as pickle\n'), ((30350, 30383), 'numpy.array', 'np.array', (['self.train_ground_truth'], {}), '(self.train_ground_truth)\n', (30358, 30383), True, 'import numpy as np\n'), ((35201, 35219), 'numpy.mean', 'np.mean', (['self.time'], {}), '(self.time)\n', (35208, 35219), True, 'import numpy as np\n'), ((42494, 42508), 
'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (42502, 42508), True, 'from matplotlib import pyplot as plt\n'), ((42550, 42564), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (42558, 42564), True, 'from matplotlib import pyplot as plt\n'), ((45633, 45659), 'class_objects.makedir', 'co.makedir', (['self.save_fold'], {}), '(self.save_fold)\n', (45643, 45659), True, 'import class_objects as co\n'), ((67872, 67910), 'numpy.array', 'np.array', (['self.scores_running_mean_vec'], {}), '(self.scores_running_mean_vec)\n', (67880, 67910), True, 'import numpy as np\n'), ((68056, 68079), 'numpy.std', 'np.std', (['self.scores[-1]'], {}), '(self.scores[-1])\n', (68062, 68079), True, 'import numpy as np\n'), ((68423, 68463), 'numpy.mean', 'np.mean', (['self.small_std_running_mean_vec'], {}), '(self.small_std_running_mean_vec)\n', (68430, 68463), True, 'import numpy as np\n'), ((72031, 72092), 'class_objects.noise_proc.masked_filter', 'co.noise_proc.masked_filter', (['scores', 'self.scores_filter_shape'], {}), '(scores, self.scores_filter_shape)\n', (72058, 72092), True, 'import class_objects as co\n'), ((79071, 79109), 'numpy.array', 'np.array', (['recognized_classes[act_type]'], {}), '(recognized_classes[act_type])\n', (79079, 79109), True, 'import numpy as np\n'), ((79179, 79201), 'numpy.isnan', 'np.isnan', (['ground_truth'], {}), '(ground_truth)\n', (79187, 79201), True, 'import numpy as np\n'), ((80120, 80176), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['y_trues[-1]', 'y_preds[-1]'], {'average': 'None'}), '(y_trues[-1], y_preds[-1], average=None)\n', (80136, 80176), False, 'from sklearn import metrics\n'), ((80585, 80635), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['y_trues[-1]', 'y_preds[-1]'], {}), '(y_trues[-1], y_preds[-1])\n', (80609, 80635), False, 'from sklearn import metrics\n'), ((91684, 91705), 'class_objects.makedir', 'co.makedir', (['save_fold'], {}), '(save_fold)\n', (91694, 91705), True, 
'import class_objects as co\n'), ((91969, 91987), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['filename'], {}), '(filename)\n', (91977, 91987), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((92014, 92026), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (92024, 92026), True, 'from matplotlib import pyplot as plt\n'), ((92094, 92255), 'class_objects.table_oper.construct', 'co.table_oper.construct', (['axes'], {'cellText': 'params_rows', 'colLabels': 'params_col_names', 'cellColours': 'params_valids', 'cellLoc': '"""center"""', 'loc': '"""center"""', 'usetex': '(True)'}), "(axes, cellText=params_rows, colLabels=\n params_col_names, cellColours=params_valids, cellLoc='center', loc=\n 'center', usetex=True)\n", (92117, 92255), True, 'import class_objects as co\n'), ((92596, 92668), 'class_objects.table_oper.fit_cells_to_content', 'co.table_oper.fit_cells_to_content', (['fig', 'params_table'], {'equal_height': '(True)'}), '(fig, params_table, equal_height=True)\n', (92630, 92668), True, 'import class_objects as co\n'), ((92698, 92713), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (92706, 92713), True, 'from matplotlib import pyplot as plt\n'), ((92878, 92889), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (92887, 92889), True, 'from matplotlib import pyplot as plt\n'), ((94002, 94027), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (94025, 94027), False, 'import datetime\n'), ((100381, 100399), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""tab20b"""'], {}), "('tab20b')\n", (100389, 100399), False, 'from matplotlib.cm import get_cmap\n'), ((108834, 108932), 'numpy.concatenate', 'np.concatenate', (['(classifier.filtered_scores_std, classifier.filtered_scores_std_mean)'], {'axis': '(0)'}), '((classifier.filtered_scores_std, classifier.\n filtered_scores_std_mean), axis=0)\n', (108848, 108932), True, 'import numpy as np\n'), ((10472, 10517), 
'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', (['self.RDF_n_estimators'], {}), '(self.RDF_n_estimators)\n', (10494, 10517), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((14674, 14711), 'action_recognition_alg.BufferOperations', 'ara.BufferOperations', (['self.parameters'], {}), '(self.parameters)\n', (14694, 14711), True, 'import action_recognition_alg as ara\n'), ((15648, 15715), 'class_objects.file_oper.load_labeled_data', 'co.file_oper.load_labeled_data', (["(['Testing'] + self.tests_ids[count])"], {}), "(['Testing'] + self.tests_ids[count])\n", (15678, 15715), True, 'import class_objects as co\n'), ((29986, 30015), 'numpy.vstack', 'np.vstack', (['self.training_data'], {}), '(self.training_data)\n', (29995, 30015), True, 'import numpy as np\n'), ((32566, 32695), 'OptGridSearchCV.optGridSearchCV', 'optGridSearchCV', (['self.classifier_type', 'self.training_data', 'self.train_ground_truth', 'grid_search_params'], {'n_jobs': '(4)', 'fold_num': '(3)'}), '(self.classifier_type, self.training_data, self.\n train_ground_truth, grid_search_params, n_jobs=4, fold_num=3)\n', (32581, 32695), False, 'from OptGridSearchCV import optGridSearchCV\n'), ((39017, 39038), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (39028, 39038), True, 'from matplotlib import pyplot as plt\n'), ((39073, 39142), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'bbox_extra_artists': '(lgd,)', 'bbox_inches': '"""tight"""'}), "(filename, bbox_extra_artists=(lgd,), bbox_inches='tight')\n", (39084, 39142), True, 'from matplotlib import pyplot as plt\n'), ((40035, 40054), 'numpy.atleast_2d', 'np.atleast_2d', (['data'], {}), '(data)\n', (40048, 40054), True, 'import numpy as np\n'), ((41584, 41649), 'class_objects.plot_oper.put_legend_outside_plot', 'co.plot_oper.put_legend_outside_plot', (['axes'], {'already_reshaped': '(True)'}), '(axes, already_reshaped=True)\n', (41620, 41649), True, 'import class_objects as 
co\n'), ((42286, 42346), 'matplotlib.pyplot.title', 'plt.title', (["(self.testname + '\\n Dataset ' + self.testdataname)"], {}), "(self.testname + '\\n Dataset ' + self.testdataname)\n", (42295, 42346), True, 'from matplotlib import pyplot as plt\n'), ((44453, 44545), 'class_objects.file_oper.load_labeled_data', 'co.file_oper.load_labeled_data', (["['Testing']"], {'just_catalog': '(True)', 'include_all_catalog': '(True)'}), "(['Testing'], just_catalog=True,\n include_all_catalog=True)\n", (44483, 44545), True, 'import class_objects as co\n'), ((51498, 51548), 'numpy.zeros', 'np.zeros', (['(testdata.shape[0], fin_scores.shape[1])'], {}), '((testdata.shape[0], fin_scores.shape[1]))\n', (51506, 51548), True, 'import numpy as np\n'), ((53301, 53410), 'class_objects.file_oper.save_labeled_data', 'co.file_oper.save_labeled_data', (["(['Testing'] + self.tests_ids[self.test_ind])", 'self.testdata[self.test_ind]'], {}), "(['Testing'] + self.tests_ids[self.test_ind],\n self.testdata[self.test_ind])\n", (53331, 53410), True, 'import class_objects as co\n'), ((57477, 57541), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (57493, 57541), False, 'import cv2\n'), ((57589, 57610), 'cv2.contourArea', 'cv2.contourArea', (['cont'], {}), '(cont)\n', (57604, 57610), False, 'import cv2\n'), ((57653, 57675), 'numpy.sum', 'np.sum', (['(mask * img > 0)'], {}), '(mask * img > 0)\n', (57659, 57675), True, 'import numpy as np\n'), ((57689, 57711), 'numpy.sum', 'np.sum', (['(mask * img > 0)'], {}), '(mask * img > 0)\n', (57695, 57711), True, 'import numpy as np\n'), ((57859, 57881), 'cv2.boundingRect', 'cv2.boundingRect', (['cont'], {}), '(cont)\n', (57875, 57881), False, 'import cv2\n'), ((58529, 58551), 'numpy.zeros', 'np.zeros', (['(ydim, xdim)'], {}), '((ydim, xdim))\n', (58537, 58551), True, 'import numpy as np\n'), ((61970, 61991), 'numpy.array', 'np.array', (['self.scores'], {}), 
'(self.scores)\n', (61978, 61991), True, 'import numpy as np\n'), ((69047, 69099), 'numpy.mean', 'np.mean', (['self.big_std_running_mean_vec[-start_from:]'], {}), '(self.big_std_running_mean_vec[-start_from:])\n', (69054, 69099), True, 'import numpy as np\n'), ((70203, 70229), 'numpy.argmax', 'np.argmax', (['self.scores[-1]'], {}), '(self.scores[-1])\n', (70212, 70229), True, 'import numpy as np\n'), ((72904, 72929), 'numpy.zeros', 'np.zeros', (['scores.shape[0]'], {}), '(scores.shape[0])\n', (72912, 72929), True, 'import numpy as np\n'), ((72995, 73027), 'numpy.std', 'np.std', (['scores[fmask, :]'], {'axis': '(1)'}), '(scores[fmask, :], axis=1)\n', (73001, 73027), True, 'import numpy as np\n'), ((73106, 73174), 'class_objects.noise_proc.masked_filter', 'co.noise_proc.masked_filter', (['scores_std', 'self.std_small_filter_shape'], {}), '(scores_std, self.std_small_filter_shape)\n', (73133, 73174), True, 'import class_objects as co\n'), ((73232, 73298), 'class_objects.noise_proc.masked_filter', 'co.noise_proc.masked_filter', (['scores_std', 'self.std_big_filter_shape'], {}), '(scores_std, self.std_big_filter_shape)\n', (73259, 73298), True, 'import class_objects as co\n'), ((73355, 73380), 'numpy.zeros', 'np.zeros', (['scores.shape[0]'], {}), '(scores.shape[0])\n', (73363, 73380), True, 'import numpy as np\n'), ((79323, 79351), 'numpy.isnan', 'np.isnan', (['recognized_actions'], {}), '(recognized_actions)\n', (79331, 79351), True, 'import numpy as np\n'), ((79385, 79413), 'numpy.array', 'np.array', (['recognized_actions'], {}), '(recognized_actions)\n', (79393, 79413), True, 'import numpy as np\n'), ((79439, 79460), 'numpy.isnan', 'np.isnan', (['y_preds[-1]'], {}), '(y_preds[-1])\n', (79447, 79460), True, 'import numpy as np\n'), ((79633, 79705), 'class_objects.macro_metrics.construct_vectors', 'co.macro_metrics.construct_vectors', (['y_trues[-1]', 'y_preds[-1]', 'utters[-1]'], {}), '(y_trues[-1], y_preds[-1], utters[-1])\n', (79667, 79705), True, 'import 
class_objects as co\n'), ((80356, 80374), 'numpy.atleast_2d', 'np.atleast_2d', (['fsc'], {}), '(fsc)\n', (80369, 80374), True, 'import numpy as np\n'), ((80406, 80454), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_trues[-1]', 'y_preds[-1]'], {}), '(y_trues[-1], y_preds[-1])\n', (80428, 80454), False, 'from sklearn import metrics\n'), ((82713, 82739), 'numpy.array2string', 'np.array2string', (['f1_scores'], {}), '(f1_scores)\n', (82728, 82739), True, 'import numpy as np\n'), ((82826, 82859), 'numpy.array2string', 'np.array2string', (['confusion_matrix'], {}), '(confusion_matrix)\n', (82841, 82859), True, 'import numpy as np\n'), ((92941, 92953), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (92951, 92953), True, 'from matplotlib import pyplot as plt\n'), ((93030, 93181), 'class_objects.table_oper.construct', 'co.table_oper.construct', (['axes'], {'cellText': 'results_rows', 'colLabels': 'results_col_names', 'cellColours': 'results_valids', 'cellLoc': '"""center"""', 'loc': '"""center"""'}), "(axes, cellText=results_rows, colLabels=\n results_col_names, cellColours=results_valids, cellLoc='center', loc=\n 'center')\n", (93053, 93181), True, 'import class_objects as co\n'), ((93309, 93382), 'class_objects.table_oper.fit_cells_to_content', 'co.table_oper.fit_cells_to_content', (['fig', 'results_table'], {'equal_height': '(True)'}), '(fig, results_table, equal_height=True)\n', (93343, 93382), True, 'import class_objects as co\n'), ((93543, 93558), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (93551, 93558), True, 'from matplotlib import pyplot as plt\n'), ((93748, 93759), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (93757, 93759), True, 'from matplotlib import pyplot as plt\n'), ((96250, 96274), 'numpy.array', 'np.array', (['self.crossings'], {}), '(self.crossings)\n', (96258, 96274), True, 'import numpy as np\n'), ((100517, 100554), 'numpy.zeros_like', 'np.zeros_like', 
(['self.test_ground_truth'], {}), '(self.test_ground_truth)\n', (100530, 100554), True, 'import numpy as np\n'), ((108029, 108056), 'numpy.array', 'np.array', (['classifier.scores'], {}), '(classifier.scores)\n', (108037, 108056), True, 'import numpy as np\n'), ((10680, 10739), 'sklearn.ensemble.AdaBoostClassifier', 'AdaBoostClassifier', ([], {'n_estimators': 'self.AdaBoost_n_estimators'}), '(n_estimators=self.AdaBoost_n_estimators)\n', (10698, 10739), False, 'from sklearn.ensemble import AdaBoostClassifier\n'), ((13676, 13741), 'class_objects.file_oper.load_labeled_data', 'co.file_oper.load_labeled_data', (["['Classifier']"], {'name': 'self.app_dir'}), "(['Classifier'], name=self.app_dir)\n", (13706, 13741), True, 'import class_objects as co\n'), ((29564, 29689), 'class_objects.preproc_oper.equalize_samples', 'co.preproc_oper.equalize_samples', ([], {'samples': 'self.training_data', 'utterance_indices': 'self.training_samples_inds', 'mode': '"""random"""'}), "(samples=self.training_data,\n utterance_indices=self.training_samples_inds, mode='random')\n", (29596, 29689), True, 'import class_objects as co\n'), ((30233, 30264), 'numpy.isfinite', 'np.isfinite', (['self.training_data'], {}), '(self.training_data)\n', (30244, 30264), True, 'import numpy as np\n'), ((34630, 34654), 'numpy.mean', 'np.mean', (['feat_times[key]'], {}), '(feat_times[key])\n', (34637, 34654), True, 'import numpy as np\n'), ((38198, 38224), 'class_objects.makedir', 'co.makedir', (['self.save_fold'], {}), '(self.save_fold)\n', (38208, 38224), True, 'import class_objects as co\n'), ((41017, 41036), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (41033, 41036), True, 'import numpy as np\n'), ((57806, 57828), 'numpy.argmax', 'np.argmax', (['conts_areas'], {}), '(conts_areas)\n', (57815, 57828), True, 'import numpy as np\n'), ((64897, 64912), 'numpy.array', 'np.array', (['score'], {}), '(score)\n', (64905, 64912), True, 'import numpy as np\n'), ((67073, 67088), 'numpy.array', 
'np.array', (['score'], {}), '(score)\n', (67081, 67088), True, 'import numpy as np\n'), ((70680, 70706), 'numpy.argmax', 'np.argmax', (['self.scores[-1]'], {}), '(self.scores[-1])\n', (70689, 70706), True, 'import numpy as np\n'), ((71410, 71433), 'numpy.max', 'np.max', (['self.scores[-1]'], {}), '(self.scores[-1])\n', (71416, 71433), True, 'import numpy as np\n'), ((72525, 72556), 'numpy.isnan', 'np.isnan', (['extracted_actions[-1]'], {}), '(extracted_actions[-1])\n', (72533, 72556), True, 'import numpy as np\n'), ((73747, 73791), 'numpy.where', 'np.where', (['(positive[1:] - positive[:-1] == -1)'], {}), '(positive[1:] - positive[:-1] == -1)\n', (73755, 73791), True, 'import numpy as np\n'), ((78996, 79029), 'numpy.array', 'np.array', (['ground_truths[act_type]'], {}), '(ground_truths[act_type])\n', (79004, 79029), True, 'import numpy as np\n'), ((79547, 79588), 'numpy.array', 'np.array', (['utterances_annotation[act_type]'], {}), '(utterances_annotation[act_type])\n', (79555, 79588), True, 'import numpy as np\n'), ((79758, 79781), 'numpy.unique', 'np.unique', (['ground_truth'], {}), '(ground_truth)\n', (79767, 79781), True, 'import numpy as np\n'), ((80872, 80899), 'numpy.zeros', 'np.zeros', (['conf_mat.shape[0]'], {}), '(conf_mat.shape[0])\n', (80880, 80899), True, 'import numpy as np\n'), ((84386, 84505), 'class_objects.latex.array_transcribe', 'co.latex.array_transcribe', (['data'], {'xlabels': 'acc_labels', 'ylabels': 'ylabels', 'sup_x_label': '"""Accuracy"""', 'extra_locs': 'extra_locs'}), "(data, xlabels=acc_labels, ylabels=ylabels,\n sup_x_label='Accuracy', extra_locs=extra_locs)\n", (84411, 84505), True, 'import class_objects as co\n'), ((89673, 89694), 'numpy.array', 'np.array', (['params_rows'], {}), '(params_rows)\n', (89681, 89694), True, 'import numpy as np\n'), ((95524, 95565), 'class_objects.dict_oper.lookup', 'co.dict_oper.lookup', (['entry[0]', 'key', '*keys'], {}), '(entry[0], key, *keys)\n', (95543, 95565), True, 'import class_objects as 
co\n'), ((98706, 98771), 'class_objects.file_oper.load_labeled_data', 'co.file_oper.load_labeled_data', (["['Classifier']"], {'just_catalog': '(True)'}), "(['Classifier'], just_catalog=True)\n", (98736, 98771), True, 'import class_objects as co\n'), ((102368, 102384), 'numpy.vstack', 'np.vstack', (['plots'], {}), '(plots)\n', (102377, 102384), True, 'import numpy as np\n'), ((23263, 23279), 'cPickle.load', 'pickle.load', (['inp'], {}), '(inp)\n', (23274, 23279), True, 'import cPickle as pickle\n'), ((69209, 69230), 'numpy.min', 'np.min', (['std_mean_diff'], {}), '(std_mean_diff)\n', (69215, 69230), True, 'import numpy as np\n'), ((72421, 72434), 'numpy.max', 'np.max', (['score'], {}), '(score)\n', (72427, 72434), True, 'import numpy as np\n'), ((73995, 74008), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (74003, 74008), True, 'import numpy as np\n'), ((74141, 74168), 'numpy.array', 'np.array', (['[scores.shape[0]]'], {}), '([scores.shape[0]])\n', (74149, 74168), True, 'import numpy as np\n'), ((74466, 74478), 'numpy.any', 'np.any', (['mask'], {}), '(mask)\n', (74472, 74478), True, 'import numpy as np\n'), ((74506, 74531), 'numpy.zeros', 'np.zeros', (['(cross2 - cross1)'], {}), '(cross2 - cross1)\n', (74514, 74531), True, 'import numpy as np\n'), ((74899, 74923), 'numpy.ones', 'np.ones', (['(cross2 - cross1)'], {}), '(cross2 - cross1)\n', (74906, 74923), True, 'import numpy as np\n'), ((74944, 74964), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (74958, 74964), True, 'import numpy as np\n'), ((81169, 81188), 'numpy.array', 'np.array', (['act_names'], {}), '(act_names)\n', (81177, 81188), True, 'import numpy as np\n'), ((84134, 84187), 'numpy.hstack', 'np.hstack', (['(partial_accuracies[key], accuracies[key])'], {}), '((partial_accuracies[key], accuracies[key]))\n', (84143, 84187), True, 'import numpy as np\n'), ((91170, 91192), 'numpy.array', 'np.array', (['results_rows'], {}), '(results_rows)\n', (91178, 91192), True, 'import numpy as 
np\n'), ((19350, 19424), 'class_objects.dict_oper.create_sorted_dict_view', 'co.dict_oper.create_sorted_dict_view', (["self.parameters['classifier_params']"], {}), "(self.parameters['classifier_params'])\n", (19386, 19424), True, 'import class_objects as co\n'), ((29237, 29262), 'numpy.array', 'np.array', (['samples_indices'], {}), '(samples_indices)\n', (29245, 29262), True, 'import numpy as np\n'), ((37302, 37394), 'class_objects.file_oper.load_labeled_data', 'co.file_oper.load_labeled_data', (["['Testing']"], {'just_catalog': '(True)', 'include_all_catalog': '(True)'}), "(['Testing'], just_catalog=True,\n include_all_catalog=True)\n", (37332, 37394), True, 'import class_objects as co\n'), ((51323, 51344), 'numpy.isfinite', 'np.isfinite', (['testdata'], {}), '(testdata)\n', (51334, 51344), True, 'import numpy as np\n'), ((53661, 53770), 'numpy.concatenate', 'np.concatenate', (['(self.less_filtered_scores_std[:, None], self.high_filtered_scores_std[:, None]\n )'], {'axis': '(1)'}), '((self.less_filtered_scores_std[:, None], self.\n high_filtered_scores_std[:, None]), axis=1)\n', (53675, 53770), True, 'import numpy as np\n'), ((54102, 54141), 'numpy.array', 'np.array', (['self.high_filtered_scores_std'], {}), '(self.high_filtered_scores_std)\n', (54110, 54141), True, 'import numpy as np\n'), ((54181, 54220), 'numpy.array', 'np.array', (['self.less_filtered_scores_std'], {}), '(self.less_filtered_scores_std)\n', (54189, 54220), True, 'import numpy as np\n'), ((54959, 54988), 'numpy.concatenate', 'np.concatenate', (['plots'], {'axis': '(1)'}), '(plots, axis=1)\n', (54973, 54988), True, 'import numpy as np\n'), ((72837, 72856), 'numpy.isfinite', 'np.isfinite', (['scores'], {}), '(scores)\n', (72848, 72856), True, 'import numpy as np\n'), ((74669, 74705), 'numpy.mean', 'np.mean', (['act_scores[mask, :]'], {'axis': '(0)'}), '(act_scores[mask, :], axis=0)\n', (74676, 74705), True, 'import numpy as np\n'), ((80008, 80030), 'numpy.unique', 'np.unique', (['y_trues[-1]'], 
{}), '(y_trues[-1])\n', (80017, 80030), True, 'import numpy as np\n'), ((85022, 85052), 'numpy.atleast_2d', 'np.atleast_2d', (['accuracies[typ]'], {}), '(accuracies[typ])\n', (85035, 85052), True, 'import numpy as np\n'), ((85115, 85202), 'numpy.concatenate', 'np.concatenate', (["(self.testdata[self.test_ind]['Labels'][typ], ['Accuracy'])"], {'axis': '(0)'}), "((self.testdata[self.test_ind]['Labels'][typ], ['Accuracy']),\n axis=0)\n", (85129, 85202), True, 'import numpy as np\n'), ((104714, 104736), 'numpy.ones', 'np.ones', (['gest_dur.size'], {}), '(gest_dur.size)\n', (104721, 104736), True, 'import numpy as np\n'), ((28831, 28855), 'numpy.isfinite', 'np.isfinite', (['descriptors'], {}), '(descriptors)\n', (28842, 28855), True, 'import numpy as np\n'), ((34555, 34579), 'numpy.mean', 'np.mean', (['feat_times[key]'], {}), '(feat_times[key])\n', (34562, 34579), True, 'import numpy as np\n'), ((72285, 72303), 'numpy.isfinite', 'np.isfinite', (['score'], {}), '(score)\n', (72296, 72303), True, 'import numpy as np\n'), ((81803, 81822), 'numpy.array', 'np.array', (['undef_vec'], {}), '(undef_vec)\n', (81811, 81822), True, 'import numpy as np\n'), ((20083, 20154), 'class_objects.dict_oper.create_sorted_dict_view', 'co.dict_oper.create_sorted_dict_view', (["self.parameters['testing_params']"], {}), "(self.parameters['testing_params'])\n", (20119, 20154), True, 'import class_objects as co\n'), ((54331, 54353), 'numpy.isfinite', 'np.isfinite', (['mean_diff'], {}), '(mean_diff)\n', (54342, 54353), True, 'import numpy as np\n'), ((54658, 54693), 'numpy.isfinite', 'np.isfinite', (['self.test_ground_truth'], {}), '(self.test_ground_truth)\n', (54669, 54693), True, 'import numpy as np\n'), ((54813, 54848), 'numpy.isfinite', 'np.isfinite', (['self.test_ground_truth'], {}), '(self.test_ground_truth)\n', (54824, 54848), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.linear_model import LogisticRegression
from .base import TransformationBaseModel
class Kane(TransformationBaseModel):
    """The class which implements the Kane's approach.

    The training data are re-encoded into four outcome/treatment classes
    (see ``__encode_data``), a single multi-class model is fitted on them,
    and the uplift is derived from the predicted class probabilities.

    +----------------+-----------------------------------------------------------------------------------+
    | **Parameters** | | **model : object, optional (default=None)**                                     |
    |                | |   The classification model which will be used for predict uplift.               |
    |                | |   If None, a fresh ``sklearn.linear_model.LogisticRegression(n_jobs=-1)``       |
    |                | |   is created for the instance.                                                  |
    |                | | **use_weights : boolean, optional (default=False)**                             |
    |                | |   Use or not weights?                                                           |
    +----------------+-----------------------------------------------------------------------------------+

    *******
    Methods
    *******
    +-----------------------------------------------+----------------------------------------------------+
    | :ref:`fit(self, X, y, t) <lai_fit>`           | Build the model from the training set (X, y, t).   |
    +-----------------------------------------------+----------------------------------------------------+
    | :ref:`predict(self, X, t=None) <lai_predict>` | Predict an uplift for X.                           |
    +-----------------------------------------------+----------------------------------------------------+
    """

    def __init__(self, model=None, use_weights=False):
        # BUG FIX: the old signature used ``model=LogisticRegression(n_jobs=-1)``
        # as the default.  A default argument is evaluated once at class
        # definition time, so every Kane instance created with the default shared
        # (and re-fitted) the very same estimator object.  Using ``None`` as a
        # sentinel gives each instance its own fresh estimator while remaining
        # backward-compatible for callers that pass a model explicitly.
        if model is None:
            model = LogisticRegression(n_jobs=-1)
        try:
            # Duck-typed validation: the model only needs fit/predict.
            model.__getattribute__('fit')
            model.__getattribute__('predict')
        except AttributeError:
            raise ValueError('Model should contains two methods: fit and predict.')
        self.model = model
        self.use_weights = use_weights

    def fit(self, X, y, t):
        """Build the model from the training set (X, y, t).

        +------------------+---------------------------------------------------------------------------------+
        | **Parameters**   | | **X: numpy ndarray with shape = [n_samples, n_features]**                     |
        |                  | |   Matrix of features.                                                         |
        |                  | | **y: numpy array with shape = [n_samples,]**                                  |
        |                  | |   Array of target of feature.                                                 |
        |                  | | **t: numpy array with shape = [n_samples,]**                                  |
        |                  | |   Array of treatments.                                                        |
        +------------------+---------------------------------------------------------------------------------+
        | **Returns**      | **self : object**                                                               |
        +------------------+---------------------------------------------------------------------------------+
        """
        # Collapse (y, t) into a single 4-class label vector, then fit once.
        y_encoded = self.__encode_data(y, t)
        self.model.fit(X, y_encoded)
        if self.use_weights:
            self.__init_weights(t)
        return self

    def predict(self, X, t=None):
        """Predict an uplift for X.

        +------------------+---------------------------------------------------------------------------------+
        | **Parameters**   | | **X: numpy ndarray with shape = [n_samples, n_features]**                     |
        |                  | |   Matrix of features.                                                         |
        |                  | | **t: numpy array with shape = [n_samples,] or None**                          |
        |                  | |   Array of treatments (unused by this method).                                |
        +------------------+---------------------------------------------------------------------------------+
        | **Returns**      | | **self : object**                                                             |
        |                  | |   The predicted values.                                                       |
        +------------------+---------------------------------------------------------------------------------+
        """
        # One predict_proba call instead of four identical ones (the old code
        # recomputed the probabilities for every column).
        # Column order follows the encoding in __encode_data:
        # 0 -> is_tr, 1 -> is_cn, 2 -> is_tn, 3 -> is_cr.
        proba = self.model.predict_proba(X)
        p_tr = proba[:, 0]
        p_cn = proba[:, 1]
        p_tn = proba[:, 2]
        p_cr = proba[:, 3]
        if self.use_weights:
            # Normalize by group sizes computed in __init_weights.
            return (p_tr / self.treatment_count + p_cn / self.control_count) - \
                (p_tn / self.treatment_count + p_cr / self.control_count)
        else:
            return (p_tr + p_cn) - (p_tn + p_cr)

    def __encode_data(self, y, t):
        # Map every (outcome, treatment) pair onto one of four labels using the
        # is_* predicates inherited from TransformationBaseModel:
        #   0 -> is_tr, 1 -> is_cn, 2 -> is_tn, 3 -> is_cr.
        # NOTE(review): a sample matching none of the predicates is silently
        # skipped, which would desynchronize X and the labels -- presumably the
        # four cases are exhaustive for binary y and t; confirm in the base class.
        y_values = []
        for i in range(y.shape[0]):
            if self.is_tr(y[i], t[i]):
                y_values.append(0)
            elif self.is_cn(y[i], t[i]):
                y_values.append(1)
            elif self.is_tn(y[i], t[i]):
                y_values.append(2)
            elif self.is_cr(y[i], t[i]):
                y_values.append(3)
        return np.array(y_values)

    def __init_weights(self, t):
        # Count control (t == 0.0) and treated (anything else) samples; these
        # totals are used in ``predict`` to weight the class probabilities.
        control_count = sum(1 for el in t if el == 0.0)
        self.control_count = control_count
        self.treatment_count = len(t) - control_count
| [
"numpy.array",
"sklearn.linear_model.LogisticRegression"
] | [((1462, 1491), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (1480, 1491), False, 'from sklearn.linear_model import LogisticRegression\n'), ((5115, 5133), 'numpy.array', 'np.array', (['y_values'], {}), '(y_values)\n', (5123, 5133), True, 'import numpy as np\n')] |
"""
Isotonic Regression that preserves 32bit inputs.
backported from scikit-learn pull request
https://github.com/scikit-learn/scikit-learn/pull/9106"""
import numpy as np
from sklearn.utils import as_float_array
from ._isotonic import _inplace_contiguous_isotonic_regression
def isotonic_regression(y, sample_weight=None, y_min=None, y_max=None,
                        increasing=True):
    """Solve the isotonic regression model::

        min sum w[i] (y[i] - y_[i]) ** 2

        subject to y_min = y_[1] <= y_[2] ... <= y_[n] = y_max

    where:
        - y[i] are inputs (real numbers)
        - y_[i] are fitted
        - w[i] are optional strictly positive weights (default to 1.0)

    Read more in the :ref:`User Guide <isotonic>`.

    Parameters
    ----------
    y : iterable of floating-point values
        The data.

    sample_weight : iterable of floating-point values, optional, default: None
        Weights on each point of the regression.
        If None, weight is set to 1 (equal weights).

    y_min : optional, default: None
        If not None, set the lowest value of the fit to y_min.

    y_max : optional, default: None
        If not None, set the highest value of the fit to y_max.

    increasing : boolean, optional, default: True
        Whether to compute ``y_`` is increasing (if set to True) or decreasing
        (if set to False)

    Returns
    -------
    y_ : list of floating-point values
        Isotonic fit of y.

    References
    ----------
    "Active set algorithms for isotonic regression; A unifying framework"
    by Best and Chakravarti, section 3.
    """
    # A reversed view turns the decreasing problem into an increasing one.
    direction = np.s_[:] if increasing else np.s_[::-1]

    y = as_float_array(y)
    # Contiguous working copy in the chosen direction; solved in place below.
    solution = np.array(y[direction], dtype=y.dtype)
    if sample_weight is None:
        weights = np.ones(len(solution), dtype=solution.dtype)
    else:
        weights = np.array(sample_weight[direction], dtype=solution.dtype)

    _inplace_contiguous_isotonic_regression(solution, weights)
    if y_min is not None or y_max is not None:
        # Older versions of np.clip don't accept None as a bound, so use np.inf
        lower = -np.inf if y_min is None else y_min
        upper = np.inf if y_max is None else y_max
        np.clip(solution, lower, upper, out=solution)
    # Undo the direction flip before returning.
    return solution[direction]
| [
"numpy.clip",
"numpy.array",
"sklearn.utils.as_float_array"
] | [((1679, 1696), 'sklearn.utils.as_float_array', 'as_float_array', (['y'], {}), '(y)\n', (1693, 1696), False, 'from sklearn.utils import as_float_array\n'), ((1705, 1738), 'numpy.array', 'np.array', (['y[order]'], {'dtype': 'y.dtype'}), '(y[order], dtype=y.dtype)\n', (1713, 1738), True, 'import numpy as np\n'), ((1858, 1903), 'numpy.array', 'np.array', (['sample_weight[order]'], {'dtype': 'y.dtype'}), '(sample_weight[order], dtype=y.dtype)\n', (1866, 1903), True, 'import numpy as np\n'), ((2209, 2236), 'numpy.clip', 'np.clip', (['y', 'y_min', 'y_max', 'y'], {}), '(y, y_min, y_max, y)\n', (2216, 2236), True, 'import numpy as np\n')] |
"""Log-gamma distribution."""
import numpy
from scipy import special
from ..baseclass import Dist
from ..operators.addition import Add
from .deprecate import deprecation_warning
class log_gamma(Dist):
    """Raw log-gamma distribution used as an internal building block."""

    def __init__(self, c):
        # Hand the shape parameter straight to the distribution baseclass.
        Dist.__init__(self, c=c)

    def _pdf(self, x, c):
        # Density via the log-density c*x - e**x - log(Gamma(c)),
        # exponentiated at the end.
        log_density = c*x - numpy.exp(x) - special.gammaln(c)
        return numpy.exp(log_density)

    def _cdf(self, x, c):
        # CDF is the regularized lower incomplete gamma evaluated at e**x.
        return special.gammainc(c, numpy.exp(x))

    def _ppf(self, q, c):
        # Inverse CDF: invert the incomplete gamma, then map back with log.
        return numpy.log(special.gammaincinv(c, q))
class LogGamma(Add):
    """
    Log-gamma distribution

    Args:
        shape (float, Dist):
            Shape parameter
        scale (float, Dist):
            Scaling parameter
        shift (float, Dist):
            Location parameter

    Examples:
        >>> distribution = chaospy.LogGamma(2, 2, 1)
        >>> distribution
        LogGamma(scale=2, shape=2, shift=1)
        >>> q = numpy.linspace(0,1,6)[1:-1]
        >>> distribution.inv(q).round(4)
        array([0.6138, 1.639 , 2.4085, 3.1934])
        >>> distribution.fwd(distribution.inv(q)).round(4)
        array([0.2, 0.4, 0.6, 0.8])
        >>> distribution.pdf(distribution.inv(q)).round(4)
        array([0.149 , 0.2392, 0.2706, 0.2245])
        >>> distribution.sample(4).round(4)
        array([ 2.6074, -0.0932,  4.1166,  1.9675])
        >>> distribution.mom(1).round(4)
        1.8456
    """

    def __init__(self, shape=1, scale=1, shift=0):
        # Record constructor arguments so the repr machinery can show them.
        self._repr = dict(shape=shape, scale=scale, shift=shift)
        # Scale and shift the raw log-gamma kernel via the addition operator.
        Add.__init__(self, left=log_gamma(shape)*scale, right=shift)
# Deprecated alias kept for backwards compatibility; using it emits a
# warning pointing callers at ``LogGamma``.
Loggamma = deprecation_warning(LogGamma, "Loggamma")
| [
"numpy.exp",
"scipy.special.gammaln",
"scipy.special.gammaincinv"
] | [((450, 462), 'numpy.exp', 'numpy.exp', (['x'], {}), '(x)\n', (459, 462), False, 'import numpy\n'), ((516, 541), 'scipy.special.gammaincinv', 'special.gammaincinv', (['c', 'q'], {}), '(c, q)\n', (535, 541), False, 'from scipy import special\n'), ((368, 386), 'scipy.special.gammaln', 'special.gammaln', (['c'], {}), '(c)\n', (383, 386), False, 'from scipy import special\n'), ((355, 367), 'numpy.exp', 'numpy.exp', (['x'], {}), '(x)\n', (364, 367), False, 'import numpy\n')] |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Input Pipeline for loading data into the model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from multiprocessing import cpu_count
import dask
import dask.dataframe as dd
import dill
import lime.lime_tabular
import numpy as np
import pandas as pd
import tensorflow as tf
from google.cloud import storage
from six.moves.urllib.parse import urlparse
class InputReader(object):
    """Class for reading input from different sources.

    Currently assumes csv files (local or on Google Cloud Storage).
    """
    def __init__(self, csv_path, task_type, target_var, na_values=None,
                 column_names=None,
                 to_drop=None, gcs_path=False, data_type=None):
        """The init method initialises and keeps track of the source of input
        (say csv, json etc) and other variables.
        Arguments:
            csv_path : string, Path of the csv files whether local or on remote storage
            task_type : string, ML task at hand, following options are expected
                [classification, regression, clustering]
            target_var : string, Name of the dependent/target variable
            na_values : string, String by which the na values are represented in the data
            column_names : string, Path of a text file listing column names, one per line
            to_drop : list, Any redundant columns which can be dropped
            gcs_path : boolean, Whether the csv is stored on google cloud storage
            data_type : dict, dictionary containing the data type of all columns in format
                {'a': 'float', 'b': 'object', 'c': 'int' }
        """
        self.csv_path = csv_path
        self.task_type = task_type
        self.target_var = target_var
        self.na_values = na_values
        self.to_drop = to_drop
        self.gcs_path = gcs_path
        self.data_type = data_type
        if column_names:
            # Column names come one per line; strip trailing whitespace and
            # drop blank lines.
            with tf.gfile.Open(column_names, 'r') as f:
                self.column_names = [line.rstrip() for line in f]
                self.column_names = [
                    line for line in self.column_names if line]
        else:
            self.column_names = column_names
    def parse_csv_wrap(self):
        """
        A Wrapper function for parsing csv files
        Returns:
            _parse_csv function
        """
        return self._parse_csv()
    def _parse_csv(self):
        """Reads csv files in dask to determine the datatypes and other features about data
        this helps in creating a dataset object in tensorflow
        Returns:
            df : dask dataframe, parsed dataframe object
            list(df.columns) : list, list of column names
        """
        if self.gcs_path:
            # Stage remote GCS files into /tmp before handing them to dask.
            if isinstance(self.csv_path, list):
                for index, path in enumerate(self.csv_path):
                    parse_result = urlparse(path)
                    bucket = parse_result.hostname
                    csv_name = parse_result.path
                    self._download_csv(
                        bucket,
                        csv_name,
                        path_name='/tmp/data_' +
                        str(index) +
                        '.csv')
                csv_path = '/tmp/data_*.csv'
            else:
                parse_result = urlparse(self.csv_path)
                bucket = parse_result.hostname
                csv_name = parse_result.path
                self._download_csv(bucket, csv_name)
                csv_path = '/tmp/data.csv'
        else:
            csv_path = self.csv_path
        # If explicit column names were provided, the file has no header row.
        if self.column_names:
            header = None
        else:
            header = 'infer'
        try:
            df = dd.read_csv(
                csv_path,
                names=self.column_names,
                header=header,
                na_values=self.na_values,
                sample=12800000,
                dtype=self.data_type)
            if isinstance(csv_path, list):
                len(df)  # Checks whether schema is consistent throughout the data
        except Exception:
            raise AssertionError(
                'Data types given are inconsistent with data provided')
        if self.to_drop is not None:
            drop_column_names = self.to_drop
            # Only drop columns that actually exist in the data.
            drop_column_names = [
                name for name in drop_column_names if name in df.columns]
            df = self.drop_cols(df, drop_column_names)
            tf.logging.info('Dropping the columns : %s', drop_column_names)
        return df, list(df.columns)
    @classmethod
    def drop_cols(cls, df, col_names):
        """Drops any columns which are not required by the user.
        Arguments:
            df : dask dataframe, Dataframe of input data
            col_names : list, Columns in the data to be dropped
        Returns:
            dask dataframe, Updated dataframe with columns dropped
        """
        return df.drop(col_names, axis=1)
    @classmethod
    def _download_csv(cls, bucket_name, csv_path, path_name='/tmp/data.csv'):
        """Utility to download a csv file stored on Google Cloud Storage
        to the local file system.
        Arguments:
            bucket_name : string, GCS bucket holding the csv file
            csv_path : string, Path of the csv file inside the bucket
            path_name : string, Local destination of the downloaded file
        """
        client = storage.Client()
        bucket = client.get_bucket(bucket_name)
        blob = bucket.blob(csv_path)
        blob.download_to_filename(path_name)
class BasicStats(object):
    """Calculating stats and using them for cleaning the data"""
    def __init__(self):
        """Data type parameters"""
    def is_not_used(self):
        # Marker called by instance methods that do not touch instance state
        # (keeps linters from suggesting @staticmethod).
        pass
    @classmethod
    def dropping_zero_var_cols(cls, df, target_var, stddev_list):
        """Check columns which have zero variance and removes the from the dataframe.
        As the zero variance columns or contant columns can't be considered as output column
        Arguments:
            df : dask dataframe, The dataframe to validate
            stddev_list : dask series, Series containing the standard deviation values for columns
            target_var : string, Dependent variable for the analysis
        Returns:
            df : dask dataframe, Dataframe with redundant columns removed
        Raises:
            AssertionError : If the target column has zero deviation
        """
        continuous_cols = [
            col for col in df.columns if df[col].dtype != 'object']
        for col in continuous_cols:
            if stddev_list[col] == 0.0:
                df = df.drop(col, axis=1)
                # A constant target makes the task unlearnable -- fail fast.
                if col == target_var:
                    err_msg = 'Target variable has zero standard deviation or a contant column. ' \
                        'Please check the data'
                    tf.logging.error(err_msg)
                    raise AssertionError(err_msg)
        return df
    @classmethod
    def normalize(cls, df, target_var, mean_list, stddev_list):
        """Normalizes the numerical columns in a dataframe.
        Arguments:
            df : dask dataframe, The dataframe to normalize
            target_var : string, Dependent variable for the analysis
            mean_list : dask series, Series with all the mean values
            stddev_list : dask series, Series with all the standard deviation values
        Returns:
            df : Dataframe with mean normalized numerical columns
        """
        continuous_cols = [
            col for col in df.columns if df[col].dtype != 'object' and col != target_var]
        for col in continuous_cols:
            # z-score: (x - mean) / std per numeric column.
            df[col] = df[col].sub(mean_list[col]).div(stddev_list[col])
        return df
    @classmethod
    def calculate_stats(cls, df, target_var):
        """Calculates descriptive stats of the dataframe required for cleaning.
        Arguments:
            df : dask dataframe, The dataframe at hand
            target_var : string, Dependent variable for the analysis
        Returns:
            mean : dask series, mean of each column
            median : dask series, median of each column
            dict(zip(categorical_cols, mode)) : dict, Dictionary containing
                categorical column as keys and their modes as values
            std : dask series, standard deviation of each column
        """
        categorical_columns = [
            col for col in df.columns if col != target_var and df[col].dtype == 'object']
        # Build the lazy dask graphs first, then evaluate them all in one
        # dask.compute call so the data is only traversed once.
        mean_op = df.mean()
        std_op = df.std()
        median_op = df.quantile(0.5)
        mode_op = [df[col].value_counts().idxmax()
                   for col in categorical_columns]
        mean, median, mode, std = dask.compute(
            mean_op, median_op, mode_op, std_op)
        return mean, median, dict(zip(categorical_columns, mode)), std
    @classmethod
    def impute(cls, df, target_var, median, mode):
        """Imputing missing values using median for continuous columns and mode
        for categorical columns.
        Arguments:
            df : dask dataframe, The dataframe at hand
            target_var : string, Dependent variable for the analysis
            median : list, median of all columns in data
            mode : list, mode of all columns in data
        Returns:
            df : dask dataframe, Dataframe without missing values
        """
        missing_stats = df.isna().sum().compute()
        cols = [col for col in df.columns if col != target_var]
        for col in cols:
            if missing_stats[col] > 0 and df[col].dtype == 'object':
                df[col] = df[col].fillna(mode[col])
            elif missing_stats[col] > 0:
                df[col] = df[col].fillna(median[col])
        return df
    def clean_data(self, df, target_var, task_type, name):
        """Cleans a dataset by removing outliers
        Outiers and missing values are replaced by
        median for continuous and mode for categorical
        Arguments:
            df : dask dataframe, The dataframe to be cleaned
            target_var : string, Name of the target variable
            task_type : string, Type of the task at hand
            name : string, Name of the data being cleaned (train or eval)
        Returns:
            df : dask dataframe, Cleaned dataframe
            mean : dask series, mean of each column
            std_dev : dask series, standard deviation of each column
            _csv_defaults : list, list of default value of each column
        """
        mean, median, mode, std_dev = self.calculate_stats(df, target_var)
        df = self.dropping_zero_var_cols(df, target_var, std_dev)
        df = self.impute(df, target_var, median, mode)
        if task_type == 'classification':
            # Classification labels must be integral.
            if df[target_var].dtype == 'float64':
                df[target_var] = df[target_var].astype(np.int64)
        # Per-dtype defaults used later by tf.decode_csv.
        dtype_map = {'float64': 0., 'int64': 0, 'object': ''}
        dtype_list = [str(dtype) for dtype in df.dtypes]
        _csv_defaults = [[dtype_map[dtype]] for dtype in dtype_list]
        if name == 'train' and task_type == 'classification':
            self.creating_explainer_lime(df, target_var)
        # dask expands the '*' into one file per partition.
        df.to_csv('/tmp/clean_*_' + str(name) + '.csv', index=False)
        return df, mean, std_dev, _csv_defaults
    def find_vocab(self, df):
        """Finds the number of levels in each categorical column.
        Helps for creation of feature columns for use in tf.data API
        Arguments:
            df : dask dataframe, Dataframe to extract the levels from
        Returns:
            A dictionary of column names and the levels in each variables
            [ 0 for numerical columns and array of levels for categorical columns]
        """
        self.is_not_used()
        cat_columns = [
            col for col in df.columns if df[col].dtype == 'object']
        continuous_cols = [
            col for col in df.columns if df[col].dtype != 'object']
        temp = dask.compute([df[col].drop_duplicates() for col in cat_columns])
        column_mapping = dict()
        for col in continuous_cols:
            column_mapping[col] = 0
        for index, col in enumerate(cat_columns):
            column_mapping[col] = np.array(temp[0][index])
        return column_mapping
    def creating_explainer_lime(self, df, target_var):
        """Creates a LIME explainer and saves it as a pickle object
        Arguments:
            df : dask dataframe, Dataframe for which explainer is to be created
            target_var : string, Output column of the dataframe
        """
        self.is_not_used()
        # LIME works on in-memory numpy data, so materialize the dask frame.
        pandas_df = df.compute()
        class_names = list(pandas_df[target_var].unique())
        pandas_df = pandas_df.drop(target_var, axis=1)
        dict_mapping = dict()
        categorical_columns = [
            col for col in pandas_df.columns if pandas_df[col].dtype == 'object']
        categorical_columns_index = [index for index in range(0, len(
            pandas_df.columns)) if pandas_df[pandas_df.columns[index]].dtype == 'object']
        for col in categorical_columns:
            # Encode categoricals as integer codes and remember the
            # code -> level mapping so explanations stay human readable.
            pandas_df[col] = pd.Categorical(
                pandas_df[col], categories=pandas_df[col].unique())
            dict_mapping[col] = dict(enumerate(pandas_df[col].cat.categories))
            pandas_df[col] = pandas_df[col].cat.codes
        feature_names = list(pandas_df.columns)
        dict_of_feature_names = dict()
        for col_index in categorical_columns_index:
            dict_of_feature_names[col_index] = dict_mapping[feature_names[col_index]].values(
            )
        explainer = lime.lime_tabular.LimeTabularExplainer(
            np.array(pandas_df),
            feature_names=feature_names,
            class_names=class_names,
            categorical_features=categorical_columns_index,
            categorical_names=dict_of_feature_names,
            verbose=True)
        # Persist explainer plus the metadata needed to decode its output.
        with open('/tmp/lime_explainer', 'wb') as dill_file:
            dill.dump(explainer, dill_file)
            dill.dump(dict_mapping, dill_file)
            dill.dump(feature_names, dill_file)
class DatasetInput(object):
    """
    Class for building a tf.data object and input function for tf.Estimator
    """

    def __init__(self, num_epochs, batch_size, buffer_size,
                 csv_defaults, csv_cols, target_var, task_type,
                 condition=None):
        """Initializes the dataset object for a csv reader.

        Arguments:
            num_epochs : integer, number of epochs to run
            batch_size : integer, batch size of the data
            buffer_size : integer, prefetch buffer size
            csv_defaults : list, default value for each column
            csv_cols : list, list of column names of the data
            target_var : string, name of the target variable
            task_type : string, ML task at hand, following options are expected
                [classification, regression, clustering]
            condition : optional, value the raw label is compared against;
                when set, the label becomes the boolean ``label == condition``
        """
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.buffer_size = buffer_size
        self.csv_defaults = csv_defaults
        self.csv_cols = csv_cols
        self.target_var = target_var
        # Populated by _create_feature_columns().
        self.feat_cols = []
        self.task_type = task_type
        self.condition = condition

    def parse_csv(self, line):
        """Decodes one record from the textline dataset into columns.

        Arguments:
            line : string tensor, one record returned by the dataset object
        Returns:
            features : dict, all feature columns except the label column
            label : tensor, the label column (binarized against
                ``self.condition`` when that is set)
        """
        parsed_line = tf.decode_csv(line, record_defaults=self.csv_defaults)
        tf.logging.info(
            'The Default datatypes read are : %s',
            self.csv_defaults)
        features = dict(zip(self.csv_cols, parsed_line))
        label = features.pop(self.target_var)
        if self.condition:
            label = tf.equal(label, self.condition)
        return features, label

    @staticmethod
    def _get_pattern(name, csv_path=None):
        """Returns the glob pattern of the cleaned data.

        Arguments:
            name : string, type of the data ['train' or 'eval']
            csv_path : string, explicit path which overrides the default pattern
        Returns:
            pattern : string, glob path of the cleaned data
        """
        pattern = '/tmp/clean_*_{}*'.format(name)
        if csv_path is not None:
            pattern = csv_path
        return pattern

    def input_fn(self, name, csv_path=None):
        """Creates a dataset object for the model to consume. Input function
        for estimator.

        Arguments:
            name : string, Name of the data [train or eval]
            csv_path : string, The path of the csv on any storage system
        Returns:
            feats : tf.data batch of feature tensors
            labs : tf.data batch of label tensors
        """
        pattern = self._get_pattern(name, csv_path)
        tf.logging.info('The Pattern of files is : %s', pattern)
        filenames = tf.matching_files(pattern=pattern)
        # skip(1) drops the header row of each csv shard; parsing is
        # parallelized across all available cores.
        dataset = tf.data.TextLineDataset(filenames).skip(1).map(
            self.parse_csv, num_parallel_calls=cpu_count())
        dataset = dataset.shuffle(buffer_size=self.batch_size * 100)
        # Skip malformed records instead of failing the whole pipeline.
        dataset = dataset.apply(tf.contrib.data.ignore_errors())
        dataset = dataset.repeat(self.num_epochs)
        dataset = dataset.batch(self.batch_size)  # determine the ideal number
        dataset = dataset.prefetch(self.buffer_size)
        iterator = dataset.make_one_shot_iterator()
        feats, labs = iterator.get_next()
        return feats, labs

    def kmeans_input_fn(self, name, csv_path=None):
        """Input function for kmeans.

        Arguments:
            name : string, Name of the data [train or eval]
            csv_path : string, The path of the csv on any storage system
        Returns:
            A single-epoch tensor containing the full feature matrix.
        """
        pattern = self._get_pattern(name, csv_path)
        tf.logging.info('The Pattern of files is : %s', pattern)
        df = dd.read_csv(pattern)
        vectors = dask.compute(df.values)
        return tf.train.limit_epochs(
            tf.convert_to_tensor(vectors[0], dtype=tf.float32), num_epochs=1)

    def create_feature_columns_wrap(self, dictionary, mean, std_dev):
        """
        Wrapper function for returning _create_feature_columns function
        Arguments:
            dictionary : dict, Dictionary with variable names and levels
            mean : dask series, mean of the data
            std_dev : dask series, standard deviation of the data
        Returns:
            _create_feature_columns function
        """
        return self._create_feature_columns(dictionary, mean, std_dev)

    def _create_feature_columns(self, dictionary, mean, std_dev):
        """Creates an instance of tf.feature_column for each column in the
        feature set. Required for canned estimators.

        Numeric columns get a mean/std normalizer; categorical columns get a
        vocabulary list with one out-of-vocabulary bucket.

        Arguments:
            dictionary : dict, column name -> 0 for numeric columns, or the
                vocabulary (levels) for categorical columns
            mean : dask series, mean of the data
            std_dev : dask series, standard deviation of the data
        Returns:
            A list of feature column objects based on the dictionary
        """
        for col, vocab in dictionary.items():
            if isinstance(vocab, int):
                # Bug fix: bind this column's statistics as lambda default
                # arguments. The previous free-variable closure late-bound the
                # shared temporaries, so every numeric column ended up
                # normalized with the statistics of the *last* column visited.
                feat_col = tf.feature_column.numeric_column(
                    col,
                    normalizer_fn=lambda x, m=mean[col], s=std_dev[col]: (x - m) / s)
            else:
                feat_col = tf.feature_column.categorical_column_with_vocabulary_list(
                    col, vocab, num_oov_buckets=1)
            self.feat_cols.append(feat_col)
        return self.feat_cols
| [
"google.cloud.storage.Client",
"tensorflow.gfile.Open",
"tensorflow.equal",
"tensorflow.logging.error",
"dask.compute",
"tensorflow.contrib.data.ignore_errors",
"tensorflow.logging.info",
"tensorflow.matching_files",
"six.moves.urllib.parse.urlparse",
"dask.dataframe.read_csv",
"tensorflow.data.... | [((6033, 6049), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (6047, 6049), False, 'from google.cloud import storage\n'), ((9436, 9485), 'dask.compute', 'dask.compute', (['mean_op', 'median_op', 'mode_op', 'std_op'], {}), '(mean_op, median_op, mode_op, std_op)\n', (9448, 9485), False, 'import dask\n'), ((16587, 16641), 'tensorflow.decode_csv', 'tf.decode_csv', (['line'], {'record_defaults': 'self.csv_defaults'}), '(line, record_defaults=self.csv_defaults)\n', (16600, 16641), True, 'import tensorflow as tf\n'), ((16650, 16723), 'tensorflow.logging.info', 'tf.logging.info', (['"""The Default datatypes read are : %s"""', 'self.csv_defaults'], {}), "('The Default datatypes read are : %s', self.csv_defaults)\n", (16665, 16723), True, 'import tensorflow as tf\n'), ((18046, 18102), 'tensorflow.logging.info', 'tf.logging.info', (['"""The Pattern of files is : %s"""', 'pattern'], {}), "('The Pattern of files is : %s', pattern)\n", (18061, 18102), True, 'import tensorflow as tf\n'), ((18123, 18157), 'tensorflow.matching_files', 'tf.matching_files', ([], {'pattern': 'pattern'}), '(pattern=pattern)\n', (18140, 18157), True, 'import tensorflow as tf\n'), ((19090, 19146), 'tensorflow.logging.info', 'tf.logging.info', (['"""The Pattern of files is : %s"""', 'pattern'], {}), "('The Pattern of files is : %s', pattern)\n", (19105, 19146), True, 'import tensorflow as tf\n'), ((19160, 19180), 'dask.dataframe.read_csv', 'dd.read_csv', (['pattern'], {}), '(pattern)\n', (19171, 19180), True, 'import dask.dataframe as dd\n'), ((19199, 19222), 'dask.compute', 'dask.compute', (['df.values'], {}), '(df.values)\n', (19211, 19222), False, 'import dask\n'), ((4348, 4479), 'dask.dataframe.read_csv', 'dd.read_csv', (['csv_path'], {'names': 'self.column_names', 'header': 'header', 'na_values': 'self.na_values', 'sample': '(12800000)', 'dtype': 'self.data_type'}), '(csv_path, names=self.column_names, header=header, na_values=\n self.na_values, 
sample=12800000, dtype=self.data_type)\n', (4359, 4479), True, 'import dask.dataframe as dd\n'), ((5088, 5151), 'tensorflow.logging.info', 'tf.logging.info', (['"""Dropping the columns : %s"""', 'drop_column_names'], {}), "('Dropping the columns : %s', drop_column_names)\n", (5103, 5151), True, 'import tensorflow as tf\n'), ((13009, 13033), 'numpy.array', 'np.array', (['temp[0][index]'], {}), '(temp[0][index])\n', (13017, 13033), True, 'import numpy as np\n'), ((14461, 14480), 'numpy.array', 'np.array', (['pandas_df'], {}), '(pandas_df)\n', (14469, 14480), True, 'import numpy as np\n'), ((14772, 14803), 'dill.dump', 'dill.dump', (['explainer', 'dill_file'], {}), '(explainer, dill_file)\n', (14781, 14803), False, 'import dill\n'), ((14816, 14850), 'dill.dump', 'dill.dump', (['dict_mapping', 'dill_file'], {}), '(dict_mapping, dill_file)\n', (14825, 14850), False, 'import dill\n'), ((14863, 14898), 'dill.dump', 'dill.dump', (['feature_names', 'dill_file'], {}), '(feature_names, dill_file)\n', (14872, 14898), False, 'import dill\n'), ((16899, 16930), 'tensorflow.equal', 'tf.equal', (['label', 'self.condition'], {}), '(label, self.condition)\n', (16907, 16930), True, 'import tensorflow as tf\n'), ((18385, 18416), 'tensorflow.contrib.data.ignore_errors', 'tf.contrib.data.ignore_errors', ([], {}), '()\n', (18414, 18416), True, 'import tensorflow as tf\n'), ((19273, 19323), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['vectors[0]'], {'dtype': 'tf.float32'}), '(vectors[0], dtype=tf.float32)\n', (19293, 19323), True, 'import tensorflow as tf\n'), ((2543, 2575), 'tensorflow.gfile.Open', 'tf.gfile.Open', (['column_names', '"""r"""'], {}), "(column_names, 'r')\n", (2556, 2575), True, 'import tensorflow as tf\n'), ((3954, 3977), 'six.moves.urllib.parse.urlparse', 'urlparse', (['self.csv_path'], {}), '(self.csv_path)\n', (3962, 3977), False, 'from six.moves.urllib.parse import urlparse\n'), ((18271, 18282), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', 
(18280, 18282), False, 'from multiprocessing import cpu_count\n'), ((20588, 20684), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['col'], {'normalizer_fn': '(lambda x: (x - tmp_mean) / tmp_std_dev)'}), '(col, normalizer_fn=lambda x: (x - tmp_mean\n ) / tmp_std_dev)\n', (20620, 20684), True, 'import tensorflow as tf\n'), ((20746, 20838), 'tensorflow.feature_column.categorical_column_with_vocabulary_list', 'tf.feature_column.categorical_column_with_vocabulary_list', (['col', 'vocab'], {'num_oov_buckets': '(1)'}), '(col, vocab,\n num_oov_buckets=1)\n', (20803, 20838), True, 'import tensorflow as tf\n'), ((3501, 3515), 'six.moves.urllib.parse.urlparse', 'urlparse', (['path'], {}), '(path)\n', (3509, 3515), False, 'from six.moves.urllib.parse import urlparse\n'), ((7519, 7544), 'tensorflow.logging.error', 'tf.logging.error', (['err_msg'], {}), '(err_msg)\n', (7535, 7544), True, 'import tensorflow as tf\n'), ((18176, 18210), 'tensorflow.data.TextLineDataset', 'tf.data.TextLineDataset', (['filenames'], {}), '(filenames)\n', (18199, 18210), True, 'import tensorflow as tf\n')] |
import numpy as np
def _numerical_gradient_no_batch(f, x):
'''梯度值计算函数'''
h = 1e-4
grad = np.zeros_like(x) #梯度值数组
for idx in range(x.size):
tmp_val = x[idx]
x[idx] = float(tmp_val) + h
fx_h1 = f(x) # f(x + h)
x[idx] = tmp_val - h
fx_h2 = f(x) # f(x - h)
grad[idx] = (fx_h1 - fx_h2) / (2*h) #计算梯度值
x[idx] = tmp_val
return grad
def numerical_gradient(f, X):
    """Numerical gradient of ``f`` at ``X``.

    Handles both a single point (1-D ``X``) and a batch of points
    (2-D ``X``, one point per row).
    """
    if X.ndim == 1:
        return _numerical_gradient_no_batch(f, X)
    grad = np.zeros_like(X)
    for row_index, row in enumerate(X):
        grad[row_index] = _numerical_gradient_no_batch(f, row)
    return grad
def gradient_descent(f, init_x, lr=0.01, step_num=100):
    """Minimize ``f`` with plain gradient descent.

    :param f: function to optimize
    :param init_x: starting point (updated in place)
    :param lr: learning rate / step size
    :param step_num: number of descent steps
    :return x, x_history: final point, and the point visited at each step
    """
    x = init_x
    history = []
    for _ in range(step_num):
        # Record a snapshot *before* the update so the trajectory starts
        # at the initial point.
        history.append(x.copy())
        step = numerical_gradient(f, x)
        x -= lr * step
    return x, np.array(history)
"numpy.array",
"numpy.zeros_like"
] | [((103, 119), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (116, 119), True, 'import numpy as np\n'), ((571, 587), 'numpy.zeros_like', 'np.zeros_like', (['X'], {}), '(X)\n', (584, 587), True, 'import numpy as np\n'), ((1112, 1131), 'numpy.array', 'np.array', (['x_history'], {}), '(x_history)\n', (1120, 1131), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import glob
from neuropixels import generalephys_mua as ephys_mua
from neuropixels.generalephys import get_waveform_duration,get_waveform_PTratio,get_waveform_repolarizationslope,option234_positions
from scipy.cluster.vq import kmeans2
import seaborn as sns;sns.set_style("ticks")
import matplotlib.pyplot as plt
import h5py
import matplotlib.path as mpath
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
from matplotlib.collections import PatchCollection
import os
def get_peak_waveform_from_template(template):
    """Return the column of ``template`` with the largest absolute amplitude.

    Falls back to a zero vector when no column exceeds zero amplitude.
    """
    best_amplitude = 0
    peak = np.zeros(np.shape(template.T)[0])
    for wv in template.T:
        amplitude = np.max(np.abs(wv))
        # Strict '>' keeps the first column on amplitude ties.
        if amplitude > best_amplitude:
            best_amplitude = amplitude
            peak = wv
    return peak
def df_from_phy_multimouse(folder,expnum='1',recnum='1',site_positions = option234_positions,**kwargs):
    # Build a unit-level DataFrame from a phy/Kilosort output tree; cohort and
    # mouse names are parsed from the folder name (convention:
    # ..._<cohort>_<mouse>), overridable via kwargs.
    # NOTE(review): implicitly returns None when 'est' is in the folder name
    # or when no spike_clusters.npy is found -- callers must handle None.
    if 'est' not in folder:
        base_folder = os.path.basename(folder)
        cohort_ = os.path.basename(base_folder).split('_')[-2]
        mouse_ = os.path.basename(base_folder).split('_')[-1]
        #traverse down tree to data
        if 'open-ephys-neuropix' in base_folder:
            try:
                rec_folder = glob.glob(folder+'/*')[0]
                print(rec_folder, 'hey')
            except:
                print(base_folder)
                return None
        else:
            rec_folder = folder
            print(rec_folder)
        raw_path = os.path.join(rec_folder,'recording'+str(recnum),'continuous')
        if len(glob.glob(raw_path+'/*100.0*'))>0:
            raw_path = glob.glob(raw_path+'/*100.0*')[0]
            print('loading from '+raw_path)
        else:
            print('could not find data folder for '+raw_path)
        if os.path.isfile(os.path.join(raw_path,'spike_clusters.npy')) :
            # df = df_from_phy(raw_path,site_positions = ephys.option234_positions,cluster_file='KS2',cohort=cohort,mouse=mouse)
            path = raw_path
            # NOTE(review): `ephys` is not defined by this module's visible
            # imports (only `ephys_mua` is imported) -- the next line likely
            # raises NameError; confirm the intended loader module.
            units = ephys.load_phy_template(path,cluster_file='cluster_group',site_positions=site_positions)
            #structures is a dictionary that defines the bounds of the structure e.g.:{'v1':(0,850), 'hpc':(850,2000)}
            mouse = [];experiment=[];cell = [];ypos = [];xpos = [];waveform=[];template=[];structure=[];times=[]
            index = []; count = 1; cohort = []
            probe_id=[]
            depth=[];#print(list(nwb_data.keys()));print(list(nwb_data['processing'].keys()));
            # NOTE(review): `index` is never appended to inside this loop, so
            # the DataFrame below is built with an empty index while the
            # columns have one entry per unit -- verify this doesn't raise a
            # length-mismatch ValueError for non-empty `units`.
            for unit in list(units.keys()):
                # Per-unit metadata: kwargs override folder-derived values.
                if 'probe' in kwargs.keys():
                    probe_id.extend([kwargs['probe']])
                else:
                    probe_id.extend(['A'])
                if 'mouse' in kwargs.keys():
                    mouse.extend([kwargs['mouse']])
                else:
                    mouse.extend([mouse_])
                if 'experiment' in kwargs.keys():
                    experiment.extend([kwargs['experiment']])
                else:
                    experiment.extend(['placeholder'])
                if 'cohort' in kwargs.keys():
                    cohort.extend([kwargs['cohort']])
                else:
                    cohort.extend([cohort_])
                xpos.extend([units[unit]['xpos']])
                ypos.extend([units[unit]['ypos']])
                template.extend([units[unit]['template']])
                times.append(units[unit]['times'])
                waveform.append(units[unit]['waveform_weights'])
            df = pd.DataFrame(index=index)
            df = df.fillna(np.nan)
            # df['nwb_id'] = nwb_id
            df['mouse'] = mouse
            df['experiment'] = experiment
            df['probe'] = probe_id
            # df['structure'] = structure
            df['cell'] = units.keys()
            df['cohort'] = cohort
            df['times'] = times
            df['ypos'] = ypos
            df['xpos'] = xpos
            # df['depth'] = depth
            df['waveform'] = waveform
            df['template'] = template
            return df
def df_from_phy(folder,expnum='1',recnum='1',site_positions = option234_positions,**kwargs):
    # Build a unit-level DataFrame from a phy/Kilosort MUA output folder.
    # `folder` is used directly as the raw data path; `expnum` and `recnum`
    # are currently unused (kept for interface compatibility with the
    # commented-out path-traversal logic below).
    # if 'est' not in folder:
    #     base_folder = os.path.basename(folder)
    #     cohort_ = os.path.basename(base_folder).split('_')[-2]
    #     mouse_ = os.path.basename(base_folder).split('_')[-1]
    #traverse down tree to data
    #     if 'open-ephys-neuropix' in base_folder:
    #         try:
    #             rec_folder = glob.glob(folder+'/*')[0]
    #         except:
    #             print(base_folder)
    #             return None
    #     else:
    #         rec_folder = folder
    #     raw_path = os.path.join(rec_folder,'experiment'+str(expnum),'recording'+str(recnum),'continuous')
    #     if len(glob.glob(raw_path+'/*100.0*'))>0:
    #         raw_path = glob.glob(raw_path+'/*100.0*')[0]
    #         print('loading from '+raw_path)
    #     else:
    #         print('could not find data folder for '+raw_path)
    raw_path=folder
    if 'cohort' in kwargs.keys():
        cohort = kwargs['cohort']
    else:
        cohort = None
    if 'mouse' in kwargs.keys():
        mouse = kwargs['mouse']
    else:
        mouse = None
    # df = df_from_phy(raw_path,site_positions = ephys.option234_positions,cluster_file='KS2',cohort=cohort,mouse=mouse)
    path = raw_path
    #units = ephys.load_phy_template(path,cluster_file='KS2',site_positions=site_positions)
    units = ephys_mua.load_phy_template_mua(path,site_positions=site_positions)
    #structures is a dictionary that defines the bounds of the structure e.g.:{'v1':(0,850), 'hpc':(850,2000)}
    # NOTE(review): the `mouse` and `cohort` values computed above are
    # immediately clobbered by the empty lists below -- confirm intent.
    mouse = [];experiment=[];cell = [];ypos = [];xpos = [];waveform=[];template=[];structure=[];times=[]
    index = []; count = 1; cohort = []
    probe_id=[]
    depth=[];#print(list(nwb_data.keys()));print(list(nwb_data['processing'].keys()));
    for unit in list(units.index):
        if 'probe' in kwargs.keys():
            probe_id.extend([kwargs['probe']])
        else:
            probe_id.extend(['A'])
        if 'mouse' in kwargs.keys():
            mouse.extend([kwargs['mouse']])
        else:
            # NOTE(review): `mouse_` is undefined in this function (it is only
            # set inside the commented-out block above) -- NameError when no
            # 'mouse' kwarg is passed; confirm the intended fallback.
            mouse.extend([mouse_])
        if 'experiment' in kwargs.keys():
            experiment.extend([kwargs['experiment']])
        else:
            experiment.extend(['placeholder'])
        if 'cohort' in kwargs.keys():
            cohort.extend([kwargs['cohort']])
        else:
            # NOTE(review): `cohort_` is likewise undefined here.
            cohort.extend([cohort_])
    df = units
    df['mouse'] = mouse
    df['experiment'] = experiment
    df['probe'] = probe_id
    # df['structure'] = structure
    df['cell'] = units.index
    df['cohort'] = cohort
    df['times'] = units['times']
    df['ypos'] = units['ypos']
    df['xpos'] = units['xpos']
    # df['depth'] = xpos
    df['waveform'] = units['waveform_weights']
    df['template'] = units['template']
    return df
def df_from_nwb(nwb_data, structures=None, insertion_angle=55, nwbid=0):
    """Flatten per-unit spike data from an NWB file into a pandas DataFrame.

    Arguments:
        nwb_data : open h5py.File (or dict-like) NWB data, or a string path
            to an .nwb file (opened with h5py in that case)
        structures : dict, optional depth bounds per structure used to label
            units, e.g. {'v1': (0, 850), 'hpc': (850, 2000)}
        insertion_angle : number, kept for interface compatibility (unused)
        nwbid : value stored in the 'nwb_id' column; when ``nwb_data`` is a
            path, the path itself is used instead

    Returns:
        pandas.DataFrame with one row per unit and columns
        [nwb_id, mouse, experiment, probe, structure, cell, times, ypos,
         xpos, depth, waveform, template]
    """
    if type(nwb_data) == str:
        # A path was passed in: remember it as the id and open the file.
        nwbid = nwb_data
        nwb_data = h5py.File(nwb_data)
    mouse = [];experiment=[];cell = [];ypos = [];xpos = [];waveform=[];template=[];structure=[];times=[]
    index = []; count = 1
    nwb_id = [];probe_id=[]
    depth=[]
    if 'processing' in nwb_data.keys():
        for probe in list(nwb_data['processing'].keys()):
            if 'UnitTimes' in list(nwb_data['processing'][probe].keys()):
                for i, u in enumerate(list(nwb_data['processing'][probe]['UnitTimes'].keys())):
                    if u != 'unit_list':
                        unit_group = nwb_data['processing'][probe]['UnitTimes'][u]
                        nwb_id.append(nwbid)
                        probe_id.append(probe)
                        index.append(count);count+=1
                        mouse.append(str(np.array(nwb_data.get('identifier'))))
                        experiment.append(1)
                        cell.append(u)
                        times.append(np.array(unit_group['times']))
                        if 'ypos' in list(unit_group.keys()):
                            ypos.append(np.array(unit_group['ypos']))
                            has_ypos = True
                        else:
                            ypos.append(None)
                            has_ypos = False
                        if 'depth' in list(unit_group.keys()):
                            depth.append(np.array(unit_group['depth']))
                        else:
                            # Fall back to ypos as a depth proxy when present.
                            if has_ypos:
                                depth.append(np.array(unit_group['ypos']))
                            else:
                                depth.append(None)
                        if 'xpos' in list(unit_group.keys()):
                            xpos.append(np.array(unit_group['xpos']))
                        else:
                            xpos.append(None)
                        template.append(np.array(unit_group['template']))
                        waveform.append(get_peak_waveform_from_template(template[-1]))
                        # Label the unit with the structure whose depth bounds
                        # contain its ypos (last matching structure wins).
                        if structures is not None:
                            structur = None
                            # Py3 fix: dict.iteritems() does not exist in
                            # Python 3 -- use items().
                            for struct, bounds in structures.items():
                                if ypos[-1] > bounds[0] and ypos[-1] < bounds[1]:
                                    structur = struct
                        else:
                            structur = None
                        structure.append(structur)
    df = pd.DataFrame(index=index)
    df = df.fillna(np.nan)
    df['nwb_id'] = nwb_id
    df['mouse'] = mouse
    df['experiment'] = experiment
    df['probe'] = probe_id
    df['structure'] = structure
    df['cell'] = cell
    df['times'] = times
    df['ypos'] = ypos
    df['xpos'] = xpos
    df['depth'] = depth
    df['waveform'] = waveform
    df['template'] = template
    return df
def classify_waveform_shape(df,plots=False,save_plots=False,basepath='',kmeans=0):
    """Classify each unit's mean waveform as regular-spiking ('rs'),
    fast-spiking ('fs'), upward ('up'), or putative axonal ('axon').

    Parameters
    ----------
    df : pandas.DataFrame
        Must have a `waveform` column; gains `waveform_duration`,
        `waveform_PTratio`, `waveform_repolarizationslope`, and
        `waveform_class` columns.
    plots : bool
        When True, call plot_waveform_classification() on the result.
    save_plots, basepath :
        Forwarded to plot_waveform_classification().
    kmeans : int
        0 (default) splits rs/fs on a fixed 0.4 ms duration threshold;
        otherwise run kmeans2 with this many clusters on the normalized
        features.  NOTE(review): the cluster->label mapping only defines
        two labels, so values other than 2 would index past the label
        list (unchanged from the original behavior).

    Returns
    -------
    pandas.DataFrame
        The same `df`, mutated in place with the new columns.
    """
    n_units = np.shape(df)[0]
    durations = np.zeros(n_units)
    PTratio = np.zeros(n_units)
    repolarizationslope = np.zeros(n_units)
    for i, waveform in enumerate(df.waveform):
        durations[i] = get_waveform_duration(waveform)
        PTratio[i] = get_waveform_PTratio(waveform)
        repolarizationslope[i] = get_waveform_repolarizationslope(waveform, window=18)
    df['waveform_duration'] = durations
    df['waveform_PTratio'] = PTratio
    df['waveform_repolarizationslope'] = repolarizationslope
    if kmeans == 0:
        # Split on the marginal distribution of durations: < 0.4 ms -> fs.
        # (The original also ran a 2-cluster kmeans2 first, then discarded
        # its result here -- that dead call is removed.)
        waveform_class = ['fs' if duration < 0.0004 else 'rs' for duration in durations]
    else:
        def _norm(x):
            # scale a feature into the [0, 1] range
            return (x - np.min(x)) / np.max(x - np.min(x))
        features = np.vstack((_norm(durations), _norm(PTratio), _norm(repolarizationslope))).T
        waveform_k = kmeans2(features, kmeans, iter=300, thresh=5e-6, minit='points')
        # map clusters to labels so the cluster with the shorter mean
        # duration is 'fs'
        if np.mean(durations[np.where(waveform_k[1] == 0)[0]]) < np.mean(durations[np.where(waveform_k[1] == 1)[0]]):
            waveform_class_ids = ['fs', 'rs']
        else:
            waveform_class_ids = ['rs', 'fs']
        waveform_class = [waveform_class_ids[k] for k in waveform_k[1]]
    # spikes whose peak exceeds their trough get their own class, because
    # we're not sure how they fit in this framework
    waveform_class = [waveform_class[i] if ratio < 1.0 else 'up' for i, ratio in enumerate(PTratio)]
    # narrow upward spikes are most consistent with axonal signals
    waveform_class = ['axon' if durations[i] < 0.0004 and c == 'up' else c
                      for i, c in enumerate(waveform_class)]
    df['waveform_class'] = waveform_class
    if plots:
        plot_waveform_classification(durations, PTratio, repolarizationslope, df,
                                     save_plots=save_plots, basepath=basepath)
    return df
def _save_panel(basepath, panelname):
    """Save the current figure under <basepath>/figures/panels/<panelname>
    as both .png (300 dpi) and .eps."""
    fig = plt.gcf()
    # `format=` is the supported savefig keyword (the original passed `fmt=`,
    # which recent matplotlib rejects); the file extension implies it anyway.
    fig.savefig(os.path.join(basepath, 'figures', 'panels', panelname + '.png'), format='png', dpi=300)
    fig.savefig(os.path.join(basepath, 'figures', 'panels', panelname + '.eps'), format='eps')

def plot_waveform_classification(durations, PTratio, repolarizationslope, df,save_plots=False, basepath=''):
    """Diagnostic plots for classify_waveform_shape().

    Produces (1) pairwise scatter plots of the three waveform features
    colored by class, (2) per-class and stacked duration histograms, and
    (3) overlaid normalized waveforms with class means.

    Parameters
    ----------
    durations, PTratio, repolarizationslope : array-like
        Per-unit waveform features, positionally aligned with df's rows.
    df : pandas.DataFrame
        Must have `waveform` and `waveform_class` columns; `df.waveform[1]`
        is used as the reference waveform for the time axis, so the frame
        is assumed to be indexed from 1.
    save_plots : bool
        When True, save each panel under <basepath>/figures/panels/.
    basepath : str
        Root directory for saved panels.
    """
    rs = np.where(df.waveform_class == 'rs')[0]
    fs = np.where(df.waveform_class == 'fs')[0]
    axon = np.where(df.waveform_class == 'axon')[0]
    # --- pairwise feature scatters, one color per class ---
    f, ax = plt.subplots(1, 3, figsize=(8, 3))
    ax[0].plot(durations[rs], PTratio[rs], 'o', ms=3.2)
    ax[0].plot(durations[fs], PTratio[fs], 'o', ms=3.2)
    ax[0].plot(durations[axon], PTratio[axon], 'o', ms=3.2)
    ax[0].set_xlabel('width (sec)')
    ax[0].set_ylabel('peak/trough ratio')
    ax[1].plot(durations[rs], repolarizationslope[rs], 'o', ms=3.2)
    ax[1].plot(durations[fs], repolarizationslope[fs], 'o', ms=3.2)
    ax[1].plot(durations[axon], repolarizationslope[axon], 'o', ms=3.2)
    ax[1].set_xlabel('width (sec)')
    ax[1].set_ylabel('repolarization slope')
    ax[2].plot(PTratio[rs], repolarizationslope[rs], 'o', ms=3.2)
    ax[2].plot(PTratio[fs], repolarizationslope[fs], 'o', ms=3.2)
    ax[2].plot(PTratio[axon], repolarizationslope[axon], 'o', ms=3.2)
    ax[2].set_ylabel('repolarization slope')
    ax[2].set_xlabel('peak/trough ratio')
    ax[0].set_xlim(0.0, 0.0015); ax[1].set_xlim(0.0, 0.0015)
    ax[0].set_ylim(0, 1.1); ax[2].set_xlim(0, 1.1)
    plt.tight_layout()
    for axis in ax:
        axis.locator_params(axis='x', nbins=4)
    ax[2].legend(loc='upper right')
    plt.tight_layout()
    if save_plots:
        _save_panel(basepath, 'waveforms_clusters')
    # --- duration histograms: per-class overlays, then a stacked figure ---
    nbins = 36
    plt.hist(durations[rs], range=(0, 0.0015), bins=nbins)
    plt.hist(durations[fs], range=(0, 0.0015), bins=nbins)
    plt.hist(durations[axon], range=(0, 0.0015), bins=nbins)
    plt.figure()
    plt.hist((durations[rs], durations[fs], durations[axon]),
             range=(0, 0.0015), bins=nbins, stacked=True)
    plt.xlabel('waveform duration (sec)')
    plt.ylabel('neuron count')
    plt.tight_layout()
    if save_plots:
        _save_panel(basepath, 'waveforms_durationhistogram')
    # --- individual + mean normalized waveforms ---
    plt.figure(figsize=(4, 3))
    # Time axis in msec, zeroed at the first positive sample of df.waveform[1];
    # assumes a 30 kHz sample rate -- TODO confirm against the recording.
    zero_sample = np.where(df.waveform[1] > 0.)[0][0]
    n_samples = len(df.waveform[1])
    waveform_time = np.linspace(-1 * zero_sample / 30000.,
                                (n_samples - zero_sample) / 30000., n_samples) * 1000
    for i, waveform in enumerate(df.waveform):
        # NOTE(review): the 0-based enumerate counter is used as a pandas
        # label here (as in the original) -- confirm df's index includes it.
        if df.waveform_class[i] == 'rs':
            plt.plot(waveform_time, waveform / np.max(np.abs(waveform)), color=sns.color_palette()[0], alpha=0.01)
        if df.waveform_class[i] == 'axon':
            plt.plot(waveform_time, waveform / np.max(np.abs(waveform)), color=sns.color_palette()[2], alpha=0.01)
        if df.waveform_class[i] == 'fs':
            plt.plot(waveform_time, waveform / np.max(np.abs(waveform)), color=sns.color_palette()[1], alpha=0.01)
    # plot normalized class means ('up' and 'axon' are excluded, as before)
    for waveform_class in ['rs', 'fs', 'axon']:
        if waveform_class != 'up' and waveform_class != 'axon':
            mean_wv = np.mean(df.waveform[df.waveform_class == waveform_class])
            plt.plot(waveform_time, mean_wv / np.max(np.abs(mean_wv)), lw=4)
    plt.title('RS: ' + str(len(df.waveform_class[df.waveform_class == 'rs'])) +
              ' FS: ' + str(len(df.waveform_class[df.waveform_class == 'fs'])) +
              ' axon: ' + str(len(df.waveform_class[df.waveform_class == 'axon'])))
    plt.gca().set_xlim(-1., 1.4)
    plt.gca().legend(loc='upper left')
    plt.gca().set_ylabel('normalized amplitude', size=10)
    plt.gca().set_xlabel('time (msec)', size=10)
    plt.tight_layout()
    if save_plots:
        _save_panel(basepath, 'waveforms_mean_peak')
def drawPhaseIIIProbe(colors,ax=-1,highlight=-1,clim=None, cmap='viridis', drawLines=False):
    '''
    Args:
        colors: a list of values to be plotted as colors on the probe, one
            per channel; must be non-empty
        ax: matplotlib axes to draw into; -1 (default) creates a new figure
        highlight: channel index to highlight in white, or -1 for none
        clim: color map limits (min, max), or None for auto-scaling
        cmap: color map to use; default viridis
        drawLines: whether or not to draw the outline of the probe; default is False
    Returns:
        None, plots an image of the input colors on a Phase3A Neuropixels probes
    written by <NAME>
    '''
    if ax == -1:
        fig, ax = plt.subplots()
    patches = []
    for ch in range(0, len(colors)):
        # Sites sit four to a double-row: columns -1.5/0.5 on the even row,
        # -0.5/1.5 on the odd row.
        channelPos = ch % 4
        # Integer row index. Plain '/' is float division on Python 3 and
        # produced fractional rows; '//' is correct on both 2 and 3.
        channelHeight = ch // 4
        if channelPos == 0:
            xloc = -1.5
            yloc = channelHeight * 2
        elif channelPos == 1:
            xloc = 0.5
            yloc = channelHeight * 2
        elif channelPos == 2:
            xloc = -0.5
            yloc = channelHeight * 2 + 1
        else:
            xloc = 1.5
            yloc = channelHeight * 2 + 1
        rect = mpatches.Rectangle([xloc, yloc], 1.0, 2.0, ec="none", ls='None')
        if drawLines:
            # horizontal guide lines every 50 (gray) and 100 (black) channels
            if ch % 50 == 0:
                plt.plot([-5, 6], [yloc, yloc], 'gray')
            if ch % 100 == 0:
                plt.plot([-5, 6], [yloc, yloc], '-k')
        patches.append(rect)
        if ch == highlight:
            highlightX = xloc
            highlightY = yloc
            highlight = 1
    collection = PatchCollection(patches, cmap=cmap)
    collection.set_array(colors)
    if clim is not None:
        collection.set_clim(clim[0], clim[1])
    ax.add_collection(collection)
    # tick marks along the left edge, one every 50 channels
    for ch in np.arange(0, len(colors), 50):
        plt.plot([-2.5, -2], [ch / 2, ch / 2], 'k')
    if highlight > -1:
        # NOTE(review): highlightX/Y are only bound when `highlight` named a
        # valid channel; an out-of-range highlight raises NameError here
        # (unchanged from the original).
        print(highlightY)
        plt.plot(highlightX, highlightY, color=[1, 1, 1])
    plt.axis('off')
    plt.xlim((-5, 6))
    plt.ylim((-5, ch / 2 + 20))
def get_spike_limits(nwb_data):
    # Return (earliest first-spike time, latest last-spike time) across all
    # units of the first probe in the file's 'processing' group.
    # NOTE(review): `nwb_data['processing'].keys()[0]` only works on Python 2;
    # on Python 3, dict/h5py key views are not subscriptable -- wrap the keys
    # in list(...) before indexing. Confirm the intended runtime.
    firsts = [np.array(nwb_data['processing'][nwb_data['processing'].keys()[0]]['UnitTimes'][other]['times'])[0]\
                for other in np.array(nwb_data['processing'][nwb_data['processing'].keys()[0]]['UnitTimes']['unit_list'])]
    lasts = [np.array(nwb_data['processing'][nwb_data['processing'].keys()[0]]['UnitTimes'][other]['times'])[-1]\
                for other in np.array(nwb_data['processing'][nwb_data['processing'].keys()[0]]['UnitTimes']['unit_list'])]
    return np.min(firsts),np.max(lasts) | [
"matplotlib.pyplot.ylabel",
"seaborn.set_style",
"numpy.array",
"neuropixels.generalephys.get_waveform_repolarizationslope",
"numpy.mean",
"neuropixels.generalephys.get_waveform_duration",
"seaborn.color_palette",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
... | [((297, 319), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {}), "('ticks')\n", (310, 319), True, 'import seaborn as sns\n'), ((5513, 5581), 'neuropixels.generalephys_mua.load_phy_template_mua', 'ephys_mua.load_phy_template_mua', (['path'], {'site_positions': 'site_positions'}), '(path, site_positions=site_positions)\n', (5544, 5581), True, 'from neuropixels import generalephys_mua as ephys_mua\n'), ((9987, 10012), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'index'}), '(index=index)\n', (9999, 10012), True, 'import pandas as pd\n'), ((14316, 14350), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(8, 3)'}), '(1, 3, figsize=(8, 3))\n', (14328, 14350), True, 'import matplotlib.pyplot as plt\n'), ((16196, 16214), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (16212, 16214), True, 'import matplotlib.pyplot as plt\n'), ((16402, 16420), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (16418, 16420), True, 'import matplotlib.pyplot as plt\n'), ((16904, 16916), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (16914, 16916), True, 'import matplotlib.pyplot as plt\n'), ((17181, 17218), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""waveform duration (sec)"""'], {}), "('waveform duration (sec)')\n", (17191, 17218), True, 'import matplotlib.pyplot as plt\n'), ((17220, 17246), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""neuron count"""'], {}), "('neuron count')\n", (17230, 17246), True, 'import matplotlib.pyplot as plt\n'), ((17291, 17309), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17307, 17309), True, 'import matplotlib.pyplot as plt\n'), ((17517, 17543), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 3)'}), '(figsize=(4, 3))\n', (17527, 17543), True, 'import matplotlib.pyplot as plt\n'), ((19688, 19706), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19704, 19706), True, 'import 
matplotlib.pyplot as plt\n'), ((21098, 21133), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['patches'], {'cmap': 'cmap'}), '(patches, cmap=cmap)\n', (21113, 21133), False, 'from matplotlib.collections import PatchCollection\n'), ((21432, 21447), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (21440, 21447), True, 'import matplotlib.pyplot as plt\n'), ((21449, 21466), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-5, 6)'], {}), '((-5, 6))\n', (21457, 21466), True, 'import matplotlib.pyplot as plt\n'), ((21467, 21494), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-5, ch / 2 + 20)'], {}), '((-5, ch / 2 + 20))\n', (21475, 21494), True, 'import matplotlib.pyplot as plt\n'), ((970, 994), 'os.path.basename', 'os.path.basename', (['folder'], {}), '(folder)\n', (986, 994), False, 'import os\n'), ((6941, 6960), 'h5py.File', 'h5py.File', (['nwb_data'], {}), '(nwb_data)\n', (6950, 6960), False, 'import h5py\n'), ((10653, 10684), 'neuropixels.generalephys.get_waveform_duration', 'get_waveform_duration', (['waveform'], {}), '(waveform)\n', (10674, 10684), False, 'from neuropixels.generalephys import get_waveform_duration, get_waveform_PTratio, get_waveform_repolarizationslope, option234_positions\n'), ((10698, 10728), 'neuropixels.generalephys.get_waveform_PTratio', 'get_waveform_PTratio', (['waveform'], {}), '(waveform)\n', (10718, 10728), False, 'from neuropixels.generalephys import get_waveform_duration, get_waveform_PTratio, get_waveform_repolarizationslope, option234_positions\n'), ((10754, 10807), 'neuropixels.generalephys.get_waveform_repolarizationslope', 'get_waveform_repolarizationslope', (['waveform'], {'window': '(18)'}), '(waveform, window=18)\n', (10786, 10807), False, 'from neuropixels.generalephys import get_waveform_duration, get_waveform_PTratio, get_waveform_repolarizationslope, option234_positions\n'), ((20376, 20390), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (20388, 20390), True, 'import 
matplotlib.pyplot as plt\n'), ((20754, 20818), 'matplotlib.patches.Rectangle', 'mpatches.Rectangle', (['[xloc, yloc]', '(1.0)', '(2.0)'], {'ec': '"""none"""', 'ls': '"""None"""'}), "([xloc, yloc], 1.0, 2.0, ec='none', ls='None')\n", (20772, 20818), True, 'import matplotlib.patches as mpatches\n'), ((21298, 21341), 'matplotlib.pyplot.plot', 'plt.plot', (['[-2.5, -2]', '[ch / 2, ch / 2]', '"""k"""'], {}), "([-2.5, -2], [ch / 2, ch / 2], 'k')\n", (21306, 21341), True, 'import matplotlib.pyplot as plt\n'), ((21381, 21430), 'matplotlib.pyplot.plot', 'plt.plot', (['highlightX', 'highlightY'], {'color': '[1, 1, 1]'}), '(highlightX, highlightY, color=[1, 1, 1])\n', (21389, 21430), True, 'import matplotlib.pyplot as plt\n'), ((22006, 22020), 'numpy.min', 'np.min', (['firsts'], {}), '(firsts)\n', (22012, 22020), True, 'import numpy as np\n'), ((22021, 22034), 'numpy.max', 'np.max', (['lasts'], {}), '(lasts)\n', (22027, 22034), True, 'import numpy as np\n'), ((619, 639), 'numpy.shape', 'np.shape', (['template.T'], {}), '(template.T)\n', (627, 639), True, 'import numpy as np\n'), ((1831, 1875), 'os.path.join', 'os.path.join', (['raw_path', '"""spike_clusters.npy"""'], {}), "(raw_path, 'spike_clusters.npy')\n", (1843, 1875), False, 'import os\n'), ((3593, 3618), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'index'}), '(index=index)\n', (3605, 3618), True, 'import pandas as pd\n'), ((10477, 10489), 'numpy.shape', 'np.shape', (['df'], {}), '(df)\n', (10485, 10489), True, 'import numpy as np\n'), ((10514, 10526), 'numpy.shape', 'np.shape', (['df'], {}), '(df)\n', (10522, 10526), True, 'import numpy as np\n'), ((10563, 10575), 'numpy.shape', 'np.shape', (['df'], {}), '(df)\n', (10571, 10575), True, 'import numpy as np\n'), ((16457, 16520), 'os.path.join', 'os.path.join', (['basepath', '"""figures"""', '"""panels"""', "(panelname + '.png')"], {}), "(basepath, 'figures', 'panels', panelname + '.png')\n", (16469, 16520), False, 'import os\n'), ((16555, 16618), 'os.path.join', 
'os.path.join', (['basepath', '"""figures"""', '"""panels"""', "(panelname + '.eps')"], {}), "(basepath, 'figures', 'panels', panelname + '.eps')\n", (16567, 16618), False, 'import os\n'), ((17346, 17409), 'os.path.join', 'os.path.join', (['basepath', '"""figures"""', '"""panels"""', "(panelname + '.png')"], {}), "(basepath, 'figures', 'panels', panelname + '.png')\n", (17358, 17409), False, 'import os\n'), ((17444, 17507), 'os.path.join', 'os.path.join', (['basepath', '"""figures"""', '"""panels"""', "(panelname + '.eps')"], {}), "(basepath, 'figures', 'panels', panelname + '.eps')\n", (17456, 17507), False, 'import os\n'), ((19424, 19433), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (19431, 19433), True, 'import matplotlib.pyplot as plt\n'), ((19453, 19462), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (19460, 19462), True, 'import matplotlib.pyplot as plt\n'), ((19552, 19561), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (19559, 19561), True, 'import matplotlib.pyplot as plt\n'), ((19608, 19617), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (19615, 19617), True, 'import matplotlib.pyplot as plt\n'), ((19743, 19806), 'os.path.join', 'os.path.join', (['basepath', '"""figures"""', '"""panels"""', "(panelname + '.png')"], {}), "(basepath, 'figures', 'panels', panelname + '.png')\n", (19755, 19806), False, 'import os\n'), ((19841, 19904), 'os.path.join', 'os.path.join', (['basepath', '"""figures"""', '"""panels"""', "(panelname + '.eps')"], {}), "(basepath, 'figures', 'panels', panelname + '.eps')\n", (19853, 19904), False, 'import os\n'), ((701, 711), 'numpy.abs', 'np.abs', (['wv'], {}), '(wv)\n', (707, 711), True, 'import numpy as np\n'), ((745, 755), 'numpy.abs', 'np.abs', (['wv'], {}), '(wv)\n', (751, 755), True, 'import numpy as np\n'), ((1575, 1607), 'glob.glob', 'glob.glob', (["(raw_path + '/*100.0*')"], {}), "(raw_path + '/*100.0*')\n", (1584, 1607), False, 'import glob\n'), ((1633, 1665), 'glob.glob', 'glob.glob', 
(["(raw_path + '/*100.0*')"], {}), "(raw_path + '/*100.0*')\n", (1642, 1665), False, 'import glob\n'), ((14370, 14405), 'numpy.where', 'np.where', (["(df.waveform_class == 'rs')"], {}), "(df.waveform_class == 'rs')\n", (14378, 14405), True, 'import numpy as np\n'), ((14416, 14451), 'numpy.where', 'np.where', (["(df.waveform_class == 'rs')"], {}), "(df.waveform_class == 'rs')\n", (14424, 14451), True, 'import numpy as np\n'), ((14488, 14523), 'numpy.where', 'np.where', (["(df.waveform_class == 'fs')"], {}), "(df.waveform_class == 'fs')\n", (14496, 14523), True, 'import numpy as np\n'), ((14534, 14569), 'numpy.where', 'np.where', (["(df.waveform_class == 'fs')"], {}), "(df.waveform_class == 'fs')\n", (14542, 14569), True, 'import numpy as np\n'), ((14725, 14762), 'numpy.where', 'np.where', (["(df.waveform_class == 'axon')"], {}), "(df.waveform_class == 'axon')\n", (14733, 14762), True, 'import numpy as np\n'), ((14773, 14810), 'numpy.where', 'np.where', (["(df.waveform_class == 'axon')"], {}), "(df.waveform_class == 'axon')\n", (14781, 14810), True, 'import numpy as np\n'), ((14919, 14954), 'numpy.where', 'np.where', (["(df.waveform_class == 'rs')"], {}), "(df.waveform_class == 'rs')\n", (14927, 14954), True, 'import numpy as np\n'), ((14977, 15012), 'numpy.where', 'np.where', (["(df.waveform_class == 'rs')"], {}), "(df.waveform_class == 'rs')\n", (14985, 15012), True, 'import numpy as np\n'), ((15049, 15084), 'numpy.where', 'np.where', (["(df.waveform_class == 'fs')"], {}), "(df.waveform_class == 'fs')\n", (15057, 15084), True, 'import numpy as np\n'), ((15107, 15142), 'numpy.where', 'np.where', (["(df.waveform_class == 'fs')"], {}), "(df.waveform_class == 'fs')\n", (15115, 15142), True, 'import numpy as np\n'), ((15310, 15347), 'numpy.where', 'np.where', (["(df.waveform_class == 'axon')"], {}), "(df.waveform_class == 'axon')\n", (15318, 15347), True, 'import numpy as np\n'), ((15370, 15407), 'numpy.where', 'np.where', (["(df.waveform_class == 'axon')"], {}), 
"(df.waveform_class == 'axon')\n", (15378, 15407), True, 'import numpy as np\n'), ((15517, 15552), 'numpy.where', 'np.where', (["(df.waveform_class == 'rs')"], {}), "(df.waveform_class == 'rs')\n", (15525, 15552), True, 'import numpy as np\n'), ((15575, 15610), 'numpy.where', 'np.where', (["(df.waveform_class == 'rs')"], {}), "(df.waveform_class == 'rs')\n", (15583, 15610), True, 'import numpy as np\n'), ((15645, 15680), 'numpy.where', 'np.where', (["(df.waveform_class == 'fs')"], {}), "(df.waveform_class == 'fs')\n", (15653, 15680), True, 'import numpy as np\n'), ((15703, 15738), 'numpy.where', 'np.where', (["(df.waveform_class == 'fs')"], {}), "(df.waveform_class == 'fs')\n", (15711, 15738), True, 'import numpy as np\n'), ((15902, 15939), 'numpy.where', 'np.where', (["(df.waveform_class == 'axon')"], {}), "(df.waveform_class == 'axon')\n", (15910, 15939), True, 'import numpy as np\n'), ((15962, 15999), 'numpy.where', 'np.where', (["(df.waveform_class == 'axon')"], {}), "(df.waveform_class == 'axon')\n", (15970, 15999), True, 'import numpy as np\n'), ((16439, 16448), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (16446, 16448), True, 'import matplotlib.pyplot as plt\n'), ((16537, 16546), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (16544, 16546), True, 'import matplotlib.pyplot as plt\n'), ((16660, 16695), 'numpy.where', 'np.where', (["(df.waveform_class == 'rs')"], {}), "(df.waveform_class == 'rs')\n", (16668, 16695), True, 'import numpy as np\n'), ((16747, 16782), 'numpy.where', 'np.where', (["(df.waveform_class == 'fs')"], {}), "(df.waveform_class == 'fs')\n", (16755, 16782), True, 'import numpy as np\n'), ((16834, 16871), 'numpy.where', 'np.where', (["(df.waveform_class == 'axon')"], {}), "(df.waveform_class == 'axon')\n", (16842, 16871), True, 'import numpy as np\n'), ((17328, 17337), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (17335, 17337), True, 'import matplotlib.pyplot as plt\n'), ((17426, 17435), 'matplotlib.pyplot.gcf', 
'plt.gcf', ([], {}), '()\n', (17433, 17435), True, 'import matplotlib.pyplot as plt\n'), ((19725, 19734), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (19732, 19734), True, 'import matplotlib.pyplot as plt\n'), ((19823, 19832), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (19830, 19832), True, 'import matplotlib.pyplot as plt\n'), ((20862, 20901), 'matplotlib.pyplot.plot', 'plt.plot', (['[-5, 6]', '[yloc, yloc]', '"""gray"""'], {}), "([-5, 6], [yloc, yloc], 'gray')\n", (20870, 20901), True, 'import matplotlib.pyplot as plt\n'), ((20932, 20969), 'matplotlib.pyplot.plot', 'plt.plot', (['[-5, 6]', '[yloc, yloc]', '"""-k"""'], {}), "([-5, 6], [yloc, yloc], '-k')\n", (20940, 20969), True, 'import matplotlib.pyplot as plt\n'), ((1013, 1042), 'os.path.basename', 'os.path.basename', (['base_folder'], {}), '(base_folder)\n', (1029, 1042), False, 'import os\n'), ((1076, 1105), 'os.path.basename', 'os.path.basename', (['base_folder'], {}), '(base_folder)\n', (1092, 1105), False, 'import os\n'), ((1253, 1277), 'glob.glob', 'glob.glob', (["(folder + '/*')"], {}), "(folder + '/*')\n", (1262, 1277), False, 'import glob\n'), ((11804, 11832), 'numpy.where', 'np.where', (['(waveform_k[1] == 0)'], {}), '(waveform_k[1] == 0)\n', (11812, 11832), True, 'import numpy as np\n'), ((11856, 11884), 'numpy.where', 'np.where', (['(waveform_k[1] == 1)'], {}), '(waveform_k[1] == 1)\n', (11864, 11884), True, 'import numpy as np\n'), ((16938, 16973), 'numpy.where', 'np.where', (["(df.waveform_class == 'rs')"], {}), "(df.waveform_class == 'rs')\n", (16946, 16973), True, 'import numpy as np\n'), ((16986, 17021), 'numpy.where', 'np.where', (["(df.waveform_class == 'fs')"], {}), "(df.waveform_class == 'fs')\n", (16994, 17021), True, 'import numpy as np\n'), ((17034, 17071), 'numpy.where', 'np.where', (["(df.waveform_class == 'axon')"], {}), "(df.waveform_class == 'axon')\n", (17042, 17071), True, 'import numpy as np\n'), ((18556, 18613), 'numpy.mean', 'np.mean', 
(['df.waveform[df.waveform_class == waveform_class]'], {}), '(df.waveform[df.waveform_class == waveform_class])\n', (18563, 18613), True, 'import numpy as np\n'), ((13037, 13065), 'numpy.where', 'np.where', (['(waveform_k[1] == 0)'], {}), '(waveform_k[1] == 0)\n', (13045, 13065), True, 'import numpy as np\n'), ((13089, 13117), 'numpy.where', 'np.where', (['(waveform_k[1] == 1)'], {}), '(waveform_k[1] == 1)\n', (13097, 13117), True, 'import numpy as np\n'), ((17914, 17930), 'numpy.abs', 'np.abs', (['waveform'], {}), '(waveform)\n', (17920, 17930), True, 'import numpy as np\n'), ((17938, 17957), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (17955, 17957), True, 'import seaborn as sns\n'), ((18118, 18134), 'numpy.abs', 'np.abs', (['waveform'], {}), '(waveform)\n', (18124, 18134), True, 'import numpy as np\n'), ((18142, 18161), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (18159, 18161), True, 'import seaborn as sns\n'), ((18318, 18334), 'numpy.abs', 'np.abs', (['waveform'], {}), '(waveform)\n', (18324, 18334), True, 'import numpy as np\n'), ((18342, 18361), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (18359, 18361), True, 'import seaborn as sns\n'), ((8011, 8075), 'numpy.array', 'np.array', (["nwb_data['processing'][probe]['UnitTimes'][u]['times']"], {}), "(nwb_data['processing'][probe]['UnitTimes'][u]['times'])\n", (8019, 8075), True, 'import numpy as np\n'), ((9394, 9461), 'numpy.array', 'np.array', (["nwb_data['processing'][probe]['UnitTimes'][u]['template']"], {}), "(nwb_data['processing'][probe]['UnitTimes'][u]['template'])\n", (9402, 9461), True, 'import numpy as np\n'), ((11109, 11126), 'numpy.min', 'np.min', (['durations'], {}), '(durations)\n', (11115, 11126), True, 'import numpy as np\n'), ((11185, 11200), 'numpy.min', 'np.min', (['PTratio'], {}), '(PTratio)\n', (11191, 11200), True, 'import numpy as np\n'), ((11267, 11294), 'numpy.min', 'np.min', (['repolarizationslope'], {}), 
'(repolarizationslope)\n', (11273, 11294), True, 'import numpy as np\n'), ((17577, 17607), 'numpy.where', 'np.where', (['(df.waveform[1] > 0.0)'], {}), '(df.waveform[1] > 0.0)\n', (17585, 17607), True, 'import numpy as np\n'), ((17641, 17671), 'numpy.where', 'np.where', (['(df.waveform[1] > 0.0)'], {}), '(df.waveform[1] > 0.0)\n', (17649, 17671), True, 'import numpy as np\n'), ((18627, 18684), 'numpy.mean', 'np.mean', (['df.waveform[df.waveform_class == waveform_class]'], {}), '(df.waveform[df.waveform_class == waveform_class])\n', (18634, 18684), True, 'import numpy as np\n'), ((8282, 8345), 'numpy.array', 'np.array', (["nwb_data['processing'][probe]['UnitTimes'][u]['ypos']"], {}), "(nwb_data['processing'][probe]['UnitTimes'][u]['ypos'])\n", (8290, 8345), True, 'import numpy as np\n'), ((8655, 8719), 'numpy.array', 'np.array', (["nwb_data['processing'][probe]['UnitTimes'][u]['depth']"], {}), "(nwb_data['processing'][probe]['UnitTimes'][u]['depth'])\n", (8663, 8719), True, 'import numpy as np\n'), ((9124, 9187), 'numpy.array', 'np.array', (["nwb_data['processing'][probe]['UnitTimes'][u]['xpos']"], {}), "(nwb_data['processing'][probe]['UnitTimes'][u]['xpos'])\n", (9132, 9187), True, 'import numpy as np\n'), ((11146, 11163), 'numpy.min', 'np.min', (['durations'], {}), '(durations)\n', (11152, 11163), True, 'import numpy as np\n'), ((11218, 11233), 'numpy.min', 'np.min', (['PTratio'], {}), '(PTratio)\n', (11224, 11233), True, 'import numpy as np\n'), ((11324, 11351), 'numpy.min', 'np.min', (['repolarizationslope'], {}), '(repolarizationslope)\n', (11330, 11351), True, 'import numpy as np\n'), ((12341, 12358), 'numpy.min', 'np.min', (['durations'], {}), '(durations)\n', (12347, 12358), True, 'import numpy as np\n'), ((12418, 12433), 'numpy.min', 'np.min', (['PTratio'], {}), '(PTratio)\n', (12424, 12433), True, 'import numpy as np\n'), ((12501, 12528), 'numpy.min', 'np.min', (['repolarizationslope'], {}), '(repolarizationslope)\n', (12507, 12528), True, 'import numpy as 
np\n'), ((8837, 8900), 'numpy.array', 'np.array', (["nwb_data['processing'][probe]['UnitTimes'][u]['ypos']"], {}), "(nwb_data['processing'][probe]['UnitTimes'][u]['ypos'])\n", (8845, 8900), True, 'import numpy as np\n'), ((12378, 12395), 'numpy.min', 'np.min', (['durations'], {}), '(durations)\n', (12384, 12395), True, 'import numpy as np\n'), ((12451, 12466), 'numpy.min', 'np.min', (['PTratio'], {}), '(PTratio)\n', (12457, 12466), True, 'import numpy as np\n'), ((12558, 12585), 'numpy.min', 'np.min', (['repolarizationslope'], {}), '(repolarizationslope)\n', (12564, 12585), True, 'import numpy as np\n')] |
from ProsNet.stack.posture_stack_abc import ABCPostureStack
from ProsNet.helper import Helper
from ProsNet.plotter import Plotter
import pandas as pd
import numpy as np
import math
import datetime
class EpochStack(ABCPostureStack, Helper, Plotter):
def __init__(self, processing_type='epoch'):
self.processing_type = processing_type
self.posture_stack = None
self.validation_stack = None
self.timeset = None
self.posture_stack_duration = None
self.posture_stack_epoch_type = None
self.posture_stack_start_time = None
    def get_data(self, activity_monitor):
        """Store the monitor's event data for later processing.

        Parameters
        ----------
        activity_monitor : object
            Must expose an `event_data` attribute; create_stack() later reads
            it with pd.read_csv, so it is expected to be a CSV path or buffer
            of activity events.
        """
        self.events_to_process = activity_monitor.event_data
def show_stack(self):
print('Posture Stack')
print('----------')
print('Unique class values')
print(self.posture_stack.Event_Code.unique())
print('----------')
print('Posture stack duration')
print(f"The posture stacks contains {self.posture_stack_duration} seconds of data.")
print('----------')
def create_stack(self, stack_type, subset_of_data = None, epochSize = 15):
"""
stack_type = 'mixed' or 'pure'
subset_of_data = int number of events or None
"""
self.posture_stack_epoch_type = stack_type
if self.processing_type == 'epoch':
event_data = pd.read_csv(self.events_to_process)
# subset of data for testing
if subset_of_data:
print(f'Using subset of data with just over {subset_of_data} events')
event_data = event_data.iloc[:subset_of_data]
event_data.Time = pd.to_datetime(event_data.Time, unit='d', origin='1899-12-30')
windowShift = epochSize/2
startTime = event_data.Time.iloc[0]
self.posture_stack_start_time = startTime
endTime = event_data.Time.iloc[-1]
totalTime = ((endTime - startTime).total_seconds()) + event_data['Interval (s)'].iloc[-1]
self.posture_stack_duration = totalTime
numOfEvents = math.ceil(totalTime / windowShift)
column_names = ['Start_Time', 'Finish_Time', 'Event_Code']
posture_stack = pd.DataFrame(0, index=np.arange(numOfEvents), columns=column_names)
for i in range(numOfEvents):
self.print_progress_bar(i+1, numOfEvents, 'Creating posture stack progress:')
posture_stack.iloc[i, 0] = startTime + datetime.timedelta(0,windowShift*i)
posture_stack.iloc[i, 1] = posture_stack.iloc[i, 0] + datetime.timedelta(0,epochSize)
current_epoch_startTime = event_data.Time[(event_data.Time <= posture_stack.iloc[i, 0])].tail(1).item()
current_epoch_endTime = event_data.Time[(event_data.Time <= posture_stack.iloc[i, 1])].tail(1).item()
current_epoch = event_data[(event_data.Time >= current_epoch_startTime) & (event_data.Time <= current_epoch_endTime)].copy()
if len(current_epoch.index) == 1:
posture_stack.iloc[i, 2] = current_epoch['ActivityCode (0=sedentary 1=standing 2=stepping 2.1=cycling 3.1=primary lying, 3.2=secondary lying 4=non-wear 5=travelling)']
else:
# if mixed events are required
if stack_type == 'mixed':
# Crop the time of the first and final events
first_new_value = current_epoch['Interval (s)'].iloc[0] - ((posture_stack.iloc[i, 0] - current_epoch_startTime).total_seconds())
last_new_value = ((posture_stack.iloc[i, 1] - current_epoch_endTime).total_seconds())
current_epoch.iloc[0,2]= first_new_value
current_epoch.iloc[-1,2] = last_new_value
# Work out which is the predominent event
activity_codes = current_epoch['ActivityCode (0=sedentary 1=standing 2=stepping 2.1=cycling 3.1=primary lying, 3.2=secondary lying 4=non-wear 5=travelling)'].unique()
activity_codes_counter = {}
for code in activity_codes:
activity_code_dataframe = current_epoch[current_epoch['ActivityCode (0=sedentary 1=standing 2=stepping 2.1=cycling 3.1=primary lying, 3.2=secondary lying 4=non-wear 5=travelling)'] == code]
activity_code_counter_value = activity_code_dataframe['Interval (s)'].sum()
activity_codes_counter[code] = activity_code_counter_value
max_activity_code = max(activity_codes_counter, key=activity_codes_counter.get)
# Assign predominent event as the code
posture_stack.iloc[i, 2] = max_activity_code
# if pure events are required
elif stack_type == 'pure':
if np.std(current_epoch['ActivityCode (0=sedentary 1=standing 2=stepping 2.1=cycling 3.1=primary lying, 3.2=secondary lying 4=non-wear 5=travelling)'].unique()) == 0:
posture_stack.iloc[i, 2] = current_epoch.iloc[0,3]
else:
posture_stack.iloc[i, 2] = 99
self.posture_stack = posture_stack
def create_validation_stack(self, epochSize = '1S'):
event_data = pd.read_csv(self.events_to_process)
event_data.Time = pd.to_datetime(event_data.Time, unit='d', origin='1899-12-30')
event_data.index = event_data.Time
del event_data['Time'], event_data['CumulativeStepCount'], event_data['DataCount (samples)'], event_data['Interval (s)'], event_data['Activity Score (MET.h)'], event_data['Sum(Abs(DiffX)'], event_data['Sum(Abs(DiffY)'], event_data['Sum(Abs(DiffZ)']
event_data = event_data.resample(epochSize).ffill().dropna()
self.validation_stack = event_data
def remove_epochs(self, filename = None):
if filename is not None:
file_path = filename
non_wear_data = pd.read_csv(file_path)
non_wear_data.start = pd.to_datetime(non_wear_data.start, format="%d/%m/%Y %H:%M")
non_wear_data.end = pd.to_datetime(non_wear_data.end, format="%d/%m/%Y %H:%M")
for nw_index, nw_row in non_wear_data.iterrows():
self.posture_stack = self.posture_stack.drop(self.posture_stack[(((self.posture_stack.Start_Time > nw_row.start) | (self.posture_stack.Finish_Time > nw_row.start)) & ((self.posture_stack.Start_Time < nw_row.end) | (self.posture_stack.Finish_Time < nw_row.end)))].index)
self.posture_stack = self.posture_stack.reset_index(drop=True)
# This may need updating as it could brake easily
self.posture_stack_duration = len(self.posture_stack.index) * 15 # this 15 should'nt be fixed
def export_validation(self, filename):
self.validation_stack.to_csv(filename + '_.csv', index=True) | [
"math.ceil",
"pandas.read_csv",
"numpy.arange",
"datetime.timedelta",
"pandas.to_datetime"
] | [((5399, 5434), 'pandas.read_csv', 'pd.read_csv', (['self.events_to_process'], {}), '(self.events_to_process)\n', (5410, 5434), True, 'import pandas as pd\n'), ((5461, 5523), 'pandas.to_datetime', 'pd.to_datetime', (['event_data.Time'], {'unit': '"""d"""', 'origin': '"""1899-12-30"""'}), "(event_data.Time, unit='d', origin='1899-12-30')\n", (5475, 5523), True, 'import pandas as pd\n'), ((1366, 1401), 'pandas.read_csv', 'pd.read_csv', (['self.events_to_process'], {}), '(self.events_to_process)\n', (1377, 1401), True, 'import pandas as pd\n'), ((1652, 1714), 'pandas.to_datetime', 'pd.to_datetime', (['event_data.Time'], {'unit': '"""d"""', 'origin': '"""1899-12-30"""'}), "(event_data.Time, unit='d', origin='1899-12-30')\n", (1666, 1714), True, 'import pandas as pd\n'), ((2082, 2116), 'math.ceil', 'math.ceil', (['(totalTime / windowShift)'], {}), '(totalTime / windowShift)\n', (2091, 2116), False, 'import math\n'), ((6077, 6099), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {}), '(file_path)\n', (6088, 6099), True, 'import pandas as pd\n'), ((6134, 6194), 'pandas.to_datetime', 'pd.to_datetime', (['non_wear_data.start'], {'format': '"""%d/%m/%Y %H:%M"""'}), "(non_wear_data.start, format='%d/%m/%Y %H:%M')\n", (6148, 6194), True, 'import pandas as pd\n'), ((6227, 6285), 'pandas.to_datetime', 'pd.to_datetime', (['non_wear_data.end'], {'format': '"""%d/%m/%Y %H:%M"""'}), "(non_wear_data.end, format='%d/%m/%Y %H:%M')\n", (6241, 6285), True, 'import pandas as pd\n'), ((2238, 2260), 'numpy.arange', 'np.arange', (['numOfEvents'], {}), '(numOfEvents)\n', (2247, 2260), True, 'import numpy as np\n'), ((2474, 2512), 'datetime.timedelta', 'datetime.timedelta', (['(0)', '(windowShift * i)'], {}), '(0, windowShift * i)\n', (2492, 2512), False, 'import datetime\n'), ((2580, 2612), 'datetime.timedelta', 'datetime.timedelta', (['(0)', 'epochSize'], {}), '(0, epochSize)\n', (2598, 2612), False, 'import datetime\n')] |
"""Script demonstrating the ground effect contribution.
The simulation is run by a `CtrlAviary` environment.
Example
-------
In a terminal, run as:
$ python groundeffect.py
Notes
-----
The drone altitude tracks a sinusoid, near the ground plane.
"""
import os
import time
import argparse
from datetime import datetime
import pdb
import math
import random
import numpy as np
import pybullet as p
import matplotlib.pyplot as plt
from gym_pybullet_drones.envs.BaseAviary import DroneModel, Physics
from gym_pybullet_drones.envs.CtrlAviary import CtrlAviary
from gym_pybullet_drones.envs.VisionAviary import VisionAviary
from gym_pybullet_drones.control.HexControl import *
from gym_pybullet_drones.utils.Logger import Logger
from gym_pybullet_drones.utils.utils import sync, str2bool
def plotContactData():
    """Render a 2x2 summary figure of the recorded contact data.

    Reads the module-level lists (contact_forces, contact_distance,
    friction_x, friction_y) populated after the simulation loop.
    """
    panels = (
        (221, 'Normal Force (N)', contact_forces),
        (222, 'Contact Distance (m)', contact_distance),
        (223, 'Friction_x (N)', friction_x),
        (224, 'Friction_y (N)', friction_y),
    )
    plt.figure()
    for position, label, series in panels:
        plt.subplot(position)
        plt.title(label)
        plt.plot(series)
    plt.show()
if __name__ == "__main__":
    #### Define and parse (optional) arguments for the script ##
    parser = argparse.ArgumentParser(description='Ground effect script using CtrlAviary and DSLPIDControl')
    parser.add_argument('--gui', default=True, type=str2bool, help='Whether to use PyBullet GUI (default: True)', metavar='')
    parser.add_argument('--record_video', default=False, type=str2bool, help='Whether to record a video (default: False)', metavar='')
    parser.add_argument('--plot', default=True, type=str2bool, help='Whether to plot the simulation results (default: True)', metavar='')
    parser.add_argument('--user_debug_gui', default=False, type=str2bool, help='Whether to add debug lines and parameters to the GUI (default: False)', metavar='')
    parser.add_argument('--aggregate', default=False, type=str2bool, help='Whether to aggregate physics steps (default: False)', metavar='')
    parser.add_argument('--obstacles', default=False, type=str2bool, help='Whether to add obstacles to the environment (default: True)', metavar='')
    parser.add_argument('--simulation_freq_hz', default=250, type=int, help='Simulation frequency in Hz (default: 240)', metavar='')
    parser.add_argument('--control_freq_hz', default=30, type=int, help='Control frequency in Hz (default: 48)', metavar='')
    parser.add_argument('--duration_sec', default=20, type=int, help='Duration of the simulation in seconds (default: 5)', metavar='')
    parser.add_argument('--visualize_box', default=True, type=str2bool, help='Visualize the boxes (default: True)', metavar='')
    parser.add_argument('--drone_model', default=DroneModel.HEXP, type=DroneModel, help='Drone Model (default: True)', metavar='')
    ARGS = parser.parse_args()
    #### Box parameters ########################################
    BOX_SIDE = 0.2 # m
    TIME_SIDE = 5 #s
    #### Initialize the simulation #############################
    TABLE_HEIGHT = 0.6385
    Z_OFFSET = 0.132
    INIT_XYZ = np.array([0,0,0.2]).reshape(1,3)
    # Without aggregation each control step corresponds to one physics step.
    AGGR_PHY_STEPS = int(ARGS.simulation_freq_hz/ARGS.control_freq_hz) if ARGS.aggregate else 1
    #### Create the environment ################################
    env = CtrlAviary(drone_model=ARGS.drone_model,
                        num_drones=1,
                        num_rotors=6,
                        rotor_angle=10, #degrees
                        initial_xyzs=INIT_XYZ,
                        physics=Physics.PYB_GND_DRAG_DW,
                        neighbourhood_radius=10,
                        freq=ARGS.simulation_freq_hz,
                        aggregate_phy_steps=AGGR_PHY_STEPS,
                        gui=ARGS.gui,
                        record=ARGS.record_video,
                        obstacles=ARGS.obstacles,
                        user_debug_gui=ARGS.user_debug_gui
                        )
    #### Obtain the PyBullet Client ID from the environment ####
    PYB_CLIENT = env.getPyBulletClient()
    #### Initialize the logger #################################
    logger = Logger(logging_freq_hz=int(ARGS.simulation_freq_hz/AGGR_PHY_STEPS),
                    num_drones=1
                    )
    # time.sleep(10)
    #### Initialize the controller #############################
    ctrl = HexPIDControlEul(drone_model=ARGS.drone_model)
    # if ARGS.visualize_box:
    #     p.addUserDebugLine([-BOX_SIDE/2,-BOX_SIDE/2,TABLE_HEIGHT], [BOX_SIDE/2,-BOX_SIDE/2,TABLE_HEIGHT], [0,0,1])
    #     p.addUserDebugLine([BOX_SIDE/2,-BOX_SIDE/2,TABLE_HEIGHT], [BOX_SIDE/2,BOX_SIDE/2,TABLE_HEIGHT], [0,0,1])
    #     p.addUserDebugLine([BOX_SIDE/2, BOX_SIDE/2,TABLE_HEIGHT], [-BOX_SIDE/2, BOX_SIDE/2,TABLE_HEIGHT], [0,0,1])
    #     p.addUserDebugLine([-BOX_SIDE/2, BOX_SIDE/2,TABLE_HEIGHT], [-BOX_SIDE/2,-BOX_SIDE/2,TABLE_HEIGHT], [0,0,1])
    #### Run the simulation ####################################
    CTRL_EVERY_N_STEPS = int(np.floor(env.SIM_FREQ/ARGS.control_freq_hz))
    action = {"0": np.array([0]*6)}  # zero rotor commands until first control update
    START = time.time()
    ctrl_counter = 0
    line_counter = 0
    corner_ind = 0
    uav_pos = INIT_XYZ.reshape(3,)
    TARGET_POS = INIT_XYZ.reshape(3,)
    error = 0  # accumulates squared position error for the RMSE below
    ERROR = []
    ERROR_XY = []
    for i in range(0, int(ARGS.duration_sec*env.SIM_FREQ), AGGR_PHY_STEPS):
        #### Step the simulation ###################################
        obs, reward, done, info = env.step(action)
        # Tracking errors against the target of the *previous* iteration.
        e = np.linalg.norm(np.array(TARGET_POS) - np.array(obs["0"]["state"][:3]))
        e_xy = np.linalg.norm(np.array(TARGET_POS[:2]) - np.array(obs["0"]["state"][:2]))
        ERROR.append(e)
        ERROR_XY.append(e_xy)
        error += (e**2)*AGGR_PHY_STEPS
        # Reference trajectory: unit circle in x-y at z=1.
        TARGET_POS = [1]*3
        TARGET_POS[0] = 1*np.cos(i/200)
        TARGET_POS[1] = 1*np.sin(i/200)
        #### Compute control at the desired frequency ##############
        if i%CTRL_EVERY_N_STEPS == 0:
            #### Compute control for the current way point #############
            action["0"], _, _ = ctrl.computeControlFromState(control_timestep=CTRL_EVERY_N_STEPS*env.TIMESTEP,
                                                                state=obs["0"]["state"],
                                                                target_pos=TARGET_POS,
                                                                )
            print("====>", action["0"])
            #### Go to the next way point and loop #####################
            ctrl_counter = ctrl_counter + 1 #if ctrl_counter < (NUM_WP-1) else 0
        #### Log the simulation ####################################
        logger.log(drone=0,
                   timestamp=i/env.SIM_FREQ,
                   state= obs["0"]["state"],
                   control=np.hstack([TARGET_POS, np.zeros(9)])
                   )
        #### Printout ##############################################
        if i%env.SIM_FREQ == 0:
            env.render()
        #### Sync the simulation ###################################
        if ARGS.gui:
            sync(i, START, env.TIMESTEP)
    #### Close the environment #################################
    env.close()
    #### Save the simulation results ###########################
    # logger.save()
    # logger.save_as_csv("gnd") # Optional CSV save
    #### Print RMSE ############################################
    RMSE = np.sqrt(error/int(ARGS.duration_sec*env.SIM_FREQ))
    plt.figure()
    plt.title('Error (m) vs Time t (s)')
    plt.plot(ERROR, label = 'Error')
    plt.plot(ERROR_XY, label = 'Error XY')
    print("=========>> RMSE: ", RMSE)
    plt.legend()
    plt.show()
    #### Print Contact Details #################################
    # Flatten per-step contact reports; empty report -> "no contact" defaults.
    contact_forces = []
    contact_distance = []
    friction_x = []
    friction_y = []
    for d in env._getContactData():
        if len(d) == 0:
            contact_forces.append(0)
            contact_distance.append(1)
            friction_x.append(0)
            friction_y.append(0)
        else:
            # NOTE(review): indices follow PyBullet's getContactPoints tuple layout -- confirm.
            contact_forces.append(d[9])
            contact_distance.append(d[8])
            friction_x.append(d[12])
            friction_y.append(d[10])
    #### Plot the simulation results ###########################
    if ARGS.plot:
        # plotContactData()
        logger.plot()
| [
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"numpy.floor",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"gym_pybullet_drones.utils.utils.sync",
"gym_pybullet_drones.envs.CtrlAviary.CtrlAviary",
"numpy.cos",
"time.time",
"numpy.sin",
"matplotlib.pyplot.title",
"matplotli... | [((820, 832), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (830, 832), True, 'import matplotlib.pyplot as plt\n'), ((837, 853), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (848, 853), True, 'import matplotlib.pyplot as plt\n'), ((858, 887), 'matplotlib.pyplot.title', 'plt.title', (['"""Normal Force (N)"""'], {}), "('Normal Force (N)')\n", (867, 887), True, 'import matplotlib.pyplot as plt\n'), ((892, 916), 'matplotlib.pyplot.plot', 'plt.plot', (['contact_forces'], {}), '(contact_forces)\n', (900, 916), True, 'import matplotlib.pyplot as plt\n'), ((921, 937), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (932, 937), True, 'import matplotlib.pyplot as plt\n'), ((942, 975), 'matplotlib.pyplot.title', 'plt.title', (['"""Contact Distance (m)"""'], {}), "('Contact Distance (m)')\n", (951, 975), True, 'import matplotlib.pyplot as plt\n'), ((980, 1006), 'matplotlib.pyplot.plot', 'plt.plot', (['contact_distance'], {}), '(contact_distance)\n', (988, 1006), True, 'import matplotlib.pyplot as plt\n'), ((1011, 1027), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (1022, 1027), True, 'import matplotlib.pyplot as plt\n'), ((1032, 1059), 'matplotlib.pyplot.title', 'plt.title', (['"""Friction_x (N)"""'], {}), "('Friction_x (N)')\n", (1041, 1059), True, 'import matplotlib.pyplot as plt\n'), ((1064, 1084), 'matplotlib.pyplot.plot', 'plt.plot', (['friction_x'], {}), '(friction_x)\n', (1072, 1084), True, 'import matplotlib.pyplot as plt\n'), ((1089, 1105), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(224)'], {}), '(224)\n', (1100, 1105), True, 'import matplotlib.pyplot as plt\n'), ((1110, 1137), 'matplotlib.pyplot.title', 'plt.title', (['"""Friction_y (N)"""'], {}), "('Friction_y (N)')\n", (1119, 1137), True, 'import matplotlib.pyplot as plt\n'), ((1142, 1162), 'matplotlib.pyplot.plot', 'plt.plot', (['friction_y'], {}), '(friction_y)\n', (1150, 1162), True, 'import 
matplotlib.pyplot as plt\n'), ((1167, 1177), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1175, 1177), True, 'import matplotlib.pyplot as plt\n'), ((1290, 1389), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Ground effect script using CtrlAviary and DSLPIDControl"""'}), "(description=\n 'Ground effect script using CtrlAviary and DSLPIDControl')\n", (1313, 1389), False, 'import argparse\n'), ((3601, 3951), 'gym_pybullet_drones.envs.CtrlAviary.CtrlAviary', 'CtrlAviary', ([], {'drone_model': 'ARGS.drone_model', 'num_drones': '(1)', 'num_rotors': '(6)', 'rotor_angle': '(10)', 'initial_xyzs': 'INIT_XYZ', 'physics': 'Physics.PYB_GND_DRAG_DW', 'neighbourhood_radius': '(10)', 'freq': 'ARGS.simulation_freq_hz', 'aggregate_phy_steps': 'AGGR_PHY_STEPS', 'gui': 'ARGS.gui', 'record': 'ARGS.record_video', 'obstacles': 'ARGS.obstacles', 'user_debug_gui': 'ARGS.user_debug_gui'}), '(drone_model=ARGS.drone_model, num_drones=1, num_rotors=6,\n rotor_angle=10, initial_xyzs=INIT_XYZ, physics=Physics.PYB_GND_DRAG_DW,\n neighbourhood_radius=10, freq=ARGS.simulation_freq_hz,\n aggregate_phy_steps=AGGR_PHY_STEPS, gui=ARGS.gui, record=ARGS.\n record_video, obstacles=ARGS.obstacles, user_debug_gui=ARGS.user_debug_gui)\n', (3611, 3951), False, 'from gym_pybullet_drones.envs.CtrlAviary import CtrlAviary\n'), ((5357, 5368), 'time.time', 'time.time', ([], {}), '()\n', (5366, 5368), False, 'import time\n'), ((7740, 7752), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7750, 7752), True, 'import matplotlib.pyplot as plt\n'), ((7757, 7793), 'matplotlib.pyplot.title', 'plt.title', (['"""Error (m) vs Time t (s)"""'], {}), "('Error (m) vs Time t (s)')\n", (7766, 7793), True, 'import matplotlib.pyplot as plt\n'), ((7798, 7828), 'matplotlib.pyplot.plot', 'plt.plot', (['ERROR'], {'label': '"""Error"""'}), "(ERROR, label='Error')\n", (7806, 7828), True, 'import matplotlib.pyplot as plt\n'), ((7835, 7871), 'matplotlib.pyplot.plot', 'plt.plot', 
(['ERROR_XY'], {'label': '"""Error XY"""'}), "(ERROR_XY, label='Error XY')\n", (7843, 7871), True, 'import matplotlib.pyplot as plt\n'), ((7916, 7928), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7926, 7928), True, 'import matplotlib.pyplot as plt\n'), ((7933, 7943), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7941, 7943), True, 'import matplotlib.pyplot as plt\n'), ((5264, 5309), 'numpy.floor', 'np.floor', (['(env.SIM_FREQ / ARGS.control_freq_hz)'], {}), '(env.SIM_FREQ / ARGS.control_freq_hz)\n', (5272, 5309), True, 'import numpy as np\n'), ((5328, 5345), 'numpy.array', 'np.array', (['([0] * 6)'], {}), '([0] * 6)\n', (5336, 5345), True, 'import numpy as np\n'), ((3396, 3417), 'numpy.array', 'np.array', (['[0, 0, 0.2]'], {}), '([0, 0, 0.2])\n', (3404, 3417), True, 'import numpy as np\n'), ((6081, 6096), 'numpy.cos', 'np.cos', (['(i / 200)'], {}), '(i / 200)\n', (6087, 6096), True, 'import numpy as np\n'), ((6121, 6136), 'numpy.sin', 'np.sin', (['(i / 200)'], {}), '(i / 200)\n', (6127, 6136), True, 'import numpy as np\n'), ((7359, 7387), 'gym_pybullet_drones.utils.utils.sync', 'sync', (['i', 'START', 'env.TIMESTEP'], {}), '(i, START, env.TIMESTEP)\n', (7363, 7387), False, 'from gym_pybullet_drones.utils.utils import sync, str2bool\n'), ((5780, 5800), 'numpy.array', 'np.array', (['TARGET_POS'], {}), '(TARGET_POS)\n', (5788, 5800), True, 'import numpy as np\n'), ((5803, 5834), 'numpy.array', 'np.array', (["obs['0']['state'][:3]"], {}), "(obs['0']['state'][:3])\n", (5811, 5834), True, 'import numpy as np\n'), ((5866, 5890), 'numpy.array', 'np.array', (['TARGET_POS[:2]'], {}), '(TARGET_POS[:2])\n', (5874, 5890), True, 'import numpy as np\n'), ((5893, 5924), 'numpy.array', 'np.array', (["obs['0']['state'][:2]"], {}), "(obs['0']['state'][:2])\n", (5901, 5924), True, 'import numpy as np\n'), ((7094, 7105), 'numpy.zeros', 'np.zeros', (['(9)'], {}), '(9)\n', (7102, 7105), True, 'import numpy as np\n')] |
def test_point_cloud_to_array(point_cloud):
import numpy as np
np_array = point_cloud.to_array()
assert np_array is not None
assert isinstance(np_array, np.ndarray)
def test_to_rgb_image(point_cloud):
import numpy as np
np_array = point_cloud.to_array()
image = np_array[["r", "g", "b"]]
image = np.asarray([np_array["r"], np_array["g"], np_array["b"]])
image = np.moveaxis(image, [0, 1, 2], [2, 0, 1])
image = image.astype(np.uint8)
def test_height(point_cloud):
height = point_cloud.height
assert height is not None
assert isinstance(height, int)
def test_width(point_cloud):
width = point_cloud.width
assert width is not None
assert isinstance(width, int)
def test_height_context_manager(frame):
import pytest
with frame.get_point_cloud() as point_cloud:
point_cloud.height # pylint: disable=pointless-statement
with pytest.raises(RuntimeError):
point_cloud.height # pylint: disable=pointless-statement
def test_width_context_manager(frame):
import pytest
with frame.get_point_cloud() as point_cloud:
point_cloud.width # pylint: disable=pointless-statement
with pytest.raises(RuntimeError):
point_cloud.width # pylint: disable=pointless-statement
def test_to_array_context_manager(frame):
import pytest
with frame.get_point_cloud() as point_cloud:
point_cloud.to_array()
with pytest.raises(RuntimeError):
point_cloud.to_array()
def test_illegal_init(application):  # pylint: disable=unused-argument
    """Constructing a PointCloud with missing or bogus arguments must fail."""
    import pytest
    import zivid

    with pytest.raises(TypeError):
        zivid.PointCloud()  # pylint: disable=no-value-for-parameter
    for bad_argument in ("Should fail.", 123):
        with pytest.raises(ValueError):
            zivid.PointCloud(bad_argument)
| [
"numpy.moveaxis",
"zivid.PointCloud",
"numpy.asarray",
"pytest.raises"
] | [((332, 389), 'numpy.asarray', 'np.asarray', (["[np_array['r'], np_array['g'], np_array['b']]"], {}), "([np_array['r'], np_array['g'], np_array['b']])\n", (342, 389), True, 'import numpy as np\n'), ((402, 442), 'numpy.moveaxis', 'np.moveaxis', (['image', '[0, 1, 2]', '[2, 0, 1]'], {}), '(image, [0, 1, 2], [2, 0, 1])\n', (413, 442), True, 'import numpy as np\n'), ((918, 945), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (931, 945), False, 'import pytest\n'), ((1196, 1223), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (1209, 1223), False, 'import pytest\n'), ((1442, 1469), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (1455, 1469), False, 'import pytest\n'), ((1620, 1644), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1633, 1644), False, 'import pytest\n'), ((1654, 1672), 'zivid.PointCloud', 'zivid.PointCloud', ([], {}), '()\n', (1670, 1672), False, 'import zivid\n'), ((1725, 1750), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1738, 1750), False, 'import pytest\n'), ((1760, 1792), 'zivid.PointCloud', 'zivid.PointCloud', (['"""Should fail."""'], {}), "('Should fail.')\n", (1776, 1792), False, 'import zivid\n'), ((1803, 1828), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1816, 1828), False, 'import pytest\n'), ((1838, 1859), 'zivid.PointCloud', 'zivid.PointCloud', (['(123)'], {}), '(123)\n', (1854, 1859), False, 'import zivid\n')] |
# Forward: given model/pde parameters λ -> u(t, x)
import time, sys, os, json
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits.mplot3d import Axes3D
# from plotting import newfig, savefig
# from mpl_toolkits.axes_grid1 import make_axes_locatable
# from pyDOE import lhs
# from scipy.interpolate import griddata
# import scipy.io
sys.path.insert(0, '../../Utilities/') # for plotting
# from plotting import newfig, savefig
# np.random.seed(1234)
# tf.set_random_seed(1234)
class PhysicsInformedNN:
    """Physics-informed neural network (TF1 graph mode) for the 2-D Laplace
    equation u_xx + u_yy = 0 on a rectangle, trained against boundary data
    (u values, optionally mixed with u_x / u_y derivatives) plus the PDE
    residual on an interior collocation grid."""
    # Initialize the class
    def __init__(self, xb, yb, x0, xe, y0, ye, boundaryU, boundaryU_, xf_grid, yf_grid, layers, lowerbound, upperbound, mix):
        # mix=True uses derivative boundary conditions on two edges instead of u.
        self.mix = mix
        self.xb = xb
        self.yb = yb
        self.x0 = x0
        self.xe = xe
        self.y0 = y0
        self.ye = ye
        # boundaryU = [left, right, bottom, top] u-values on each edge.
        self.ul = boundaryU[0]
        self.ur = boundaryU[1]
        self.ub = boundaryU[2]
        self.ut = boundaryU[3]
        if self.mix:
            # boundaryU_ = [u_x left, u_x right, u_y bottom, u_y top].
            self.ul_x = boundaryU_[0]
            self.ur_x = boundaryU_[1]
            self.ub_y = boundaryU_[2]
            self.ut_y = boundaryU_[3]
        self.xf_grid = xf_grid
        self.yf_grid = yf_grid
        self.lowerbound = lowerbound
        self.upperbound = upperbound
        self.layers = layers
        # Initialize NN
        self.weights, self.biases = self.initialize_NN(layers)
        # shape: (N_b, 1)
        self.xb_tf = tf.placeholder(tf.float32, shape=[None, self.xb.shape[1]])
        self.yb_tf = tf.placeholder(tf.float32, shape=[None, self.yb.shape[1]])
        self.x0_tf = tf.placeholder(tf.float32, shape=[None, self.x0.shape[1]])
        self.xe_tf = tf.placeholder(tf.float32, shape=[None, self.xe.shape[1]])
        self.y0_tf = tf.placeholder(tf.float32, shape=[None, self.y0.shape[1]])
        self.ye_tf = tf.placeholder(tf.float32, shape=[None, self.ye.shape[1]])
        self.ul_tf = tf.placeholder(tf.float32, shape=[None, self.ul.shape[1]])
        self.ur_tf = tf.placeholder(tf.float32, shape=[None, self.ur.shape[1]])
        self.ub_tf = tf.placeholder(tf.float32, shape=[None, self.ub.shape[1]])
        self.ut_tf = tf.placeholder(tf.float32, shape=[None, self.ut.shape[1]])
        if self.mix:
            self.ul_x_tf = tf.placeholder(tf.float32, shape=[None, self.ul_x.shape[1]])
            self.ur_x_tf = tf.placeholder(tf.float32, shape=[None, self.ur_x.shape[1]])
            self.ub_y_tf = tf.placeholder(tf.float32, shape=[None, self.ub_y.shape[1]])
            self.ut_y_tf = tf.placeholder(tf.float32, shape=[None, self.ut_y.shape[1]])
        # shape: (N_f * N_f, 1) because in net_all: X = tf.concat([x,y],1)
        self.xf_grid_tf = tf.placeholder(tf.float32, shape=[None, self.xf_grid.shape[1]])
        self.yf_grid_tf = tf.placeholder(tf.float32, shape=[None, self.yf_grid.shape[1]])
        self.lr_tf = tf.placeholder(tf.float32)
        # tf Graphs: u, u_x, u_y, f = net_all(x, y)
        self.ul_pred, self.ul_x_pred, _, _ = self.net_all(self.x0_tf, self.yb_tf)
        self.ur_pred, self.ur_x_pred, _, _ = self.net_all(self.xe_tf, self.yb_tf)
        self.ub_pred, _, self.ub_y_pred, _ = self.net_all(self.xb_tf, self.y0_tf)
        self.ut_pred, _, self.ut_y_pred, _ = self.net_all(self.xb_tf, self.ye_tf)
        # used in predict (only call net_all once)
        self.uf_pred, _, _, self.f_pred = self.net_all(self.xf_grid_tf, self.yf_grid_tf)
        # Loss: boundary(u, u_x, u_y) + PDE (f = u_xx + u_yy = 0)
        if not self.mix: # purely u for boundary condition
            self.loss = tf.reduce_mean(tf.square(self.ul_tf - self.ul_pred)) + \
                        tf.reduce_mean(tf.square(self.ur_tf - self.ur_pred)) + \
                        tf.reduce_mean(tf.square(self.ub_tf - self.ub_pred)) + \
                        tf.reduce_mean(tf.square(self.ut_tf - self.ut_pred)) + \
                        tf.reduce_mean(tf.square(self.f_pred))
        else: # mix of u and u_x, u_y for boundary condition
            # Left edge uses u_x, top edge uses u_y; right/bottom still use u.
            self.loss = tf.reduce_mean(tf.square(self.ul_x_tf - self.ul_x_pred)) + \
                        tf.reduce_mean(tf.square(self.ur_tf - self.ur_pred)) + \
                        tf.reduce_mean(tf.square(self.ub_tf - self.ub_pred)) + \
                        tf.reduce_mean(tf.square(self.ut_y_tf - self.ut_y_pred)) + \
                        tf.reduce_mean(tf.square(self.f_pred))
        # tf.reduce_mean: computes the mean of elements across dimensions of a tensor
        # Optimizers:
        # return a minimization Op (a graph node that performs computation on tensors) -> updates weights and biases
        self.train_op_Adam = tf.train.AdamOptimizer(learning_rate = self.lr_tf).minimize(self.loss)
        # tf session: initiates a tf Graph (defines computations) that processes tensors through operations + allocates resources + holds intermediate values
        self.sess = tf.Session()
        # variables now hold the values from declarations: tf.Variable(tf.zeros(...)), tf.Variable(tf.random_normal(...)), etc
        init = tf.global_variables_initializer()
        self.sess.run(init) # required to initialize the variables
    def initialize_NN(self, layers):
        """Create one weight matrix (Xavier init) and one zero bias vector per layer pair."""
        weights = []
        biases = []
        num_layers = len(layers)
        for l in range(0, num_layers-1):
            # tf.Variable: for trainable variables/mutable tensor values that persist across multiple sesssion.run()
            # https://towardsdatascience.com/understanding-fundamentals-of-tensorflow-program-and-why-it-is-necessary-94cf5b60e255
            weights.append(self.xavier_init(size=[layers[l], layers[l+1]]))
            biases.append(tf.Variable(tf.zeros([1, layers[l+1]], dtype=tf.float32), dtype=tf.float32)) # all zeros
        return weights, biases
    def xavier_init(self, size):
        """Return a [in_dim, out_dim] weight Variable with Xavier/Glorot initialization."""
        # https://towardsdatascience.com/weight-initialization-in-neural-networks-a-journey-from-the-basics-to-kaiming-954fb9b47c79
        # Want each layer's activation outputs to have stddev around 1 -> repeat matrix mult across as many layers without activations exploding or vanishing
        in_dim = size[0]
        out_dim = size[1]
        xavier_stddev = np.sqrt(2/(in_dim + out_dim))
        # random values from a truncated normal distribution (values whose magnitude>2 staddev from mean are dropped and re-picked)
        # Shape of the output tensor: [layers[l], layers[l+1]]
        return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32)
    def neural_net(self, X, weights, biases):
        """Forward pass: tanh MLP over input X, with X rescaled to [-1, 1] first."""
        num_layers = len(weights) + 1
        H = 2.0*(X - self.lowerbound)/(self.upperbound - self.lowerbound) - 1 # Initializing first input: mapping to [-1, 1]
        for l in range(0, num_layers-2):
            W = weights[l]
            b = biases[l]
            H = tf.tanh(tf.add(tf.matmul(H, W), b)) # passing along networks
            # NOTE: H*W=(50, 20) + B(1, 20) -> tf does broadcasting: B becomes (50, 20)
        W = weights[-1]
        b = biases[-1]
        # Final layer is linear (no activation).
        Y = tf.add(tf.matmul(H, W), b) # passed 5 times in total
        return Y
    def net_all(self, x, y):
        """Return (u, u_x, u_y, f) where f = u_xx + u_yy is the Laplace residual."""
        X = tf.concat([x, y], 1) # input
        # x = [[-0.5], [0.5]] # y = [[0], [1]]
        # [[-0.5, 0]
        #  [0.5, 1]]
        u = self.neural_net(X, self.weights, self.biases)
        u_x = tf.gradients(u, x)[0]
        u_xx = tf.gradients(u_x, x)[0]
        u_y = tf.gradients(u, y)[0]
        u_yy = tf.gradients(u_y, y)[0]
        f = u_xx + u_yy # f = u_xx + u_yy = 0
        return u, u_x, u_y, f
    def callback(self, loss):
        """Print the current loss (used as an optimizer callback)."""
        print('Loss:', loss)
    def train(self, lr): # one iteration: uses all training data from tf_dict and updates weights and biases
        """Run one Adam step at learning rate `lr`; return the resulting loss value."""
        tf_dict = { self.x0_tf: self.x0, self.xe_tf: self.xe, self.xb_tf: self.xb,
                    self.y0_tf: self.y0, self.ye_tf: self.ye, self.yb_tf: self.yb,
                    self.ul_tf: self.ul, self.ur_tf: self.ur, self.ub_tf: self.ub, self.ut_tf: self.ut,
                    self.xf_grid_tf: self.xf_grid, self.yf_grid_tf: self.yf_grid,
                    self.lr_tf: lr}
        if self.mix:
            tf_dict.update({
                self.ul_x_tf: self.ul_x, self.ur_x_tf: self.ur_x,
                self.ub_y_tf: self.ub_y, self.ut_y_tf: self.ut_y
            })
        # feeding training examples during training and running the minimization Op of self.loss
        self.sess.run(self.train_op_Adam, tf_dict)
        loss_value = self.sess.run(self.loss, tf_dict)
        return loss_value
    def predict(self, x_grid, y_grid): # tf.concat([x, y], 1)
        """Evaluate u and the PDE residual f on a flattened (N*N, 1) coordinate grid."""
        tf_dict = {self.xf_grid_tf: x_grid, self.yf_grid_tf: y_grid}
        u, f = self.sess.run([self.uf_pred, self.f_pred], tf_dict)
        return u, f
def contourPlot(xtest_mesh, ytest_mesh, u_test, u_pred, N_test, i):
    """Plot exact vs predicted solution fields and their absolute difference.

    Saves the figure as a PDF and dumps u_pred to JSON under `dirpath`
    (a module-level variable defined elsewhere in the file -- assumed to
    exist at call time; TODO confirm). `i` is the 0-based iteration index;
    titles/filenames report i+1.
    """
    fig = plt.figure(figsize=(6.0, 5.3))
    # 2x3 grid: two field panels + colorbar column on top, error panel + colorbar below.
    gs = gridspec.GridSpec(nrows=2, ncols=3, figure=fig, width_ratios=[6, 6, 0.6], height_ratios=[1, 1], wspace=0.41, hspace=0.33)
    ax = fig.add_subplot(gs[0, 0])
    cset1 = ax.contourf(xtest_mesh, ytest_mesh, np.reshape(u_test, (N_test, N_test)), levels=30, cmap='winter')
    plt.gca().set(xlabel='$x$', ylabel='$y$', title='Exact') # $: mathematical font like latex
    ax = fig.add_subplot(gs[0, 1])
    cset2 = ax.contourf(xtest_mesh, ytest_mesh, np.reshape(u_pred, (N_test, N_test)), levels=30, cmap='winter')
    plt.gca().set(xlabel='$x$', ylabel='$y$', title='Prediction')
    ax = fig.add_subplot(gs[0, 2])
    fig.colorbar(cset2, cax=ax)  # shared colorbar for the prediction panel
    ax = fig.add_subplot(gs[1, 0:2])
    cset3 = ax.contourf(xtest_mesh, ytest_mesh, np.reshape(np.abs(u_pred-u_test), (N_test, N_test)), levels=30, cmap='autumn')
    plt.gca().set(xlabel='$x$', ylabel='$y$', title='|Prediction - Exact|')
    ax = fig.add_subplot(gs[1, 2])
    fig.colorbar(cset3, cax=ax)
    plt.suptitle(f'Snapshot at Iteration = {i+1}')
    fig.subplots_adjust(left=0.09, right=0.89, bottom=0.08, top=0.90)
    plt.savefig(f'{dirpath}/forward_2d_contour_iter{i+1}_new.pdf')
    plt.close(fig)
    # Persist the raw prediction so the plot can be regenerated later.
    with open(f'{dirpath}/forward_2d_contour_upred_iter{i+1}.json', 'w') as f:
        json.dump(u_pred.tolist(), f)
def axisToGrid(x, y):
    """Expand two (N, 1) coordinate axes into mesh and column-grid form.

    Args:
        x: (N, 1) column of x-axis coordinates, e.g. [[0] [0.5] [1]].
        y: (N, 1) column of y-axis coordinates.

    Returns:
        ``(x_mesh, y_mesh, x_grid, y_grid)`` where the meshes are the
        (N, N) outputs of ``np.meshgrid`` and the grids are their
        row-major flattenings reshaped to (N * N, 1) — the layout expected
        by the network input ``tf.concat([x, y], 1)``.
    """
    x_mesh, y_mesh = np.meshgrid(x, y)
    # flatten() copies, so the returned grids do not alias the meshes.
    x_grid = x_mesh.flatten().reshape(-1, 1)
    y_grid = y_mesh.flatten().reshape(-1, 1)
    return x_mesh, y_mesh, x_grid, y_grid
if __name__ == "__main__":
    # --- Problem: 2-D Laplace equation on the unit square ---
    # u_xx + u_yy = 0, x in [0, 1], y in [0, 1]
    # u(0, y) = -y^2 ## left (u)
    # u(1, y) = 1 - y^2 + 3y ## right (u)
    # u(x, 0) = x^2 ## bottom (u)
    # u(x, 1) = x^2 - 1 + 3x ## top (u)
    # u_x(0, y) = 2x + 3y = 3y ## left (du/dx)
    # u_x(1, y) = 2x + 3y = 2 + 3y ## right (du/dx)
    # u_y(x, 0) = -2y + 3x = 3x ## bottom (du/dy)
    # u_y(x, 1) = -2y + 3x = -2 + 3x ## top (du/dy)
    # analytical solution: u(x, y) = x^2 - y^2 + 3xy
    # NOTE: du/dn (normal direction) for boundary condition:
    # 1) additional information
    # 2) makes sense this way: u=temperature, fixed boundary temperature, du/dn indicates influx/outflux
    # NOTE: need at least one edge to be u(x, y), otherwise solution have arbitrary constant
    # NOTE: Boundary condition can have order > 1
    ###########################
    ## PART 1: setting parameters and getting accurate data for evaluation
    # Network architecture: 2 inputs (x, y), three hidden layers of 50
    # neurons, 1 output (u). Activation is set inside PhysicsInformedNN.
    layers = [2, 50, 50, 50, 1]
    mix = True # mix of boundary conditions (u, u_x, u_y)
    # Domain bounds
    lowerbound = np.array([0, 0])
    upperbound = np.array([1, 1])
    ###########################
    ## PART 2: setting training and testing data from full analytical solution for uniform grid
    # boundary condition
    N_b = 20  # sample points per edge
    xb = np.reshape(np.linspace(lowerbound[0], upperbound[0], N_b), (-1, 1)) # [[0] [0.5] [1]] (N_b, 1)
    yb = np.reshape(np.linspace(lowerbound[1], upperbound[1], N_b), (-1, 1))
    x0 = 0 * yb + lowerbound[0] # left edge # [[0] [0] [0]]
    xe = 0 * yb + upperbound[0] # right edge
    y0 = 0 * xb + lowerbound[1] # bottom edge
    ye = 0 * xb + upperbound[1] # top edge
    # Exact boundary values of u from the analytical solution above.
    ul = -1 * yb**2 # u(0, y)
    ur = 1 - yb**2 + 3 * yb # u(1, y)
    ub = xb**2 # u(x, 0)
    ut = xb**2 - 1 + 3 * xb # u(x, 1)
    # Exact boundary derivatives (only fed to the model when mix=True).
    ul_x = 3 * yb # u_x(0, y)
    ur_x = 2 + 3 * yb # u_x(1, y)
    ub_y = 3 * xb # u_y(x, 0)
    ut_y = -2 + 3 * xb # u_y(x, 1)
    # collocation points for enforcing f=0 from uniform grid
    # NOTE: want PDE satisfied at positions arbitrarily close to boundary -> include boundary points in collocation points
    # NOTE: Generally, want interval of training point < smallest characteristic of solution (fluctuation) (dense enough to capture all landscape within domain)
    # NOTE: To estimate the density: can estimate fluctuation frequency from f (known), geometry (sharp region higher frequency), prior knowledge
    N_f = 30 # along one axis
    xf = np.reshape(np.linspace(lowerbound[0], upperbound[0], N_f), (-1, 1)) # (N_f, 1)
    yf = np.reshape(np.linspace(lowerbound[1], upperbound[1], N_f), (-1, 1)) # (N_f, 1)
    _, _, xf_grid, yf_grid = axisToGrid(xf, yf) # (N_f * N_f, 1)
    # testing data
    N_test = 50 # NOTE: different from collocation points
    xtest = np.reshape(np.linspace(lowerbound[0], upperbound[0], N_test), (-1, 1)) # (N_test, 1)
    ytest = np.reshape(np.linspace(lowerbound[1], upperbound[1], N_test), (-1, 1)) # (N_test, 1)
    xtest_mesh, ytest_mesh, xtest_grid, ytest_grid = axisToGrid(xtest, ytest) # (N_test, N_test), (N_test * N_test, 1)
    # Exact solution evaluated on the full test grid for error metrics.
    u_test = xtest_grid**2 - ytest_grid**2 + 3 * xtest_grid * ytest_grid # (N_test * N_test, 1)
    ###########################
    ## PART 3: forming the network, training, predicting
    model = PhysicsInformedNN(xb, yb, x0, xe, y0, ye, [ul, ur, ub, ut], [ul_x, ur_x, ub_y, ut_y], xf_grid, yf_grid, layers, lowerbound, upperbound, mix)
start_time = time.time()
# settings for plots
dirpath = f'./main/2d/forward_2d_figures/{start_time}' # where figures are stored
os.mkdir(dirpath)
ticksize = 8.5
plt.rcParams['xtick.labelsize'] = ticksize
plt.rcParams['ytick.labelsize'] = ticksize
plt.rcParams['axes.labelsize'] = 9.5
plt.rcParams['axes.titlesize'] = 10.5
plt.rcParams['lines.markersize'] = 4
plt.rcParams['legend.handlelength'] = 0.4
annotatesize = 9.5
# Plot 1. Boundary Point, Collocation Point
fig = plt.figure(figsize=(4.2, 2.9))
bc, = plt.plot(np.concatenate((x0,xe,xb,xb)), np.concatenate((yb,yb,y0,ye)), 'H', c='#ffa96b', label = 'Boundary Point', clip_on=False)
cp, = plt.plot(xf_grid, yf_grid, '.', c='#81c9fc', label = 'Collocation Point', clip_on=False)
plt.gca().set(xlim=(0, 1), ylim=(0, 1), xlabel='x', ylabel='y', title='Training Data')
plt.figlegend(handles=[bc, cp], loc='center right', bbox_to_anchor=(0.5, 0., 0.5, 0.5), fontsize=ticksize, framealpha=0.9)
fig.subplots_adjust(left=0.11, right=0.67, bottom=0.13, top=0.92)
plt.savefig(f'{dirpath}/trainingdata.pdf')
plt.close(fig)
dataDict = {
'boundary points':{
'N_b': N_b,
'xb': xb.tolist(),
'yb': yb.tolist(),
'x0': x0.tolist(),
'xe': xe.tolist(),
'y0': y0.tolist(),
'ye': ye.tolist(),
'ul': ul.tolist(),
'ur': ur.tolist(),
'ub': ub.tolist(),
'ut': ut.tolist(),
'ul_x': ul_x.tolist(),
'ur_x': ur_x.tolist(),
'ub_y': ub_y.tolist(),
'ut_y': ut.tolist(),
},
'collocation points':{
'N_f': N_f,
'xf_grid': xf_grid.tolist(),
'yf_grid': yf_grid.tolist(),
},
'testing data':{
"N_test": N_test,
"xtest_mesh": xtest_mesh.tolist(),
"ytest_mesh": ytest_mesh.tolist(),
"u_test": u_test.tolist()
}
}
with open(f'{dirpath}/data.json', 'w') as f:
json.dump(dataDict, f)
    # Note: loss around 10^-3/-4 should be about good
    loss_values, u_preds, f_preds = ([] for i in range(3))
    N_iter = 10000
    loss_value_step = 10   # record the loss every 10 iterations
    pred_step = 100        # evaluate on the test grid every 100 iterations
    contour_step = 1000 # if not pred_step's multiple, graph every least common multiple (pred_step, contour_step)
    for i in range(N_iter):
        # Learning rate halves every 10000 iterations; the cap branch at
        # i > 60000 is never reached with N_iter=10000.
        lr = 10**-3 * 2**(-i/10000) if i <= 60000 else 10**-3 * 2**(-60000/10000) # 0.00002210/0.00001563 # learning rate decay
        loss_value = model.train(lr) # from last iteration
        if (i+1) % loss_value_step == 0: # fires first at i=9, last at i=9999 (last iter)
            loss_values.append(float(loss_value))
            print('Iter: %d, Loss: %.3e, Time: %.2f, Learning Rate: %.8f' % (i+1, loss_value, time.time() - start_time, lr))
        if (i+1) % pred_step == 0: # fires first at i=99, last at i=9999 (last iter)
            u_pred, f_pred = model.predict(xtest_grid, ytest_grid)
            u_preds.append(u_pred) # (N_test * N_test, 1)
            f_preds.append(f_pred) # (N_test * N_test, 1)
            if (i+1) % contour_step == 0: # fires first at i=999, last at i=9999 (last iter)
                ## Plot 2. u (Exact, Prediction) vs (x,y) and |u_pred-u_test| vs (x,y): contour
                contourPlot(xtest_mesh, ytest_mesh, u_test, u_pred, N_test, i)
    training_time = time.time() - start_time
    u_preds = np.array(u_preds)
    f_preds = np.array(f_preds)
    # Final prediction on the test grid (used by the plots and error below).
    u_pred, f_pred = model.predict(xtest_grid, ytest_grid)
    # NOTE: what is important is the function u_pred resembles, not so much the parameters (weights & biases)
    # NOTE: if no analytical solution, find numerical method/other method to verify -> directly use network
    ###########################
    ## PART 4: calculating errors
    # Relative L2 error of the final prediction against the exact solution.
    error_u = np.linalg.norm(u_pred - u_test, 2) / np.linalg.norm(u_test, 2) # scalar
    print('Error u: %e' % (error_u))
    ###########################
    ## PART 5: Plotting
    # Plot 3. loss vs. iteration
    fig = plt.figure(figsize=(6.8, 6))
    plt.ticklabel_format(axis='x', style="sci", scilimits=(3,3))
    # Iteration numbers the recorded losses correspond to: 10, 20, ..., N_iter.
    x_coords = loss_value_step * (np.array(range(len(loss_values))) + 1)
    plt.semilogy(x_coords, loss_values) # linear X axis, logarithmic y axis(log scaling on the y axis)
    plt.gca().set(xlabel='Iteration', ylabel='Loss', title='Loss during Training')
    # Annotate the first and last recorded loss values on the curve.
    init_tuple = (loss_value_step, loss_values[0])
    plt.annotate('(%d, %.3e)' % init_tuple, xy=init_tuple, fontsize=annotatesize, ha='left')
    last_tuple = (N_iter, loss_values[-1])
    plt.annotate('(%d, %.3e)' % last_tuple, xy=last_tuple, fontsize=annotatesize, ha='right', va='top')
    plt.plot([init_tuple[0], last_tuple[0]], [init_tuple[1], last_tuple[1]], '.', c='#3B75AF')
    fig.subplots_adjust(left=0.1, right=0.98, bottom=0.07, top=0.95)
    # NOTE: Oscillation: actually very small numerical difference because of small y scale
    # 1. overshoot (fixed -> decaying learning rate)
    # 2. Adam: gradient descent + momentum (sometime parameter change makes the loss go up)
    plt.savefig(f'{dirpath}/forward_2d_loss.pdf')
    plt.close(fig)
    with open(f'{dirpath}/forward_2d_loss.json', 'w') as f:
        json.dump({"x_coords": x_coords.tolist(), "loss_values": loss_values}, f)
    # Plot 4. MSE between u_pred and u_test vs. iteration
    fig = plt.figure(figsize=(6, 6))
    plt.ticklabel_format(axis='x', style="sci", scilimits=(3,3))
    x_coords = pred_step * (np.array(range(len(u_preds))) + 1)
    # NOTE: the comprehension variable u_pred is comprehension-scoped in
    # Python 3 and does not clobber the final u_pred defined above.
    u_mses = [((u_pred - u_test)**2).mean(axis=0) for u_pred in u_preds] #[[mse1] [mse2] [mse3]]
    u_mses = np.array(u_mses)
    plt.semilogy(x_coords, u_mses, '.-')
    plt.gca().set(xlabel='Iteration', ylabel='MSE of u', title='MSE of u during Training')
    # Annotate first and last points, e.g. [(1000, 4.748), (2000, 9.394)]
    annots = list(zip(x_coords, u_mses.flatten()))
    plt.annotate('(%d, %.3e)' % annots[0], xy=annots[0], fontsize=annotatesize, ha='left')
    plt.annotate('(%d, %.3e)' % annots[-1], xy=annots[-1], fontsize=annotatesize, ha='right', va='top')
    fig.subplots_adjust(left=0.1, right=0.98, bottom=0.07, top=0.95)
    plt.savefig(f'{dirpath}/forward_2d_mse.pdf')
    plt.close(fig)
    with open(f'{dirpath}/forward_2d_mse.json', 'w') as f:
        json.dump({"x_coords": x_coords.tolist(), "u_mses": u_mses.tolist()}, f)
# Plot 5. plot u vs (x, y): surface
fig = plt.figure()
ax = plt.subplot(1, 2, 1, projection='3d')
ax.plot_surface(xtest_mesh, ytest_mesh, np.reshape(u_test, (N_test, N_test)), label='Exact', cmap='winter') # Data values as 2D arrays: (N_test * N_test, 1)
plt.gca().set(xlabel='$x$', ylabel='$y$', zlabel='$u$', title='Exact')
ax.tick_params(labelsize=ticksize)
ax = plt.subplot(1, 2, 2, projection='3d')
ax.plot_surface(xtest_mesh, ytest_mesh, np.reshape(u_pred, (N_test, N_test)), label='Prediction', cmap='winter') # Data values as 2D arrays: (N_test * N_test, 1)
plt.gca().set(xlabel='$x$', ylabel='$y$', zlabel='$u$', title='Prediction')
ax.tick_params(labelsize=ticksize)
plt.savefig(f'{dirpath}/forward_2d_surface.pdf')
plt.close(fig)
###########################
## PART 6: Saving information
infoDict = {
'problem':{
'pde form': 'u_xx + u_yy = 0',
'boundary (x)': [float(lowerbound[0]),float(upperbound[0])],
'boundary (y)': [float(lowerbound[1]),float(upperbound[1])],
'boundary conditions mix': mix,
'boundary condition (u)': 'u(0, y) = -5 * y^2, u(1, y) = 1 - 5 * y^2 + 3y, u(x, 0) = x^2, u(x, 1) = x^2 - 5 + 3x',
'boundary condition (u_x, u_y)': 'u_x(0, y) = 3y, u_x(1, y) = 2 + 3y, u_y(x, 0) = 3x, u_y(x, 1) = -2 + 3x',
'analytical solution': 'u(x, y) = x^2 - y^2 + 3xy'
},
'model':{
'layers': str(layers),
'training iteration': N_iter,
'loss_value_step':loss_value_step,
'pred_step': pred_step,
'contour_step': contour_step,
'training_time': training_time,
'error_u': error_u,
},
'training data':{
'N_b': N_b,
'xb': str(xb),
'yb': str(yb),
'N_f': N_f,
'xf': str(xf),
'yf': str(yf)
}
}
with open(f'{dirpath}/info.json', 'w') as f:
json.dump(infoDict, f, indent=4) | [
"sys.path.insert",
"numpy.sqrt",
"tensorflow.gradients",
"numpy.array",
"matplotlib.pyplot.annotate",
"numpy.linalg.norm",
"matplotlib.pyplot.semilogy",
"numpy.reshape",
"tensorflow.placeholder",
"matplotlib.pyplot.plot",
"tensorflow.Session",
"matplotlib.pyplot.close",
"tensorflow.concat",
... | [((414, 452), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../Utilities/"""'], {}), "(0, '../../Utilities/')\n", (429, 452), False, 'import time, sys, os, json\n'), ((8886, 8916), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.0, 5.3)'}), '(figsize=(6.0, 5.3))\n', (8896, 8916), True, 'import matplotlib.pyplot as plt\n'), ((8926, 9051), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', ([], {'nrows': '(2)', 'ncols': '(3)', 'figure': 'fig', 'width_ratios': '[6, 6, 0.6]', 'height_ratios': '[1, 1]', 'wspace': '(0.41)', 'hspace': '(0.33)'}), '(nrows=2, ncols=3, figure=fig, width_ratios=[6, 6, 0.6],\n height_ratios=[1, 1], wspace=0.41, hspace=0.33)\n', (8943, 9051), True, 'import matplotlib.gridspec as gridspec\n'), ((9885, 9933), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['f"""Snapshot at Iteration = {i + 1}"""'], {}), "(f'Snapshot at Iteration = {i + 1}')\n", (9897, 9933), True, 'import matplotlib.pyplot as plt\n'), ((10006, 10070), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{dirpath}/forward_2d_contour_iter{i + 1}_new.pdf"""'], {}), "(f'{dirpath}/forward_2d_contour_iter{i + 1}_new.pdf')\n", (10017, 10070), True, 'import matplotlib.pyplot as plt\n'), ((10073, 10087), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (10082, 10087), True, 'import matplotlib.pyplot as plt\n'), ((10274, 10291), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (10285, 10291), True, 'import numpy as np\n'), ((11888, 11904), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (11896, 11904), True, 'import numpy as np\n'), ((11922, 11938), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (11930, 11938), True, 'import numpy as np\n'), ((14343, 14354), 'time.time', 'time.time', ([], {}), '()\n', (14352, 14354), False, 'import time, sys, os, json\n'), ((14472, 14489), 'os.mkdir', 'os.mkdir', (['dirpath'], {}), '(dirpath)\n', (14480, 14489), False, 'import time, sys, os, json\n'), ((14854, 14884), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4.2, 2.9)'}), '(figsize=(4.2, 2.9))\n', (14864, 14884), True, 'import matplotlib.pyplot as plt\n'), ((15035, 15125), 'matplotlib.pyplot.plot', 'plt.plot', (['xf_grid', 'yf_grid', '"""."""'], {'c': '"""#81c9fc"""', 'label': '"""Collocation Point"""', 'clip_on': '(False)'}), "(xf_grid, yf_grid, '.', c='#81c9fc', label='Collocation Point',\n clip_on=False)\n", (15043, 15125), True, 'import matplotlib.pyplot as plt\n'), ((15220, 15348), 'matplotlib.pyplot.figlegend', 'plt.figlegend', ([], {'handles': '[bc, cp]', 'loc': '"""center right"""', 'bbox_to_anchor': '(0.5, 0.0, 0.5, 0.5)', 'fontsize': 'ticksize', 'framealpha': '(0.9)'}), "(handles=[bc, cp], loc='center right', bbox_to_anchor=(0.5, \n 0.0, 0.5, 0.5), fontsize=ticksize, framealpha=0.9)\n", (15233, 15348), True, 'import matplotlib.pyplot as plt\n'), ((15417, 15459), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{dirpath}/trainingdata.pdf"""'], {}), "(f'{dirpath}/trainingdata.pdf')\n", (15428, 15459), True, 'import matplotlib.pyplot as plt\n'), ((15464, 15478), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (15473, 15478), True, 'import matplotlib.pyplot as plt\n'), ((17809, 17826), 'numpy.array', 'np.array', (['u_preds'], {}), '(u_preds)\n', (17817, 17826), True, 'import numpy as np\n'), ((17841, 17858), 'numpy.array', 'np.array', (['f_preds'], {}), '(f_preds)\n', (17849, 17858), True, 'import numpy as np\n'), ((18426, 18454), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.8, 6)'}), '(figsize=(6.8, 6))\n', (18436, 18454), True, 'import matplotlib.pyplot as plt\n'), ((18459, 18520), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'axis': '"""x"""', 'style': '"""sci"""', 'scilimits': '(3, 3)'}), "(axis='x', style='sci', scilimits=(3, 3))\n", (18479, 18520), True, 'import matplotlib.pyplot as plt\n'), ((18597, 18632), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['x_coords', 'loss_values'], {}), 
'(x_coords, loss_values)\n', (18609, 18632), True, 'import matplotlib.pyplot as plt\n'), ((18834, 18927), 'matplotlib.pyplot.annotate', 'plt.annotate', (["('(%d, %.3e)' % init_tuple)"], {'xy': 'init_tuple', 'fontsize': 'annotatesize', 'ha': '"""left"""'}), "('(%d, %.3e)' % init_tuple, xy=init_tuple, fontsize=\n annotatesize, ha='left')\n", (18846, 18927), True, 'import matplotlib.pyplot as plt\n'), ((18970, 19074), 'matplotlib.pyplot.annotate', 'plt.annotate', (["('(%d, %.3e)' % last_tuple)"], {'xy': 'last_tuple', 'fontsize': 'annotatesize', 'ha': '"""right"""', 'va': '"""top"""'}), "('(%d, %.3e)' % last_tuple, xy=last_tuple, fontsize=\n annotatesize, ha='right', va='top')\n", (18982, 19074), True, 'import matplotlib.pyplot as plt\n'), ((19074, 19168), 'matplotlib.pyplot.plot', 'plt.plot', (['[init_tuple[0], last_tuple[0]]', '[init_tuple[1], last_tuple[1]]', '"""."""'], {'c': '"""#3B75AF"""'}), "([init_tuple[0], last_tuple[0]], [init_tuple[1], last_tuple[1]],\n '.', c='#3B75AF')\n", (19082, 19168), True, 'import matplotlib.pyplot as plt\n'), ((19484, 19529), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{dirpath}/forward_2d_loss.pdf"""'], {}), "(f'{dirpath}/forward_2d_loss.pdf')\n", (19495, 19529), True, 'import matplotlib.pyplot as plt\n'), ((19534, 19548), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (19543, 19548), True, 'import matplotlib.pyplot as plt\n'), ((19760, 19786), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (19770, 19786), True, 'import matplotlib.pyplot as plt\n'), ((19791, 19852), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'axis': '"""x"""', 'style': '"""sci"""', 'scilimits': '(3, 3)'}), "(axis='x', style='sci', scilimits=(3, 3))\n", (19811, 19852), True, 'import matplotlib.pyplot as plt\n'), ((20025, 20041), 'numpy.array', 'np.array', (['u_mses'], {}), '(u_mses)\n', (20033, 20041), True, 'import numpy as np\n'), ((20046, 20082), 
'matplotlib.pyplot.semilogy', 'plt.semilogy', (['x_coords', 'u_mses', '""".-"""'], {}), "(x_coords, u_mses, '.-')\n", (20058, 20082), True, 'import matplotlib.pyplot as plt\n'), ((20262, 20352), 'matplotlib.pyplot.annotate', 'plt.annotate', (["('(%d, %.3e)' % annots[0])"], {'xy': 'annots[0]', 'fontsize': 'annotatesize', 'ha': '"""left"""'}), "('(%d, %.3e)' % annots[0], xy=annots[0], fontsize=annotatesize,\n ha='left')\n", (20274, 20352), True, 'import matplotlib.pyplot as plt\n'), ((20353, 20457), 'matplotlib.pyplot.annotate', 'plt.annotate', (["('(%d, %.3e)' % annots[-1])"], {'xy': 'annots[-1]', 'fontsize': 'annotatesize', 'ha': '"""right"""', 'va': '"""top"""'}), "('(%d, %.3e)' % annots[-1], xy=annots[-1], fontsize=\n annotatesize, ha='right', va='top')\n", (20365, 20457), True, 'import matplotlib.pyplot as plt\n'), ((20526, 20570), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{dirpath}/forward_2d_mse.pdf"""'], {}), "(f'{dirpath}/forward_2d_mse.pdf')\n", (20537, 20570), True, 'import matplotlib.pyplot as plt\n'), ((20575, 20589), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (20584, 20589), True, 'import matplotlib.pyplot as plt\n'), ((20781, 20793), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20791, 20793), True, 'import matplotlib.pyplot as plt\n'), ((20803, 20840), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {'projection': '"""3d"""'}), "(1, 2, 1, projection='3d')\n", (20814, 20840), True, 'import matplotlib.pyplot as plt\n'), ((21127, 21164), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {'projection': '"""3d"""'}), "(1, 2, 2, projection='3d')\n", (21138, 21164), True, 'import matplotlib.pyplot as plt\n'), ((21456, 21504), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{dirpath}/forward_2d_surface.pdf"""'], {}), "(f'{dirpath}/forward_2d_surface.pdf')\n", (21467, 21504), True, 'import matplotlib.pyplot as plt\n'), ((21509, 21523), 'matplotlib.pyplot.close', 
'plt.close', (['fig'], {}), '(fig)\n', (21518, 21523), True, 'import matplotlib.pyplot as plt\n'), ((1489, 1547), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.xb.shape[1]]'}), '(tf.float32, shape=[None, self.xb.shape[1]])\n', (1503, 1547), True, 'import tensorflow as tf\n'), ((1569, 1627), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.yb.shape[1]]'}), '(tf.float32, shape=[None, self.yb.shape[1]])\n', (1583, 1627), True, 'import tensorflow as tf\n'), ((1649, 1707), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.x0.shape[1]]'}), '(tf.float32, shape=[None, self.x0.shape[1]])\n', (1663, 1707), True, 'import tensorflow as tf\n'), ((1729, 1787), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.xe.shape[1]]'}), '(tf.float32, shape=[None, self.xe.shape[1]])\n', (1743, 1787), True, 'import tensorflow as tf\n'), ((1809, 1867), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.y0.shape[1]]'}), '(tf.float32, shape=[None, self.y0.shape[1]])\n', (1823, 1867), True, 'import tensorflow as tf\n'), ((1889, 1947), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.ye.shape[1]]'}), '(tf.float32, shape=[None, self.ye.shape[1]])\n', (1903, 1947), True, 'import tensorflow as tf\n'), ((1969, 2027), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.ul.shape[1]]'}), '(tf.float32, shape=[None, self.ul.shape[1]])\n', (1983, 2027), True, 'import tensorflow as tf\n'), ((2049, 2107), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.ur.shape[1]]'}), '(tf.float32, shape=[None, self.ur.shape[1]])\n', (2063, 2107), True, 'import tensorflow as tf\n'), ((2129, 2187), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.ub.shape[1]]'}), '(tf.float32, shape=[None, self.ub.shape[1]])\n', (2143, 
2187), True, 'import tensorflow as tf\n'), ((2209, 2267), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.ut.shape[1]]'}), '(tf.float32, shape=[None, self.ut.shape[1]])\n', (2223, 2267), True, 'import tensorflow as tf\n'), ((2742, 2805), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.xf_grid.shape[1]]'}), '(tf.float32, shape=[None, self.xf_grid.shape[1]])\n', (2756, 2805), True, 'import tensorflow as tf\n'), ((2832, 2895), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.yf_grid.shape[1]]'}), '(tf.float32, shape=[None, self.yf_grid.shape[1]])\n', (2846, 2895), True, 'import tensorflow as tf\n'), ((2918, 2944), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (2932, 2944), True, 'import tensorflow as tf\n'), ((4944, 4956), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4954, 4956), True, 'import tensorflow as tf\n'), ((5099, 5132), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5130, 5132), True, 'import tensorflow as tf\n'), ((6224, 6255), 'numpy.sqrt', 'np.sqrt', (['(2 / (in_dim + out_dim))'], {}), '(2 / (in_dim + out_dim))\n', (6231, 6255), True, 'import numpy as np\n'), ((7198, 7218), 'tensorflow.concat', 'tf.concat', (['[x, y]', '(1)'], {}), '([x, y], 1)\n', (7207, 7218), True, 'import tensorflow as tf\n'), ((9131, 9167), 'numpy.reshape', 'np.reshape', (['u_test', '(N_test, N_test)'], {}), '(u_test, (N_test, N_test))\n', (9141, 9167), True, 'import numpy as np\n'), ((9374, 9410), 'numpy.reshape', 'np.reshape', (['u_pred', '(N_test, N_test)'], {}), '(u_pred, (N_test, N_test))\n', (9384, 9410), True, 'import numpy as np\n'), ((12126, 12172), 'numpy.linspace', 'np.linspace', (['lowerbound[0]', 'upperbound[0]', 'N_b'], {}), '(lowerbound[0], upperbound[0], N_b)\n', (12137, 12172), True, 'import numpy as np\n'), ((12230, 12276), 'numpy.linspace', 'np.linspace', 
(['lowerbound[1]', 'upperbound[1]', 'N_b'], {}), '(lowerbound[1], upperbound[1], N_b)\n', (12241, 12276), True, 'import numpy as np\n'), ((13368, 13414), 'numpy.linspace', 'np.linspace', (['lowerbound[0]', 'upperbound[0]', 'N_f'], {}), '(lowerbound[0], upperbound[0], N_f)\n', (13379, 13414), True, 'import numpy as np\n'), ((13456, 13502), 'numpy.linspace', 'np.linspace', (['lowerbound[1]', 'upperbound[1]', 'N_f'], {}), '(lowerbound[1], upperbound[1], N_f)\n', (13467, 13502), True, 'import numpy as np\n'), ((13691, 13740), 'numpy.linspace', 'np.linspace', (['lowerbound[0]', 'upperbound[0]', 'N_test'], {}), '(lowerbound[0], upperbound[0], N_test)\n', (13702, 13740), True, 'import numpy as np\n'), ((13789, 13838), 'numpy.linspace', 'np.linspace', (['lowerbound[1]', 'upperbound[1]', 'N_test'], {}), '(lowerbound[1], upperbound[1], N_test)\n', (13800, 13838), True, 'import numpy as np\n'), ((14904, 14936), 'numpy.concatenate', 'np.concatenate', (['(x0, xe, xb, xb)'], {}), '((x0, xe, xb, xb))\n', (14918, 14936), True, 'import numpy as np\n'), ((14935, 14967), 'numpy.concatenate', 'np.concatenate', (['(yb, yb, y0, ye)'], {}), '((yb, yb, y0, ye))\n', (14949, 14967), True, 'import numpy as np\n'), ((16415, 16437), 'json.dump', 'json.dump', (['dataDict', 'f'], {}), '(dataDict, f)\n', (16424, 16437), False, 'import time, sys, os, json\n'), ((17770, 17781), 'time.time', 'time.time', ([], {}), '()\n', (17779, 17781), False, 'import time, sys, os, json\n'), ((18216, 18250), 'numpy.linalg.norm', 'np.linalg.norm', (['(u_pred - u_test)', '(2)'], {}), '(u_pred - u_test, 2)\n', (18230, 18250), True, 'import numpy as np\n'), ((18253, 18278), 'numpy.linalg.norm', 'np.linalg.norm', (['u_test', '(2)'], {}), '(u_test, 2)\n', (18267, 18278), True, 'import numpy as np\n'), ((20885, 20921), 'numpy.reshape', 'np.reshape', (['u_test', '(N_test, N_test)'], {}), '(u_test, (N_test, N_test))\n', (20895, 20921), True, 'import numpy as np\n'), ((21209, 21245), 'numpy.reshape', 'np.reshape', 
(['u_pred', '(N_test, N_test)'], {}), '(u_pred, (N_test, N_test))\n', (21219, 21245), True, 'import numpy as np\n'), ((22745, 22777), 'json.dump', 'json.dump', (['infoDict', 'f'], {'indent': '(4)'}), '(infoDict, f, indent=4)\n', (22754, 22777), False, 'import time, sys, os, json\n'), ((2316, 2376), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.ul_x.shape[1]]'}), '(tf.float32, shape=[None, self.ul_x.shape[1]])\n', (2330, 2376), True, 'import tensorflow as tf\n'), ((2404, 2464), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.ur_x.shape[1]]'}), '(tf.float32, shape=[None, self.ur_x.shape[1]])\n', (2418, 2464), True, 'import tensorflow as tf\n'), ((2492, 2552), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.ub_y.shape[1]]'}), '(tf.float32, shape=[None, self.ub_y.shape[1]])\n', (2506, 2552), True, 'import tensorflow as tf\n'), ((2580, 2640), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.ut_y.shape[1]]'}), '(tf.float32, shape=[None, self.ut_y.shape[1]])\n', (2594, 2640), True, 'import tensorflow as tf\n'), ((6476, 6536), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[in_dim, out_dim]'], {'stddev': 'xavier_stddev'}), '([in_dim, out_dim], stddev=xavier_stddev)\n', (6495, 6536), True, 'import tensorflow as tf\n'), ((7092, 7107), 'tensorflow.matmul', 'tf.matmul', (['H', 'W'], {}), '(H, W)\n', (7101, 7107), True, 'import tensorflow as tf\n'), ((7390, 7408), 'tensorflow.gradients', 'tf.gradients', (['u', 'x'], {}), '(u, x)\n', (7402, 7408), True, 'import tensorflow as tf\n'), ((7427, 7447), 'tensorflow.gradients', 'tf.gradients', (['u_x', 'x'], {}), '(u_x, x)\n', (7439, 7447), True, 'import tensorflow as tf\n'), ((7465, 7483), 'tensorflow.gradients', 'tf.gradients', (['u', 'y'], {}), '(u, y)\n', (7477, 7483), True, 'import tensorflow as tf\n'), ((7502, 7522), 'tensorflow.gradients', 'tf.gradients', (['u_y', 'y'], {}), 
'(u_y, y)\n', (7514, 7522), True, 'import tensorflow as tf\n'), ((9199, 9208), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9206, 9208), True, 'import matplotlib.pyplot as plt\n'), ((9442, 9451), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9449, 9451), True, 'import matplotlib.pyplot as plt\n'), ((9669, 9692), 'numpy.abs', 'np.abs', (['(u_pred - u_test)'], {}), '(u_pred - u_test)\n', (9675, 9692), True, 'import numpy as np\n'), ((9741, 9750), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9748, 9750), True, 'import matplotlib.pyplot as plt\n'), ((15129, 15138), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (15136, 15138), True, 'import matplotlib.pyplot as plt\n'), ((18700, 18709), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (18707, 18709), True, 'import matplotlib.pyplot as plt\n'), ((20087, 20096), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (20094, 20096), True, 'import matplotlib.pyplot as plt\n'), ((21007, 21016), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (21014, 21016), True, 'import matplotlib.pyplot as plt\n'), ((21336, 21345), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (21343, 21345), True, 'import matplotlib.pyplot as plt\n'), ((4694, 4742), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.lr_tf'}), '(learning_rate=self.lr_tf)\n', (4716, 4742), True, 'import tensorflow as tf\n'), ((3958, 3980), 'tensorflow.square', 'tf.square', (['self.f_pred'], {}), '(self.f_pred)\n', (3967, 3980), True, 'import tensorflow as tf\n'), ((4415, 4437), 'tensorflow.square', 'tf.square', (['self.f_pred'], {}), '(self.f_pred)\n', (4424, 4437), True, 'import tensorflow as tf\n'), ((5716, 5762), 'tensorflow.zeros', 'tf.zeros', (['[1, layers[l + 1]]'], {'dtype': 'tf.float32'}), '([1, layers[l + 1]], dtype=tf.float32)\n', (5724, 5762), True, 'import tensorflow as tf\n'), ((6891, 6906), 'tensorflow.matmul', 'tf.matmul', (['H', 'W'], {}), '(H, W)\n', (6900, 6906), 
True, 'import tensorflow as tf\n'), ((3877, 3913), 'tensorflow.square', 'tf.square', (['(self.ut_tf - self.ut_pred)'], {}), '(self.ut_tf - self.ut_pred)\n', (3886, 3913), True, 'import tensorflow as tf\n'), ((4330, 4370), 'tensorflow.square', 'tf.square', (['(self.ut_y_tf - self.ut_y_pred)'], {}), '(self.ut_y_tf - self.ut_y_pred)\n', (4339, 4370), True, 'import tensorflow as tf\n'), ((3796, 3832), 'tensorflow.square', 'tf.square', (['(self.ub_tf - self.ub_pred)'], {}), '(self.ub_tf - self.ub_pred)\n', (3805, 3832), True, 'import tensorflow as tf\n'), ((4249, 4285), 'tensorflow.square', 'tf.square', (['(self.ub_tf - self.ub_pred)'], {}), '(self.ub_tf - self.ub_pred)\n', (4258, 4285), True, 'import tensorflow as tf\n'), ((17182, 17193), 'time.time', 'time.time', ([], {}), '()\n', (17191, 17193), False, 'import time, sys, os, json\n'), ((3634, 3670), 'tensorflow.square', 'tf.square', (['(self.ul_tf - self.ul_pred)'], {}), '(self.ul_tf - self.ul_pred)\n', (3643, 3670), True, 'import tensorflow as tf\n'), ((3715, 3751), 'tensorflow.square', 'tf.square', (['(self.ur_tf - self.ur_pred)'], {}), '(self.ur_tf - self.ur_pred)\n', (3724, 3751), True, 'import tensorflow as tf\n'), ((4083, 4123), 'tensorflow.square', 'tf.square', (['(self.ul_x_tf - self.ul_x_pred)'], {}), '(self.ul_x_tf - self.ul_x_pred)\n', (4092, 4123), True, 'import tensorflow as tf\n'), ((4168, 4204), 'tensorflow.square', 'tf.square', (['(self.ur_tf - self.ur_pred)'], {}), '(self.ur_tf - self.ur_pred)\n', (4177, 4204), True, 'import tensorflow as tf\n')] |
# Package metadata (author placeholder values from the repository).
author = "eanorambuena"
author_email = "<EMAIL>"
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from eggdriver import Matrix, Vector
from adam.error import ValueError
class vector(Vector):
    """x = vector("[1 2 3]") -> vector
    Convert a number, polynomial, matrix, vector, or string to a vector.
    If x is a number, return [x]
    Eg:
    input:          output:
    y = [3, 5]      [3 5]
    print(y)
    """
    def __init__(self, *args):
        # Thin wrapper over eggdriver.Vector; all parsing and printing
        # behaviour is inherited unchanged from the base class.
        super().__init__(*args)
class matrix(Matrix):
    """x = matrix('''
    | 1 0 |
    | 2 3 |
    ''') -> matrix
    Convert a number, polynomial, matrix, vector, or string to a matrix.
    If [x y] is a vector, return
    | x |
    | y |
    Eg:
    input:          output:
    y = $           | 1 0 |
    |1 0|           | 2 3 |
    |0 1|
    $
    print(y)
    """
    def __init__(self, *args):
        args = list(args)
        # String input: strip surrounding newlines so the row-delimited
        # literal is parsed cleanly by eggdriver.Matrix.
        if type(args[0]) == str:
            args[0] = args[0].strip("\n")
        super().__init__(*args)
    def dot(self, b):
        # Matrix product via numpy's `@`; each resulting row is re-wrapped
        # in `vector` so the return value is a `matrix` of `vector`s.
        import numpy as np
        a = np.array(self)
        b2 = np.array(b)
        c = a @ b2
        temp =[]
        for i in c:
            temp.append(vector(i))
        return matrix(temp)
    def plus(self, b):
        # Element-wise sum; both operands must share dimensions (n x m).
        if self.n != b.n or self.m != b.m:
            # NOTE(review): adam.error.ValueError is *called*, not raised --
            # presumably it reports the error itself and the method signals
            # failure by returning None. Confirm this is intended.
            ValueError(None, "Matrices must be of equal dimensions to add")
            return None
        temp = []
        for i in range(len(self)):
            u = vector(self[i])
            v = vector(b[i])
            temp.append(u.plus(v))
        return matrix(temp)
    def __str__(self):
        # display() prints the matrix itself; return "" so `print(m)` does
        # not emit a second representation.
        self.display()
        return ""
| [
"numpy.array",
"adam.error.ValueError"
] | [((1945, 1959), 'numpy.array', 'np.array', (['self'], {}), '(self)\n', (1953, 1959), True, 'import numpy as np\n'), ((1967, 1978), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (1975, 1978), True, 'import numpy as np\n'), ((2135, 2198), 'adam.error.ValueError', 'ValueError', (['None', '"""Matrices must be of equal dimensions to add"""'], {}), "(None, 'Matrices must be of equal dimensions to add')\n", (2145, 2198), False, 'from adam.error import ValueError\n')] |
import numpy as np
def one_hot(y, nb_classes):
    """Convert a vector of integer labels into a one-hot matrix.

    Arguments:
        y: array-like of integer class labels.
        nb_classes: number of classes; when falsy (None/0), it is inferred
            as ``max(y) + 1``.

    Returns:
        numpy.ndarray of shape ``(len(y), nb_classes)`` with a single 1.0
        per row and 0.0 elsewhere.
    """
    labels = np.asarray(y, dtype='int32')
    if not nb_classes:
        nb_classes = np.max(labels) + 1
    # Row i of the identity matrix is exactly the one-hot encoding of i.
    return np.eye(nb_classes)[labels]
def pad_sequences(sequences, maxlen=None, dtype='int32', padding='post',
                  truncating='post', value=0.):
    """Pad and/or truncate sequences to a common length.

    If `maxlen` is given, every sequence is padded or truncated to that
    length; otherwise the length of the longest sequence is used. Both
    padding and truncation can be applied at the start ('pre') or at the
    end ('post') of each sequence; the default is 'post' for both.

    Arguments:
        sequences: list of sequences (each a list/array of values).
        maxlen: int, target length (default: longest sequence length).
        dtype: dtype of the returned array.
        padding: 'pre' or 'post' -- where to add the fill value.
        truncating: 'pre' or 'post' -- which end to cut from.
        value: float, the fill value.

    Returns:
        x: numpy array of shape (number_of_sequences, maxlen).

    Raises:
        ValueError: if `padding` or `truncating` is not 'pre'/'post'.
    """
    lengths = [len(s) for s in sequences]
    nb_samples = len(sequences)
    if maxlen is None:
        maxlen = np.max(lengths)
    x = (np.ones((nb_samples, maxlen)) * value).astype(dtype)
    for idx, s in enumerate(sequences):
        if len(s) == 0:
            continue  # empty list was found; leave the row filled with `value`
        if truncating == 'pre':
            trunc = s[-maxlen:]
        elif truncating == 'post':
            trunc = s[:maxlen]
        else:
            # BUG FIX: report the offending `truncating` value, not `padding`.
            raise ValueError("Truncating type '%s' not understood" % truncating)
        if padding == 'post':
            x[idx, :len(trunc)] = trunc
        elif padding == 'pre':
            x[idx, -len(trunc):] = trunc
        else:
            raise ValueError("Padding type '%s' not understood" % padding)
    return x
def seq_padding(X, padding=0):
    """Right-pad every sequence in X with `padding` up to the length of
    the longest one, and stack the result into a numpy array."""
    longest = max(len(seq) for seq in X)
    padded = []
    for seq in X:
        shortfall = longest - len(seq)
        if shortfall > 0:
            padded.append(np.concatenate([seq, [padding] * shortfall]))
        else:
            # Already the maximum length: keep the sequence untouched.
            padded.append(seq)
    return np.array(padded)
def shuffle(*arrs, seed=0):
    """Shuffle one or more equal-length arrays in unison.

    Every array is reordered with the *same* random permutation, so
    corresponding rows stay aligned (e.g. features and their labels).

    Arguments:
        *arrs: one or more array-likes of equal length along axis 0.
        seed (int): seed for the random number generator, making the
            shuffle reproducible (default 0).

    Returns:
        A single shuffled array when one input is given, otherwise a
        tuple of shuffled arrays (empty tuple for no input).

    Raises:
        ValueError: if the inputs do not all have the same length.
    """
    if not arrs:
        return ()
    arrs = [np.asarray(arr) for arr in arrs]
    length = len(arrs[0])
    if any(len(arr) != length for arr in arrs):
        raise ValueError('All arrays must have the same length along axis 0')
    np.random.seed(seed)
    # BUG FIX: the old code did np.random.permutation(arrs), which only
    # reordered the arrays themselves (and was a no-op for a single array).
    # Permute row indices once and apply them to every array instead.
    p = np.random.permutation(length)
    shuffled = tuple(arr[p] for arr in arrs)
    return shuffled[0] if len(shuffled) == 1 else shuffled
| [
"numpy.ones",
"numpy.asarray",
"numpy.max",
"numpy.random.seed",
"numpy.random.permutation"
] | [((160, 188), 'numpy.asarray', 'np.asarray', (['y'], {'dtype': '"""int32"""'}), "(y, dtype='int32')\n", (170, 188), True, 'import numpy as np\n'), ((2120, 2140), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2134, 2140), True, 'import numpy as np\n'), ((2149, 2176), 'numpy.random.permutation', 'np.random.permutation', (['arrs'], {}), '(arrs)\n', (2170, 2176), True, 'import numpy as np\n'), ((993, 1008), 'numpy.max', 'np.max', (['lengths'], {}), '(lengths)\n', (999, 1008), True, 'import numpy as np\n'), ((233, 242), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (239, 242), True, 'import numpy as np\n'), ((1019, 1048), 'numpy.ones', 'np.ones', (['(nb_samples, maxlen)'], {}), '((nb_samples, maxlen))\n', (1026, 1048), True, 'import numpy as np\n')] |
import sys
import pygame
def signal_handler(sig, frame):
    """Signal handler: tear down the pygame display and exit cleanly.

    Args:
        sig: the received signal number (e.g. SIGINT).
        frame: current stack frame at the time of the signal (unused).
    """
    print('Procedure terminated!')
    # Close the display surface before shutting pygame down entirely.
    pygame.display.quit()
    pygame.quit()
    sys.exit(0)
from scipy.interpolate import interp1d
## This function provides a prospective lateral-coordinate generator w.r.t possible longitudinal coordinates
## for the ego vehicle in Scenario 0, which can be taken as a demonstration
def get_path():
    """Build the demonstration path generator for the ego vehicle in
    Scenario 0: a cubic interpolator mapping a prospective longitudinal
    coordinate to the corresponding lateral coordinate."""
    anchor_x = np.array([200, 212.5, 225, 237.5, 250, 300])
    anchor_y = np.array([335, 336.5, 338, 337.5, 335, 334])
    return interp1d(anchor_x, anchor_y, kind='cubic')
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
def weights_init_normal(m):
    """Re-initialize every Linear layer of a module.

    Weights are drawn from a normal distribution with standard deviation
    1/sqrt(fan_in); biases are zeroed. Non-Linear layers are untouched.
    """
    if m.__class__.__name__.find('Linear') != -1:
        fan_in = m.in_features
        # Scale by the layer's input width so activations keep unit variance.
        m.weight.data.normal_(0.0, 1 / np.sqrt(fan_in))
        m.bias.data.fill_(0)
class NET(nn.Module):
    """Small CNN encoder: two conv + max-pool stages followed by a ReLU'd
    fully-connected projection to a 128-dimensional feature vector."""
    def __init__(self):
        super(NET, self).__init__()
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv2d(1, 6, 6)
        self.conv2 = nn.Conv2d(6, 16, 6)
        self.fc = nn.Linear(16 * 7 * 16, 128)
    def forward(self, x):
        # Insert a channel dimension: (N, H, W) -> (N, 1, H, W).
        out = x.unsqueeze(1)
        out = F.max_pool2d(self.conv1(out), 2)
        out = F.max_pool2d(self.conv2(out), 2)
        # Flatten the remaining feature maps for the linear head.
        out = out.view(out.size(0), 16 * 16 * 7)
        return self.relu(self.fc(out))
class RND(nn.Module):
    """Random-network-distillation-style intrinsic reward module.

    `fix` is a frozen random target network and `estimator` is trained
    online to predict its output; the prediction error is converted into
    a bounded curiosity reward by `get_reward_i`.
    """
    def __init__(self, use_cuda = True):
        super(RND, self).__init__()
        self.use_cuda = use_cuda
        # Target network (never optimised) and online predictor network.
        self.fix = NET()
        self.estimator = NET()
        self.fix.apply(weights_init_normal)
        self.estimator.apply(weights_init_normal)
        self.criterion = nn.MSELoss()
        # Only the estimator's parameters are passed to the optimiser.
        self.optim = optim.Adam(self.estimator.parameters(),0.0001)
        if self.use_cuda:
            self.fix.cuda()
            self.estimator.cuda()
    def forward(self, state):
        """Run one online-distillation step on `state`.

        Note: this has side effects -- every call performs a gradient
        update on the estimator.

        Returns:
            (error, mu, std): the MSE between estimator and target
            outputs, and the mean/std of the target features.
        """
        # assumes `state` is array-like with 45*80 elements -- TODO confirm
        state = torch.tensor(state, dtype=torch.float).reshape(1,45,80)
        if self.use_cuda:
            state = state.cuda()
        target = self.fix.forward(state)
        estimate = self.estimator.forward(state)
        loss = self.criterion(estimate, target)
        self.optim.zero_grad()
        loss.backward()
        self.optim.step()
        error = loss.item()
        mu = torch.mean(target)
        std = torch.std(target)
        return error, mu.detach().cpu().numpy(), std.detach().cpu().numpy()
    def get_reward_i(self, state):
        """Map the prediction error to an intrinsic reward clipped to [1, 2]."""
        error, mu, std = self.forward(state)
        alpha = 1+(error-mu)/std
        x = min( max(alpha, 1), 2)
        return x
| [
"torch.nn.ReLU",
"numpy.sqrt",
"pygame.quit",
"torch.mean",
"scipy.interpolate.interp1d",
"torch.nn.Conv2d",
"numpy.array",
"pygame.display.quit",
"torch.nn.MSELoss",
"torch.tensor",
"torch.nn.Linear",
"sys.exit",
"torch.std"
] | [((96, 117), 'pygame.display.quit', 'pygame.display.quit', ([], {}), '()\n', (115, 117), False, 'import pygame\n'), ((122, 135), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (133, 135), False, 'import pygame\n'), ((140, 151), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (148, 151), False, 'import sys\n'), ((417, 461), 'numpy.array', 'np.array', (['[200, 212.5, 225, 237.5, 250, 300]'], {}), '([200, 212.5, 225, 237.5, 250, 300])\n', (425, 461), True, 'import numpy as np\n'), ((479, 523), 'numpy.array', 'np.array', (['[335, 336.5, 338, 337.5, 335, 334]'], {}), '([335, 336.5, 338, 337.5, 335, 334])\n', (487, 523), True, 'import numpy as np\n'), ((539, 595), 'scipy.interpolate.interp1d', 'interp1d', (['waypoint_x_mark', 'waypoint_y_mark'], {'kind': '"""cubic"""'}), "(waypoint_x_mark, waypoint_y_mark, kind='cubic')\n", (547, 595), False, 'from scipy.interpolate import interp1d\n'), ((1304, 1313), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1311, 1313), True, 'import torch.nn as nn\n'), ((1335, 1353), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(6)', '(6)'], {}), '(1, 6, 6)\n', (1344, 1353), True, 'import torch.nn as nn\n'), ((1373, 1392), 'torch.nn.Conv2d', 'nn.Conv2d', (['(6)', '(16)', '(6)'], {}), '(6, 16, 6)\n', (1382, 1392), True, 'import torch.nn as nn\n'), ((1409, 1436), 'torch.nn.Linear', 'nn.Linear', (['(16 * 7 * 16)', '(128)'], {}), '(16 * 7 * 16, 128)\n', (1418, 1436), True, 'import torch.nn as nn\n'), ((2060, 2072), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2070, 2072), True, 'import torch.nn as nn\n'), ((2713, 2731), 'torch.mean', 'torch.mean', (['target'], {}), '(target)\n', (2723, 2731), False, 'import torch\n'), ((2746, 2763), 'torch.std', 'torch.std', (['target'], {}), '(target)\n', (2755, 2763), False, 'import torch\n'), ((1129, 1139), 'numpy.sqrt', 'np.sqrt', (['y'], {}), '(y)\n', (1136, 1139), True, 'import numpy as np\n'), ((2302, 2340), 'torch.tensor', 'torch.tensor', (['state'], {'dtype': 'torch.float'}), '(state, 
dtype=torch.float)\n', (2314, 2340), False, 'import torch\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.colors as colors
from scipy.integrate import cumtrapz, quad
from scipy.interpolate import interp1d
from scipy.stats import chi2
import PlottingTools as PT
import argparse
import os
#---------------
# MATPLOTLIB settings
mpl.rcParams.update({'font.size': 18,'font.family':'serif'})
# Tick geometry: long major / short minor ticks, drawn inwards on all four sides.
mpl.rcParams['xtick.major.size'] = 7
mpl.rcParams['xtick.major.width'] = 1
mpl.rcParams['xtick.minor.size'] = 3
mpl.rcParams['xtick.minor.width'] = 1
mpl.rcParams['ytick.major.size'] = 7
mpl.rcParams['ytick.major.width'] = 1
mpl.rcParams['ytick.minor.size'] = 3
mpl.rcParams['ytick.minor.width'] = 1
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
mpl.rcParams['xtick.top'] = True
mpl.rcParams['ytick.right'] = True
mpl.rcParams['font.family'] = 'serif'
# Render all figure text with LaTeX.
mpl.rc('text', usetex=True)
mpl.rcParams['legend.edgecolor'] = 'inherit'
#---------------
# Command-line options: which result set to plot and for which detector site.
parser = argparse.ArgumentParser(description='...')
parser.add_argument('-runID', '--runID', help='Text ID for the results to be plotted', type=str, default="Final")
parser.add_argument('-m_x', '--m_x', help='DM mass in GeV', type=float, default = 0.2)
parser.add_argument('-hemisphere','--hemisphere', help='Hemisphere of the experiment (N or S)', type=str, default="N")
args = parser.parse_args()
runID = args.runID
hemisphere = args.hemisphere
m_x = args.m_x #DM mass in GeV
m_str = str(int(m_x*1000)) #String of DM mass in MeV
# Figure with a log-scale cross-section axis.
fig = plt.figure(figsize = (7,5))
ax = plt.gca()
ax.set_xscale("log")
#ax.set_yscale("log")
plt.xlabel(r"$\sigma_p^\mathrm{SI}$ [cm$^2$]")
plt.ylabel(r"$\rho_\chi$ [GeV/cm$^3$]")
# Title text depends on which hemisphere's detector latitude is used.
if (hemisphere == "N"):
    lat_text = "Northern Hemisphere ($46^\circ$N)"
elif (hemisphere == "S"):
    lat_text = "Southern Hemisphere ($37^\circ$S)"
#45.5 N
#37.1 S
plt.title(r"$m_\chi' = " + m_str + " \,\mathrm{MeV}$ (fixed); "+ lat_text)# + data_str)
#List of cross section to plot for
sig_list = np.logspace(-35.5, -30.5, 11)
print("> Plotting results for runID:", runID, " (Hemisphere:",hemisphere, ")")
# Overlay the energy-only (runID suffix "3") and energy+timing (suffix "1")
# contours for every benchmark cross section.
for i, sig in enumerate(sig_list):
    print("    Cross section:", sig)
    IDstr = runID + "3_" + hemisphere
    PT.plotContour_nomodulation(m_x, sig, IDstr, col="C4", ls='dashed', fixed_mass=True)
    IDstr = runID + "1_" + hemisphere
    PT.plotContour_modulation(m_x, sig, IDstr, col="C0", ls='solid', fixed_mass=True)
plt.plot(sig, 0.4, color='k', marker="+", mew=2, markersize=8)
# Legend proxies (drawn off-axes) standing in for the filled contour styles.
proxy_w = plt.Rectangle((-1,-1),1,1,fc = 'C0', alpha=0.8, edgecolor='C0', linewidth=1.5, linestyle='-')
proxy_wo = plt.Rectangle((-1,-1),1,1,fc = 'C4', alpha=0.8, edgecolor='C4', linewidth=1.5, linestyle='--')
plt.legend([proxy_wo, proxy_w], ["Energy-only", "Energy+timing"], loc='upper right',framealpha=0.9)
#plt.axvline(1e-34, linestyle='--', color='k')
#plt.axvline(1e-33, linestyle='--', color='k')
plt.xlim(1e-36, 2e-30)
#plt.xlim(3e-36, 3e-34)
plt.ylim(0, 1e0)
#plt.xticks(np.geomspace(1e-37, 1e-30, 8))
plt.savefig("../plots/contour_" + runID + "_" + m_str + "_" + hemisphere + "_all_fixedmass.pdf", bbox_inches='tight')
plt.show()
| [
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"matplotlib.rcParams.update",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"PlottingTools.plotContour_modulation",
"PlottingTools.plotContour_nomodulation... | [((319, 381), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (["{'font.size': 18, 'font.family': 'serif'}"], {}), "({'font.size': 18, 'font.family': 'serif'})\n", (338, 381), True, 'import matplotlib as mpl\n'), ((865, 892), 'matplotlib.rc', 'mpl.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (871, 892), True, 'import matplotlib as mpl\n'), ((967, 1009), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""..."""'}), "(description='...')\n", (990, 1009), False, 'import argparse\n'), ((1500, 1526), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 5)'}), '(figsize=(7, 5))\n', (1510, 1526), True, 'import matplotlib.pyplot as plt\n'), ((1533, 1542), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1540, 1542), True, 'import matplotlib.pyplot as plt\n'), ((1587, 1634), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\sigma_p^\\\\mathrm{SI}$ [cm$^2$]"""'], {}), "('$\\\\sigma_p^\\\\mathrm{SI}$ [cm$^2$]')\n", (1597, 1634), True, 'import matplotlib.pyplot as plt\n'), ((1634, 1674), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\rho_\\\\chi$ [GeV/cm$^3$]"""'], {}), "('$\\\\rho_\\\\chi$ [GeV/cm$^3$]')\n", (1644, 1674), True, 'import matplotlib.pyplot as plt\n'), ((1847, 1924), 'matplotlib.pyplot.title', 'plt.title', (['("$m_\\\\chi\' = " + m_str + \' \\\\,\\\\mathrm{MeV}$ (fixed); \' + lat_text)'], {}), '("$m_\\\\chi\' = " + m_str + \' \\\\,\\\\mathrm{MeV}$ (fixed); \' + lat_text)\n', (1856, 1924), True, 'import matplotlib.pyplot as plt\n'), ((1982, 2011), 'numpy.logspace', 'np.logspace', (['(-35.5)', '(-30.5)', '(11)'], {}), '(-35.5, -30.5, 11)\n', (1993, 2011), True, 'import numpy as np\n'), ((2503, 2603), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(-1, -1)', '(1)', '(1)'], {'fc': '"""C0"""', 'alpha': '(0.8)', 'edgecolor': '"""C0"""', 'linewidth': '(1.5)', 'linestyle': '"""-"""'}), "((-1, -1), 1, 1, fc='C0', alpha=0.8, edgecolor='C0', 
linewidth\n =1.5, linestyle='-')\n", (2516, 2603), True, 'import matplotlib.pyplot as plt\n'), ((2608, 2709), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(-1, -1)', '(1)', '(1)'], {'fc': '"""C4"""', 'alpha': '(0.8)', 'edgecolor': '"""C4"""', 'linewidth': '(1.5)', 'linestyle': '"""--"""'}), "((-1, -1), 1, 1, fc='C4', alpha=0.8, edgecolor='C4', linewidth\n =1.5, linestyle='--')\n", (2621, 2709), True, 'import matplotlib.pyplot as plt\n'), ((2718, 2823), 'matplotlib.pyplot.legend', 'plt.legend', (['[proxy_wo, proxy_w]', "['Energy-only', 'Energy+timing']"], {'loc': '"""upper right"""', 'framealpha': '(0.9)'}), "([proxy_wo, proxy_w], ['Energy-only', 'Energy+timing'], loc=\n 'upper right', framealpha=0.9)\n", (2728, 2823), True, 'import matplotlib.pyplot as plt\n'), ((2924, 2946), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(1e-36)', '(2e-30)'], {}), '(1e-36, 2e-30)\n', (2932, 2946), True, 'import matplotlib.pyplot as plt\n'), ((2971, 2987), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (2979, 2987), True, 'import matplotlib.pyplot as plt\n'), ((3032, 3153), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../plots/contour_' + runID + '_' + m_str + '_' + hemisphere +\n '_all_fixedmass.pdf')"], {'bbox_inches': '"""tight"""'}), "('../plots/contour_' + runID + '_' + m_str + '_' + hemisphere +\n '_all_fixedmass.pdf', bbox_inches='tight')\n", (3043, 3153), True, 'import matplotlib.pyplot as plt\n'), ((3150, 3160), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3158, 3160), True, 'import matplotlib.pyplot as plt\n'), ((2206, 2294), 'PlottingTools.plotContour_nomodulation', 'PT.plotContour_nomodulation', (['m_x', 'sig', 'IDstr'], {'col': '"""C4"""', 'ls': '"""dashed"""', 'fixed_mass': '(True)'}), "(m_x, sig, IDstr, col='C4', ls='dashed',\n fixed_mass=True)\n", (2233, 2294), True, 'import PlottingTools as PT\n'), ((2338, 2424), 'PlottingTools.plotContour_modulation', 'PT.plotContour_modulation', (['m_x', 'sig', 'IDstr'], {'col': 
'"""C0"""', 'ls': '"""solid"""', 'fixed_mass': '(True)'}), "(m_x, sig, IDstr, col='C0', ls='solid', fixed_mass\n =True)\n", (2363, 2424), True, 'import PlottingTools as PT\n'), ((2425, 2487), 'matplotlib.pyplot.plot', 'plt.plot', (['sig', '(0.4)'], {'color': '"""k"""', 'marker': '"""+"""', 'mew': '(2)', 'markersize': '(8)'}), "(sig, 0.4, color='k', marker='+', mew=2, markersize=8)\n", (2433, 2487), True, 'import matplotlib.pyplot as plt\n')] |
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
"""
A job class for solving the time-independent Schroedinger equation on a discrete mesh.
"""
from pyiron_base import HasStorage
import numpy as np
from typing import Union, List, Callable, Any
BoundsList = List[Union[float, int, List[Union[float, int]]]]
def callable_to_array(method):
    """If the first argument of the method is callable, replaces it with the callable evaluated on self."""
    def wrapper(self, fnc, **kwargs):
        value = fnc(self) if callable(fnc) else fnc
        return method(self, value, **kwargs)
    return wrapper
def has_default_accuracy(method):
    """Replaces the `accuracy` argument with the instance attribute of the same name if `accuracy` is None."""
    def wrapper(self, fnc, accuracy=None, **kwargs):
        if accuracy is None:
            accuracy = self.accuracy
        if accuracy < 2 or accuracy % 2 != 0:
            raise ValueError(f'Expected an even, positive accuracy but got {accuracy}')
        return method(self, fnc, accuracy=accuracy, **kwargs)
    return wrapper
def takes_scalar_field(method):
    """Makes sure the first argument has the right shape to be a scalar field on the mesh."""
    def wrapper(self, scalar_field, **kwargs):
        field = np.array(scalar_field)
        # Guard clause: reject anything whose shape does not match the mesh divisions.
        if not np.all(field.shape == self.divisions):
            raise TypeError(f'Argument for {method.__name__} not recognized: should be a scalar field, or function '
                            f'taking the mesh and returning a scalar field.')
        return method(self, field, **kwargs)
    return wrapper
def takes_vector_field(method):
    """Makes sure the first argument has the right shape to be a vector field on the mesh."""
    def wrapper(self, vector_field, **kwargs):
        field = np.array(vector_field)
        # Guard clause: reject anything whose shape does not match the mesh shape.
        if not np.all(field.shape == self.shape):
            raise TypeError(f'Argument for {method.__name__} not recognized: should be a vector field, or function '
                            f'taking the mesh and returning a vector field.')
        return method(self, field, **kwargs)
    return wrapper
class RectMesh(HasStorage):
    """
    A helper class for building rectangular meshgrids in n-dimensions.
    **Assumes periodic boundary conditions.**
    Mesh corresponds to numpy meshgrid with `indexing='ij'` *not* the default `'xy'` indexing. This gives consistency
    in behaviour for meshes in >2 dimensions.
    Example 1-D)
    >>> mesh = RectMesh(2, 2, simplify_1d=True)
    >>> mesh.mesh
    array([0., 1.])
    >>> mesh.steps
    array(1.)
    Example 2-D)
    >>> mesh = RectMesh(bounds=[[0, 1], [2, 5]], divisions=[2, 3])
    >>> mesh.mesh
    array([[[0. , 0. , 0. ],
            [0.5, 0.5, 0.5]],
    <BLANKLINE>
           [[2. , 3. , 4. ],
            [2. , 3. , 4. ]]])
    >>> mesh.steps
    array([0.5, 1. ])
    Note: To get a 1D mesh starting somewhere other than 0, you need an extra set of brackets, i.e.
    `bounds=[[start, end]]` as simply using `[start, end]` will actually give you a 2D array `[[0, start], [0, end]]`!
    Attributes:
        bounds (numpy.ndarray): The start and end point for each dimension.
        divisions (numpy.ndarray): How many sampling points in each dimension, i.e. the shape of a scalar field.
        dim (int): The dimensionality of the field.
        shape (tuple): The shape of the mesh, i.e. the shape of a vector field.
        mesh (numpy.ndarray): The spatial sampling points.
        steps (numpy.ndarray/float): The step size in each dimension.
        lengths (numpy.ndarray/float): How large the domain is in each dimension.
        volume (float): The product of the lengths in all dimensions.
        accuracy (int): An even value given the number of stencil points to use in each dimension for calculating
            derivatives. (Default is 4.)
        simplify_1d (bool): Whether to reduce dimension whenever the first dimension is redundant, e.g. [[1,2]]->[1,2].
    Methods:
        derivative: Calculate the nth order derivative of a scalar field to get a vector field.
        grad: Calculate the first derivative of a scalar field to get a vector field.
        div: Calculate the divergence of a vector field to get a scalar field.
        laplacian: Calculate the Laplacian of a scalar field to get a scalar field.
        curl: Calculate the curl of a vector field to get a vector field. (Only for 3d!)
    Note: All the mathematical operations can take either a numpy array of the correct dimension *or* a callable that
        takes the mesh itself as an argument and returns a numpy array of the correct dimension, where 'correct' is
        refering to scalar field (mesh divisions) or vector field (mesh shape).
    Warning: Operations over the actual mesh points are all nicely vectorized, but this is by no means highly optimized
        for numeric efficiency! If you want to do some really heavy lifting, this is probably the wrong tool.
    TODO: Include aperiodic boundary conditions, e.g. padding. Probably by modifying the decorators.
    """
    def __init__(
            self,
            bounds: Union[float, int, BoundsList, np.ndarray] = 1,
            divisions: Union[int, List[int], np.ndarray] = 1,
            accuracy: int = 4,
            simplify_1d: bool = False
    ):
        """
        Instantiate a rectangular mesh.
        Args:
            bounds (float/list/numpy.ndarray): The upper and lower bounds for each dimension of the mesh. A single
                value, L, creates a mesh on [0, L]. A list/array with shape (n<=3,) creates an n-dimensional mesh with
                on [0, L_i] for each of the L_i values given. A two-dimensional list/array should have shape (n<=3,2)
                and gives lower and upper bounds for each dimension, i.e. `[[0.5, 1]]` makes a 1D mesh on [0.5, 1].
            divisions (int/list/numpy.ndarray): How many grid divisions to use in each dimension. An integer will be
                mapped up to give that number of divisions on all dimensions provided in `bounds`, otherwise the
                dimensionality between the two arguments must agree.
            accuracy (int): An even value given the number of stencil points to use in each dimension for calculating
                derivatives. (Default is 4.)
            simplify_1d (bool): Whether to simplify the output for 1D meshes so they have shape (n,) instead of (1, n).
                (Default is False.)
        """
        super().__init__()
        bounds, divisions = self._clean_input(bounds, divisions)
        self.storage.bounds = bounds
        self.storage.divisions = divisions
        self.storage.accuracy = accuracy
        self.storage.simplify_1d = simplify_1d
        self._build_mesh()
    @property
    def bounds(self) -> np.ndarray:
        return self.storage.bounds
    @bounds.setter
    def bounds(self, new_bounds: Union[float, int, BoundsList, np.ndarray]):
        new_bounds, _ = self._clean_input(new_bounds, self.divisions)
        self.storage.bounds = new_bounds
        self._build_mesh()
    @property
    def divisions(self) -> Union[int, np.ndarray]:
        return self.storage.divisions
    @divisions.setter
    def divisions(self, new_divisions: Union[int, List[int], np.ndarray]):
        _, new_divisions = self._clean_input(self.bounds, new_divisions)
        self.storage.divisions = new_divisions
        self._build_mesh()
    @property
    def accuracy(self) -> int:
        """
        The number of points to use in the stencil for central difference methods. Corresponds to O(h^accuracy)
        precision in the derivative operator, where h is the mesh spacing.
        """
        return self.storage.accuracy
    @accuracy.setter
    def accuracy(self, n: int):
        if n % 2 != 0:
            # BUG FIX: interpolate the offending value `n`; the message previously
            # hard-coded `{2}` and always reported "got 2".
            raise ValueError(f'Expected an even integer but got {n}')
        self.storage.accuracy = int(n)
    @property
    def simplify_1d(self) -> bool:
        return self.storage.simplify_1d
    @simplify_1d.setter
    def simplify_1d(self, simplify: bool):
        self.storage.simplify_1d = simplify
    def _simplify_1d(self, x: np.ndarray) -> Union[int, float, np.ndarray]:
        # Collapse the redundant leading axis of 1D meshes when requested.
        if len(x) == 1 and self.storage.simplify_1d:
            return np.squeeze(x)
        else:
            return x
    @property
    def mesh(self) -> np.ndarray:
        return self._simplify_1d(self.storage.mesh)
    @property
    def steps(self) -> np.ndarray:
        """Spacing between each mesh point."""
        return self._simplify_1d(self.storage.steps)
    @property
    def shape(self) -> tuple:
        return self.mesh.shape
    @property
    def dim(self) -> int:
        """Dimension of the box, i.e. the zeroth entry of the shape."""
        return self.shape[0]
    @property
    def lengths(self) -> Union[float, np.ndarray]:
        """Edge lengths for each side of the box."""
        return self._simplify_1d(self.bounds.ptp(axis=-1))
    @property
    def volume(self):
        """Volume encompassed by all the dimensions, i.e. product of the lengths."""
        return self.lengths.prod()
    def __len__(self):
        return self.divisions.prod()
    def _build_mesh(self) -> None:
        # endpoint=False because the mesh is periodic: the upper bound wraps to the lower.
        linspaces = []
        steps = []
        for bound, ndiv in zip(self.storage.bounds, self.storage.divisions):
            space, step = np.linspace(bound[0], bound[1], num=ndiv, endpoint=False, retstep=True)
            linspaces.append(space)
            steps.append(step)
        mesh = np.meshgrid(*linspaces, indexing='ij')
        self.storage.steps = np.array(steps)
        self.storage.mesh = np.array(mesh)
    def _clean_input(
            self,
            bounds: Union[float, int, BoundsList, np.ndarray],
            divisions: Union[int, List[int], np.ndarray]
    ) -> (np.ndarray, np.ndarray):
        if not hasattr(bounds, '__len__'):
            bounds = [[0, bounds]]
        bounds = np.array(bounds)  # Enforce array to guarantee `shape`
        if len(bounds.shape) == 1:
            bounds = np.array([[0, b] for b in bounds])
        elif len(bounds.shape) > 2 or bounds.shape[-1] > 2:
            raise ValueError(f'Bounds must be of the shape (n,) or (n, 2), but got {bounds.shape}')
        if np.any(np.isclose(bounds.ptp(axis=-1), 0)):
            raise ValueError(f'Bounds must be finite length in all dimensions, but found lengths {bounds.ptp(axis=-1)}')
        if hasattr(divisions, '__len__'):
            if len(divisions) != len(bounds):
                raise ValueError(
                    f'Divisions must be a single value or have the same length as bounds but got {len(divisions)} and '
                    f'{len(bounds)}'
                )
            elif np.any([not self._is_int(div) for div in divisions]):
                raise TypeError(f'Divisions must all be int-like, but got {divisions}')
        elif self._is_int(divisions):
            divisions = len(bounds) * [divisions]
        else:
            raise TypeError(
                f'Expected divisions to be int-like or a list-like objects of ints the same length as bounds, but got '
                f'{divisions}'
            )
        return bounds, np.array(divisions)
    @staticmethod
    def _is_int(val: Any) -> bool:
        return np.issubdtype(type(val), np.integer)
    @staticmethod
    def _get_central_difference_coefficients(m, n):
        """
        Coefficients for central finite difference numeric differentiation.
        Args:
            m (int): Order of differential.
            n (int): Accuracy of method, i.e. precision as a power of grid spacing.
        Returns:
            (numpy.ndarray): Coefficients for numeric differentials sorted by order of differential and accuracy of
                method.
        """
        if n % 2 != 0:
            raise ValueError('`n` must be an even number')
        p = int(0.5 * (m + 1)) - 1 + int(0.5 * n)
        b = np.zeros(2 * p + 1)
        b[m] = np.prod(np.arange(m) + 1)
        # Solve the Vandermonde system of the Taylor expansions at offsets -p..p.
        return np.linalg.solve(np.arange(-p, p + 1) ** np.arange(0, 2 * p + 1)[:, None], b)
    # OPERATIONS:
    @callable_to_array
    @takes_scalar_field
    def _numpy_gradient(self, scalar_field: Union[Callable, np.ndarray], axis=None, edge_order=1):
        return np.gradient(scalar_field, *self.steps, axis=axis, edge_order=edge_order)
    @callable_to_array
    @has_default_accuracy
    @takes_scalar_field
    def derivative(self, scalar_field: Union[Callable, np.ndarray], order: int = 1, accuracy: int = None):
        """
        Numeric differential for a uniform grid using the central difference method.
        Args:
            scalar_field (function/numpy.ndarray): A function taking this `RectMesh` object and returning a scalar
                field, or the scalar field as an array.
            order (int): The derivative to take. (Default is 1, take first derivative.)
            accuracy (int): The accuracy of the method in O(grid spacing). (Default is None, which falls back on the
                class attribute of the same name.)
        Returns:
            (numpy.ndarray): The vector field derivative of the scalar input at each point on the grid in each
                dimension. E.g. for a scalar field with shape `(nx, ny, nz)`, returns shape `(3, nx, ny, nz)`.
        Raises:
            (KeyError): If the requested order or accuracy cannot be found.
        """
        coefficients = self._get_central_difference_coefficients(order, accuracy)
        max_roll = (len(coefficients) - 1) / 2
        # Rolling by +k brings f(x - k*h) to position x, implementing the periodic stencil.
        rolls = np.flip(np.arange(-max_roll, max_roll + 1, dtype=int))
        res = np.zeros(self.shape)
        for ax, h in enumerate(self.steps):
            for n, c in enumerate(coefficients):
                if np.isclose(c, 0):
                    continue
                res[ax] += c * np.roll(scalar_field, rolls[n], axis=ax)
            res[ax] /= h ** order
        return res
    @callable_to_array
    @has_default_accuracy
    @takes_scalar_field
    def grad(self, scalar_field: Union[Callable, np.ndarray], accuracy: int = None) -> np.array:
        """
        Gradient of a scalar field.
        Args:
            scalar_field (function/numpy.ndarray): A function taking this `RectMesh` object and returning a scalar
                field, or the scalar field as an array.
            accuracy (int): The order of approximation in grid spacing. See `central_difference_table` for all choices.
                (Default is None, which falls back on the class attribute of the same name.)
        Returns:
            (numpy.ndarray): The vector field gradient of the scalar input at each point on the mesh in each dimension.
                E.g. for a scalar field with shape `(nx, ny, nz)`, returns shape `(3, nx, ny, nz)`.
        """
        return self.derivative(scalar_field, order=1, accuracy=accuracy)
    @callable_to_array
    @has_default_accuracy
    @takes_vector_field
    def div(self, vector_field: Union[Callable, np.ndarray], accuracy: int = None) -> np.array:
        """
        Divergence of a vector field.
        Args:
            vector_field (function/numpy.ndarray): A function taking this `RectMesh` object and returning a vector
                field, or the vector field as an array.
            accuracy (int): The order of approximation in grid spacing. See `central_difference_table` for all choices.
                (Default is None, which falls back on the class attribute of the same name.)
        Returns:
            (numpy.ndarray): The scalar field divergence of the vector input at each point on the mesh.
        """
        return np.sum([self.derivative(vector_field[ax], accuracy=accuracy)[ax] for ax in np.arange(self.dim)], axis=0)
    @callable_to_array
    @has_default_accuracy
    @takes_scalar_field
    def laplacian(self, scalar_field: Union[Callable, np.ndarray], accuracy: int = None) -> np.array:
        """
        Discrete Laplacian operator applied to a given function or scalar field.
        Args:
            scalar_field (function/numpy.ndarray): A function taking this `RectMesh` object and returning a scalar
                field, or the scalar field as an array.
            accuracy (int): The order of approximation in grid spacing. See `central_difference_table` for all choices.
                (Default is None, which falls back on the class attribute of the same name.)
        Returns:
            (numpy.ndarray): The scalar field Laplacian of the scalar input at each point on the mesh.
        """
        return self.derivative(scalar_field, order=2, accuracy=accuracy).sum(axis=0)
    @callable_to_array
    @has_default_accuracy
    @takes_vector_field
    def curl(self, vector_field: Union[Callable, np.ndarray], accuracy: int = None) -> np.array:
        """
        Curl of a vector field.
        Note: Only works for 3d vector fields!
        Args:
            vector_field (function/numpy.ndarray): A function taking this `RectMesh` object and returning a 3d vector
                field, or the 3d vector field as an array.
            accuracy (int): The order of approximation in grid spacing. See `central_difference_table` for all choices.
                (Default is None, which falls back on the class attribute of the same name.)
        Returns:
            (numpy.ndarray): The vector field curl of the vector input at each point on the mesh in all three
                dimensions. I.e. for a vector field with shape `(3, nx, ny, nz)`, returns shape `(3, nx, ny, nz)`.
        Raises:
            (NotImplementedError): If the vector field provided is not three dimensional.
        """
        if self.dim != 3:
            raise NotImplementedError("I'm no mathematician, so curl is only coded for the traditional 3d space.")
        grads = np.array([self.derivative(vf, accuracy=accuracy) for vf in vector_field])
        # (curl F)_i = dF_k/dx_j - dF_j/dx_k with (i, j, k) a cyclic permutation.
        pos = np.array([grads[(2 + i) % self.dim][(1 + i) % self.dim] for i in range(self.dim)])
        neg = np.array([grads[(1 + i) % self.dim][(2 + i) % self.dim] for i in range(self.dim)])
        return pos - neg
| [
"numpy.isclose",
"numpy.roll",
"numpy.squeeze",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.meshgrid",
"numpy.all",
"numpy.gradient",
"numpy.arange"
] | [((1476, 1498), 'numpy.array', 'np.array', (['scalar_field'], {}), '(scalar_field)\n', (1484, 1498), True, 'import numpy as np\n'), ((1510, 1554), 'numpy.all', 'np.all', (['(scalar_field.shape == self.divisions)'], {}), '(scalar_field.shape == self.divisions)\n', (1516, 1554), True, 'import numpy as np\n'), ((2038, 2060), 'numpy.array', 'np.array', (['vector_field'], {}), '(vector_field)\n', (2046, 2060), True, 'import numpy as np\n'), ((2072, 2112), 'numpy.all', 'np.all', (['(vector_field.shape == self.shape)'], {}), '(vector_field.shape == self.shape)\n', (2078, 2112), True, 'import numpy as np\n'), ((9797, 9835), 'numpy.meshgrid', 'np.meshgrid', (['*linspaces'], {'indexing': '"""ij"""'}), "(*linspaces, indexing='ij')\n", (9808, 9835), True, 'import numpy as np\n'), ((9865, 9880), 'numpy.array', 'np.array', (['steps'], {}), '(steps)\n', (9873, 9880), True, 'import numpy as np\n'), ((9909, 9923), 'numpy.array', 'np.array', (['mesh'], {}), '(mesh)\n', (9917, 9923), True, 'import numpy as np\n'), ((10215, 10231), 'numpy.array', 'np.array', (['bounds'], {}), '(bounds)\n', (10223, 10231), True, 'import numpy as np\n'), ((12217, 12236), 'numpy.zeros', 'np.zeros', (['(2 * p + 1)'], {}), '(2 * p + 1)\n', (12225, 12236), True, 'import numpy as np\n'), ((12550, 12622), 'numpy.gradient', 'np.gradient', (['scalar_field', '*self.steps'], {'axis': 'axis', 'edge_order': 'edge_order'}), '(scalar_field, *self.steps, axis=axis, edge_order=edge_order)\n', (12561, 12622), True, 'import numpy as np\n'), ((13903, 13923), 'numpy.zeros', 'np.zeros', (['self.shape'], {}), '(self.shape)\n', (13911, 13923), True, 'import numpy as np\n'), ((8547, 8560), 'numpy.squeeze', 'np.squeeze', (['x'], {}), '(x)\n', (8557, 8560), True, 'import numpy as np\n'), ((9642, 9713), 'numpy.linspace', 'np.linspace', (['bound[0]', 'bound[1]'], {'num': 'ndiv', 'endpoint': '(False)', 'retstep': '(True)'}), '(bound[0], bound[1], num=ndiv, endpoint=False, retstep=True)\n', (9653, 9713), True, 'import numpy as 
np\n'), ((10327, 10361), 'numpy.array', 'np.array', (['[[0, b] for b in bounds]'], {}), '([[0, b] for b in bounds])\n', (10335, 10361), True, 'import numpy as np\n'), ((11475, 11494), 'numpy.array', 'np.array', (['divisions'], {}), '(divisions)\n', (11483, 11494), True, 'import numpy as np\n'), ((13841, 13886), 'numpy.arange', 'np.arange', (['(-max_roll)', '(max_roll + 1)'], {'dtype': 'int'}), '(-max_roll, max_roll + 1, dtype=int)\n', (13850, 13886), True, 'import numpy as np\n'), ((12260, 12272), 'numpy.arange', 'np.arange', (['m'], {}), '(m)\n', (12269, 12272), True, 'import numpy as np\n'), ((12309, 12329), 'numpy.arange', 'np.arange', (['(-p)', '(p + 1)'], {}), '(-p, p + 1)\n', (12318, 12329), True, 'import numpy as np\n'), ((14036, 14052), 'numpy.isclose', 'np.isclose', (['c', '(0)'], {}), '(c, 0)\n', (14046, 14052), True, 'import numpy as np\n'), ((12333, 12356), 'numpy.arange', 'np.arange', (['(0)', '(2 * p + 1)'], {}), '(0, 2 * p + 1)\n', (12342, 12356), True, 'import numpy as np\n'), ((14114, 14154), 'numpy.roll', 'np.roll', (['scalar_field', 'rolls[n]'], {'axis': 'ax'}), '(scalar_field, rolls[n], axis=ax)\n', (14121, 14154), True, 'import numpy as np\n'), ((15992, 16011), 'numpy.arange', 'np.arange', (['self.dim'], {}), '(self.dim)\n', (16001, 16011), True, 'import numpy as np\n')] |
import os
import unittest
import numpy
from mbtr import MolsMBTR2D, read_xyz_crystal, PeriodicMBTR2D
from mbtr import read_xyz_molecule
class TestMolecularMBTR(unittest.TestCase):
    """Unit tests for the 2-D molecular MBTR descriptor."""

    def load_xyz(self, filename, n_molecules):
        """Read an xyz file located next to this test module and verify the
        expected number of molecules before returning them."""
        here = os.path.dirname(os.path.realpath(__file__))
        molecules = read_xyz_molecule(os.path.join(here, filename))
        self.assertEqual(len(molecules), n_molecules)
        return molecules

    def test_atomic_numbers(self):
        """The fitted descriptor must report the element set {H, C, O}."""
        aspirin = self.load_xyz('aspirin.xyz', 1)
        mbtr = MolsMBTR2D(grid_size=10)
        mbtr.fit(aspirin)
        _, ans = mbtr.transform(aspirin)
        self.assertEqual(ans, [1, 6, 8])

    def test_tensor_range(self):
        """tensor_range must span the inverse of the largest and smallest
        interatomic distances found in the molecule."""
        aspirin = self.load_xyz('aspirin.xyz', 1)
        coords = numpy.array([atom[1:] for atom in aspirin[0]['atoms']])
        pairwise = numpy.linalg.norm(
            coords.reshape((21, 1, 3)) - coords.reshape((1, 21, 3)),
            axis=2
        )
        upper = pairwise[numpy.triu_indices(21, 1)]
        mbtr = MolsMBTR2D(grid_size=10, smearing_factor=0.001)
        mbtr.fit(aspirin)
        self.assertAlmostEqual(1 / upper.max(), mbtr.tensor_range[0], delta=0.01)
        self.assertAlmostEqual(1 / upper.min(), mbtr.tensor_range[1], delta=0.01)

    def test_mbtr_integral(self):
        """Integrating each pair channel over the grid axis must recover the
        pair counts of the molecule."""
        aspirin = self.load_xyz('aspirin.xyz', 1)
        mbtr = MolsMBTR2D(grid_size=10, smearing_factor=0.001)
        mbtr.fit(aspirin)
        arrays, _ = mbtr.transform(aspirin)
        integral = numpy.sum(arrays[0], axis=2)
        # Aspirin contains 8 H, 9 C and 4 O atoms.
        # Diagonal channels integrate to N(N-1); off-diagonals to N*M.
        expected = numpy.array([
            [56, 72, 32],
            [72, 72, 36],
            [32, 36, 12]
        ])
        close_enough = numpy.all(abs(integral - expected) < 0.01)
        self.assertTrue(close_enough,
                        msg='Expected {0}, but got {1}'.format(expected, integral))
class TestCrystalMBTR(unittest.TestCase):
    """Unit tests for the periodic (crystal) 2-D MBTR descriptor."""

    def load_xyz(self, filename, n_crystals):
        """Read an xyz file located next to this test module and verify the
        expected number of crystals before returning them."""
        here = os.path.dirname(os.path.realpath(__file__))
        crystals = read_xyz_crystal(os.path.join(here, filename))
        self.assertEqual(len(crystals), n_crystals)
        return crystals

    def test_atomic_numbers(self):
        """The fitted descriptor must report the element set {Na, Cl}."""
        nacl = self.load_xyz('nacl.xyz', 1)
        mbtr = PeriodicMBTR2D(grid_size=10)
        mbtr.fit(nacl)
        _, ans = mbtr.transform(nacl)
        self.assertEqual(ans, [11, 17])

    def test_mbtr_shape(self):
        """A two-species crystal on a 10-point grid yields a (2, 2, 10) tensor."""
        nacl = self.load_xyz('nacl.xyz', 1)
        mbtr = PeriodicMBTR2D(grid_size=10, smearing_factor=0.1)
        mbtr.fit(nacl)
        arrays, _ = mbtr.transform(nacl)
        self.assertTupleEqual(arrays[0].shape, (2, 2, 10))
| [
"numpy.triu_indices",
"mbtr.read_xyz_molecule",
"mbtr.PeriodicMBTR2D",
"os.path.realpath",
"numpy.array",
"numpy.sum",
"mbtr.read_xyz_crystal",
"mbtr.MolsMBTR2D"
] | [((371, 396), 'mbtr.read_xyz_molecule', 'read_xyz_molecule', (['xyz_fn'], {}), '(xyz_fn)\n', (388, 396), False, 'from mbtr import read_xyz_molecule\n'), ((527, 551), 'mbtr.MolsMBTR2D', 'MolsMBTR2D', ([], {'grid_size': '(10)'}), '(grid_size=10)\n', (537, 551), False, 'from mbtr import MolsMBTR2D, read_xyz_crystal, PeriodicMBTR2D\n'), ((1124, 1171), 'mbtr.MolsMBTR2D', 'MolsMBTR2D', ([], {'grid_size': '(10)', 'smearing_factor': '(0.001)'}), '(grid_size=10, smearing_factor=0.001)\n', (1134, 1171), False, 'from mbtr import MolsMBTR2D, read_xyz_crystal, PeriodicMBTR2D\n'), ((1471, 1518), 'mbtr.MolsMBTR2D', 'MolsMBTR2D', ([], {'grid_size': '(10)', 'smearing_factor': '(0.001)'}), '(grid_size=10, smearing_factor=0.001)\n', (1481, 1518), False, 'from mbtr import MolsMBTR2D, read_xyz_crystal, PeriodicMBTR2D\n'), ((1654, 1686), 'numpy.sum', 'numpy.sum', (['aspirin_array'], {'axis': '(2)'}), '(aspirin_array, axis=2)\n', (1663, 1686), False, 'import numpy\n'), ((1797, 1852), 'numpy.array', 'numpy.array', (['[[56, 72, 32], [72, 72, 36], [32, 36, 12]]'], {}), '([[56, 72, 32], [72, 72, 36], [32, 36, 12]])\n', (1808, 1852), False, 'import numpy\n'), ((2296, 2320), 'mbtr.read_xyz_crystal', 'read_xyz_crystal', (['xyz_fn'], {}), '(xyz_fn)\n', (2312, 2320), False, 'from mbtr import MolsMBTR2D, read_xyz_crystal, PeriodicMBTR2D\n'), ((2448, 2476), 'mbtr.PeriodicMBTR2D', 'PeriodicMBTR2D', ([], {'grid_size': '(10)'}), '(grid_size=10)\n', (2462, 2476), False, 'from mbtr import MolsMBTR2D, read_xyz_crystal, PeriodicMBTR2D\n'), ((2719, 2768), 'mbtr.PeriodicMBTR2D', 'PeriodicMBTR2D', ([], {'grid_size': '(10)', 'smearing_factor': '(0.1)'}), '(grid_size=10, smearing_factor=0.1)\n', (2733, 2768), False, 'from mbtr import MolsMBTR2D, read_xyz_crystal, PeriodicMBTR2D\n'), ((1081, 1106), 'numpy.triu_indices', 'numpy.triu_indices', (['(21)', '(1)'], {}), '(21, 1)\n', (1099, 1106), False, 'import numpy\n'), ((290, 316), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', 
(306, 316), False, 'import os\n'), ((2216, 2242), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2232, 2242), False, 'import os\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Benchmark script: time sequential vs distributed Voigt multi-peak fitting
# of diffraction lineouts with pesfit. Reads a MATLAB .mat file from a
# hard-coded local path, so this only runs on the machine it was written for.
import os
import numpy as np
import pesfit as pf
import time
from hdfio import dict_io as io
import argparse
import multiprocessing as mp
import scipy.io as sio
n_cpu = mp.cpu_count()
# NOTE(review): machine-specific absolute path — adjust before running elsewhere.
fdir = r'E:\Diffraction\20190816_meas2_Lineouts_av_sorted.mat'
diffpat = sio.loadmat(fdir)
npk = 10
# Lineshape name prefixes lp1_ ... lp10_, one per candidate peak.
lp_prefixes = ['lp'+str(i)+'_' for i in range(1, npk+1)]
npks = [2, 5, 8, 10]
specwidths = [120, 250, 350, 380]
# Initial peak center guesses (one per candidate peak).
ctvals = np.array([60, 90, 160, 200, 220, 250, 290, 310, 330, 340])
# ctvals = ctvals[:8]
# Initial parameter dictionaries: centers are bounded to +/-30 around the guess.
centers = pf.fitter.init_generator(lpnames=lp_prefixes, parname='center',
                        varkeys=['value', 'min', 'max', 'vary'],
                        parvals=[[ctvals[i], ctvals[i]-30, ctvals[i]+30, True] for i in range(npk)])
# amplitudes = pf.fitter.init_generator(lpnames=lp_prefixes, parname='amplitude',
#                         varkeys=['value', 'min', 'max', 'vary'],
#                         parvals=[[0.6, 0, 3, True] for i in range(npk)])
# sigmas = pf.fitter.init_generator(lpnames=lp_prefixes, parname='sigma',
#                         varkeys=['value', 'min', 'max', 'vary'],
#                         parvals=[[20, 2, 150, True] for i in range(npk)])
# gammas = pf.fitter.init_generator(lpnames=lp_prefixes, parname='gamma',
#                         varkeys=['value', 'min', 'max', 'vary'],
#                         parvals=[[5, 0, 20, True] for i in range(npk)])
decay = pf.fitter.init_generator(lpnames=['bg_'], parname='decay',
                        varkeys=['value', 'min', 'vary'],
                        parvals=[[50, 1, True]])
vardict = centers + decay
dts_seq = []    # wall times of the sequential benchmark (disabled below)
dts_para = []   # wall times of the distributed benchmark
# for npk, sw in zip(npks, specwidths):
#     dat = np.moveaxis(diffpat['Im_an'], 2, 0)[:, :, 100:sw+100].reshape((225, sw))
#     dat -= dat.min(axis=1)[:, None]
#     ctvals = ctvals[:npk]
#     # Time-stamping for the sequential fit
#     kfit = pf.fitter.PatchFitter(peaks={'Voigt':npk}, background='Exponential', xdata=np.arange(sw), ydata=dat)
#     kfit.set_inits(inits_dict=vardict, band_inits=None, offset=0)
#     t_start = time.perf_counter()
#     kfit.sequential_fit(jitter_init=False, varkeys=[], nspec=225, include_vary=False, pbar=True)
#     t_end = time.perf_counter()
#     dt = t_end - t_start
#     print(dt)
#     dts_seq.append(dt)
chunks = [50, 50, 30, 3]
# NOTE(review): this loop shadows the module-level `npk`, and
# `ctvals = ctvals[:npk]` truncates cumulatively across iterations — harmless
# while only the last configuration (npks[3:]) is run, but would misbehave if
# the slice were widened. Confirm before re-enabling more configurations.
for npk, sw, cs in zip(npks[3:], specwidths[3:], chunks[3:]):
    dat = np.moveaxis(diffpat['Im_an'], 2, 0)[:, :, 100:sw+100].reshape((225, sw))
    dat -= dat.min(axis=1)[:, None]
    ctvals = ctvals[:npk]
    # Time-stamping for the distributed fit
    if __name__ == '__main__':
        dfit = pf.fitter.DistributedFitter(peaks={'Voigt':npk}, background='Exponential', xdata=np.arange(sw), ydata=dat, nfitter=225)
        dfit.set_inits(inits_dict=vardict, band_inits=None, offset=0)
        t_start = time.perf_counter()
        dfit.parallel_fit(jitter_init=False, shifts=np.arange(-0.08, 0.09, 0.01),
                        backend='async', include_vary=False, chunksize=cs, pbar=True)
        t_end = time.perf_counter()
        dt = t_end - t_start
        print(dt)
        dts_para.append(dt)
dts_para.append(dt) | [
"scipy.io.loadmat",
"time.perf_counter",
"multiprocessing.cpu_count",
"numpy.array",
"numpy.moveaxis",
"pesfit.fitter.init_generator",
"numpy.arange"
] | [((218, 232), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (230, 232), True, 'import multiprocessing as mp\n'), ((307, 324), 'scipy.io.loadmat', 'sio.loadmat', (['fdir'], {}), '(fdir)\n', (318, 324), True, 'import scipy.io as sio\n'), ((457, 515), 'numpy.array', 'np.array', (['[60, 90, 160, 200, 220, 250, 290, 310, 330, 340]'], {}), '([60, 90, 160, 200, 220, 250, 290, 310, 330, 340])\n', (465, 515), True, 'import numpy as np\n'), ((1520, 1641), 'pesfit.fitter.init_generator', 'pf.fitter.init_generator', ([], {'lpnames': "['bg_']", 'parname': '"""decay"""', 'varkeys': "['value', 'min', 'vary']", 'parvals': '[[50, 1, True]]'}), "(lpnames=['bg_'], parname='decay', varkeys=['value',\n 'min', 'vary'], parvals=[[50, 1, True]])\n", (1544, 1641), True, 'import pesfit as pf\n'), ((2951, 2970), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2968, 2970), False, 'import time\n'), ((3155, 3174), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3172, 3174), False, 'import time\n'), ((2517, 2552), 'numpy.moveaxis', 'np.moveaxis', (["diffpat['Im_an']", '(2)', '(0)'], {}), "(diffpat['Im_an'], 2, 0)\n", (2528, 2552), True, 'import numpy as np\n'), ((2824, 2837), 'numpy.arange', 'np.arange', (['sw'], {}), '(sw)\n', (2833, 2837), True, 'import numpy as np\n'), ((3023, 3051), 'numpy.arange', 'np.arange', (['(-0.08)', '(0.09)', '(0.01)'], {}), '(-0.08, 0.09, 0.01)\n', (3032, 3051), True, 'import numpy as np\n')] |
# Copyright 2020, Battelle Energy Alliance, LLC
# ALL RIGHTS RESERVED
"""
Created on April 30, 2018
@author: mandd
"""
#External Modules---------------------------------------------------------------
import numpy as np
import xml.etree.ElementTree as ET
from utils import utils
from utils import graphStructure as GS
import copy
from utils import xmlUtils as xmlU
#External Modules End-----------------------------------------------------------
#Internal Modules---------------------------------------------------------------
from PluginBaseClasses.ExternalModelPluginBase import ExternalModelPluginBase
#Internal Modules End-----------------------------------------------------------
class GraphModel(ExternalModelPluginBase):
  """
    This class is designed to create a directed graph model which is employed to model Reliability Block Diagrams
  """
  def __init__(self):
    """
      Constructor
      @ In, None
      @ Out, None
    """
    ExternalModelPluginBase.__init__(self)

  def _readMoreXML(self, container, xmlNode):
    """
      Method to read the portion of the XML that belongs to GraphModel
      @ In, container, object, self-like object where all the variables can be stored
      @ In, xmlNode, xml.etree.ElementTree.Element, XML node that needs to be read
      @ Out, None
    """
    container.modelFile = None # file name containing the RBD structure
    container.nodesIN = None # ID of the RBD input nodes
    container.nodesOUT = None # ID of the RBD output nodes
    container.mapping = {} # Mapping dictionary for input variables
    container.InvMapping = {} # Inverse mapping dictionary for input variables
    variables = None # RAVEN variable names declared in the <variables> block
    for child in xmlNode:
      if child.tag == 'nodesIN':
        container.nodesIN = [str(var.strip()) for var in child.text.split(",")]
      elif child.tag == 'nodesOUT':
        container.nodesOUT = [str(var.strip()) for var in child.text.split(",")]
      elif child.tag == 'modelFile':
        container.modelFile = child.text.strip() + '.xml'
      elif child.tag == 'map':
        container.mapping[child.get('var')] = child.text.strip()
        container.InvMapping[child.text.strip()] = child.get('var')
      elif child.tag == 'variables':
        variables = [str(var.strip()) for var in child.text.split(",")]
      else:
        # Unknown tags are tolerated (other RAVEN blocks may be present) but flagged
        print('GraphModel: unrecognized XML node "' + str(child.tag) + '" is ignored')
    if container.nodesIN is None:
      raise IOError("GraphModel: <nodesIN> XML block is not specified")
    if container.nodesOUT is None:
      raise IOError("GraphModel: <nodesOUT> XML block is not specified")
    if container.modelFile is None:
      raise IOError("GraphModel: <modelFile> XML block is not specified")
    if variables is None:
      # FIX: a missing <variables> block used to crash with a NameError on the
      # check below; now it raises the same style of IOError as the other
      # required blocks
      raise IOError("GraphModel: <variables> XML block is not specified")
    if set(variables) != set(container.mapping.keys()):
      raise IOError("GraphModel: the set of variables specified in the <variables> " + str(set(variables)) + " XML block does not match with the specified mapping" + str(set(container.mapping.keys())))
    if not set(container.nodesOUT) <= set(container.mapping.values()):
      raise IOError("GraphModel: the set of out variables specified in the <nodesOUT> " + str(set(variables)) + " XML block does not match with the specified mapping" + str(set(container.mapping.values())))

  def initialize(self, container,runInfoDict,inputFiles):
    """
      Method to initialize the GraphModel
      @ In, container, object, self-like object where all the variables can be stored
      @ In, runInfoDict, dict, dictionary containing all the RunInfo parameters (XML node <RunInfo>)
      @ In, inputFiles, list, list of input files (if any)
      @ Out, None
    """
    container.nodes = {}
    container.deg = {}
    container.runInfo = runInfoDict
    self.createGraph(container,container.modelFile)

  def createGraph(self,container,file):
    """
      Method that actually creates from file the graph structure of the model
      @ In, container, object, self-like object where all the variables can be stored
      @ In, file, file, file containing the structure of the model
      @ Out, None
    """
    graph = ET.parse(container.runInfo['WorkingDir'] + '/' + file)
    graph = xmlU.findAllRecursive(graph,'Graph')
    for node in xmlU.findAllRecursive(graph[0], 'node'):
      nodeName = node.get('name')
      nodeChilds = []
      deg = None
      for child in node:
        if child.tag == 'childs':
          nodeChilds = [var.strip() for var in child.text.split(",")]
        if child.tag == 'deg':
          deg = float(child.text)
      container.nodes[nodeName] = nodeChilds
      container.deg[nodeName] = deg

  def run(self, container, Inputs):
    """
      This method computes all possible path from the input to the output nodes
      @ In, container, object, self-like object where all the variables can be stored
      @ In, Inputs, dict, dictionary of inputs from RAVEN
    """
    if self.checkTypeOfAnalysis(container,Inputs):
      dictOUT = self.runTimeDep(container, Inputs)
    else:
      dictOUT = self.runStatic(container, Inputs)
    for var in dictOUT.keys():
      container.__dict__[var] = dictOUT[var]

  def checkTypeOfAnalysis(self,container,Inputs):
    """
      This method checks which type of analysis to be performed:
       - True:  dynamic (time dependent)
       - False: static
      @ In, container, object, self-like object where all the variables can be stored
      @ In, Inputs, dict, dictionary of inputs from RAVEN
      @ Out, analysisType, bool, type of analysis to be performed
    """
    arrayValues=set()
    for key in Inputs.keys():
      if key in container.mapping.keys():
        arrayValues.add(Inputs[key][0])
    # any value other than the pure failure flags {0., 1.} is a failure time,
    # which means the analysis is time dependent
    analysisType = None
    if arrayValues.difference({0.,1.}):
      analysisType = True
    else:
      analysisType = False
    return analysisType

  def runStatic(self, container, Inputs):
    """
      This method performs a static analysis of the graph model
      @ In, container, object, self-like object where all the variables can be stored
      @ In, Inputs, dict, dictionary of inputs from RAVEN
      @ Out, dictOut, dict, dictionary containing the status of all output variables
    """
    # work on copies: failed components are removed from the graph
    mapping = copy.deepcopy(container.mapping)
    nodes = copy.deepcopy(container.nodes)
    for key in Inputs.keys():
      if key in mapping.keys():
        if mapping[key] in nodes.keys() and Inputs[key][0] == 1.0:
          nodes.pop(mapping[key],None)
          for node in nodes.keys():
            if mapping[key] in nodes[node]:
              nodes[node].remove(mapping[key])
    ravenGraph = GS.graphObject(nodes)
    dictOut = {}
    for nodeO in container.nodesOUT:
      paths = []
      for nodeI in container.nodesIN:
        paths = paths + ravenGraph.findAllPaths(nodeI,nodeO)
      var = container.InvMapping[nodeO]
      # at least one surviving path from any input node means the output works
      if paths:
        dictOut[var] = np.asarray(0.)
      else:
        dictOut[var] = np.asarray(1.)
    return dictOut

  def runTimeDep(self, container, Inputs):
    """
      This method performs a dynamic analysis of the graph model
      @ In, container, object, self-like object where all the variables can be stored
      @ In, Inputs, dict, dictionary of inputs from RAVEN
      @ Out, outcome, dict, dictionary containing the temporal status of all output variables
    """
    times = []
    times.append(0.)
    for key in Inputs.keys():
      if key in container.mapping.keys() and Inputs[key][0]!=1.:
        times.append(Inputs[key][0])
    times = sorted(times, key=float)
    outcome={}
    for var in container.nodesOUT:
      outcome[container.InvMapping[var]] = np.asarray([0.])
    # replay the static analysis at every failure time and record the first
    # time each output variable fails
    for time in times:
      inputToPass=self.inputToBePassed(container,time,Inputs)
      tempOut = self.runStatic(container, inputToPass)
      for var in tempOut.keys():
        if tempOut[var] == 1.:
          if time == 0.:
            outcome[var] = np.asarray([1.])
          else:
            if outcome[var][0] <= 0:
              outcome[var] = np.asarray([time])
    return outcome

  def inputToBePassed(self,container,time,Inputs):
    """
      This method returns the status of the input variables at time t=time
      @ In, container, object, self-like object where all the variables can be stored
      @ In, Inputs, dict, dictionary of inputs from RAVEN
      @ In, time, float, time at which the input variables need to be evaluated
      @ Out, inputToBePassed, dict, value of the RBD nodes at t=time
    """
    inputToBePassed = {}
    for key in Inputs.keys():
      if key in container.mapping.keys():
        if Inputs[key][0] == 0. or Inputs[key][0] == 1.:
          inputToBePassed[key] = Inputs[key]
        else:
          # a failure time: the component is working (0) until that time,
          # failed (1) afterwards
          if Inputs[key][0] > time:
            inputToBePassed[key] = np.asarray([0.])
          else:
            inputToBePassed[key] = np.asarray([1.])
    return inputToBePassed
| [
"PluginBaseClasses.ExternalModelPluginBase.ExternalModelPluginBase.__init__",
"xml.etree.ElementTree.parse",
"utils.graphStructure.graphObject",
"numpy.asarray",
"copy.deepcopy",
"utils.xmlUtils.findAllRecursive"
] | [((954, 992), 'PluginBaseClasses.ExternalModelPluginBase.ExternalModelPluginBase.__init__', 'ExternalModelPluginBase.__init__', (['self'], {}), '(self)\n', (986, 992), False, 'from PluginBaseClasses.ExternalModelPluginBase import ExternalModelPluginBase\n'), ((4028, 4082), 'xml.etree.ElementTree.parse', 'ET.parse', (["(container.runInfo['WorkingDir'] + '/' + file)"], {}), "(container.runInfo['WorkingDir'] + '/' + file)\n", (4036, 4082), True, 'import xml.etree.ElementTree as ET\n'), ((4095, 4132), 'utils.xmlUtils.findAllRecursive', 'xmlU.findAllRecursive', (['graph', '"""Graph"""'], {}), "(graph, 'Graph')\n", (4116, 4132), True, 'from utils import xmlUtils as xmlU\n'), ((4149, 4188), 'utils.xmlUtils.findAllRecursive', 'xmlU.findAllRecursive', (['graph[0]', '"""node"""'], {}), "(graph[0], 'node')\n", (4170, 4188), True, 'from utils import xmlUtils as xmlU\n'), ((6115, 6147), 'copy.deepcopy', 'copy.deepcopy', (['container.mapping'], {}), '(container.mapping)\n', (6128, 6147), False, 'import copy\n'), ((6162, 6192), 'copy.deepcopy', 'copy.deepcopy', (['container.nodes'], {}), '(container.nodes)\n', (6175, 6192), False, 'import copy\n'), ((6507, 6528), 'utils.graphStructure.graphObject', 'GS.graphObject', (['nodes'], {}), '(nodes)\n', (6521, 6528), True, 'from utils import graphStructure as GS\n'), ((7525, 7542), 'numpy.asarray', 'np.asarray', (['[0.0]'], {}), '([0.0])\n', (7535, 7542), True, 'import numpy as np\n'), ((6779, 6794), 'numpy.asarray', 'np.asarray', (['(0.0)'], {}), '(0.0)\n', (6789, 6794), True, 'import numpy as np\n'), ((6829, 6844), 'numpy.asarray', 'np.asarray', (['(1.0)'], {}), '(1.0)\n', (6839, 6844), True, 'import numpy as np\n'), ((7799, 7816), 'numpy.asarray', 'np.asarray', (['[1.0]'], {}), '([1.0])\n', (7809, 7816), True, 'import numpy as np\n'), ((8656, 8673), 'numpy.asarray', 'np.asarray', (['[0.0]'], {}), '([0.0])\n', (8666, 8673), True, 'import numpy as np\n'), ((8724, 8741), 'numpy.asarray', 'np.asarray', (['[1.0]'], {}), '([1.0])\n', 
(8734, 8741), True, 'import numpy as np\n'), ((7898, 7916), 'numpy.asarray', 'np.asarray', (['[time]'], {}), '([time])\n', (7908, 7916), True, 'import numpy as np\n')] |
import sys
import numpy as np
import torch.onnx
import onnx
import onnxruntime as ort
from model_ignore.model_sol import Model
def convert(model, input, device, filename):
    """Export a torch model to ONNX and sanity-check the result.

    Steps: (1) run the model once to make sure it works, (2) export with
    torch.onnx.export, (3) validate the file with onnx.checker, (4) run one
    inference through onnxruntime and print the outputs for manual review.

    :param model: the torch model to convert
    :param input: a sample input tensor of the exact shape/type the model expects
    :param device: torch.device on which the sanity check and export run
    :param filename: path of the .onnx file to write
    """
    # setup
    model = model.eval()
    model = model.to(device)
    input = input.to(device)

    print("First, a sanity check.")
    try:
        model(input)
        print("Sanity check passed.")
    except Exception as e:
        print(e)
        print("Sanity check did not work. Bailing out now...")
        # FIX: exit() is provided by site and may be absent; sys.exit is reliable
        sys.exit(250)
    print()

    print("Second, let's do the export to onnx format.")
    torch.onnx.export(
        model,
        input,
        filename,
        export_params=True,
        opset_version=11,
        do_constant_folding=True,
        input_names=['input'],
        output_names=['output'],
        #dynamic_axes={'input': {0: 'batch_size'}, # todo this might be the wrong thing for you, but this generally works in most cases
        #              'output': {0: 'batch_size'}}
    )
    print("Export went fine.")
    print()

    print("Third, let's run the onnx model_ignore checker on the new model_ignore.")
    onnx.checker.check_model(
        onnx.load(filename)
    )
    print("That went fine.")
    print()

    print("Finally, let's ensure that the onnx model_ignore is able to do inference")
    ort_session = ort.InferenceSession(filename)
    # FIX: Tensor.numpy() only works on CPU tensors; move back from `device`
    # first so this also works when device is cuda (which the error message
    # below explicitly suggests trying).
    numpy_input = input.cpu().numpy()
    outputs = ort_session.run(None, {ort_session.get_inputs()[0].name: numpy_input})
    print("The inference was alright.")
    print()
    print("Now, please verify that the output shape and type makes sense:")
    print(outputs)
    print("Keep in mind, the output shape of your original model_ignore was this one:")
    print(model(input))
    print("Also, keep in mind that it's normal if onnx replaces dicts by lists and does other normalization steps.")
    print(f"""
    Be advised that this onnx model expects inputs of type {numpy_input.dtype} and of shape {numpy_input.shape}.
    Anything else will cause an error.""", file=sys.stderr)
    print()
    print("Have a great day :)")
if __name__ == "__main__":
    # The only thing you have to do is change the functions "model" and "input"
    def model():
        """
        Returns your model.
        :return: your torch model
        """
        # todo change this to get your own model_ignore
        m = Model()
        m.load_state_dict(torch.load("./model_ignore/weights/model.pt"))
        m = m.model
        # end todo
        return m
    def input():
        """
        Returns a sample input for your model. Can be a static input instead of a random one.
        :return: a torch tensor of appropriate shape and type
        """
        # todo change this to get adequate sample input for your model_ignore
        # NOTE(review): randint's upper bound is exclusive, so pixel value 255
        # is never generated — presumably harmless for a sample input; confirm.
        i = np.random.randint(0, 255, (224, 224, 3))
        from torchvision.transforms.functional import to_tensor
        i = to_tensor(i)
        i = i.to(dtype=torch.float)
        i = i.unsqueeze(0)
        # end todo
        return i
    try:
        convert(model(), input(), torch.device("cpu"), "model.onnx")
    except Exception as e:
        print(e)
        print("It seems like the conversion failed.")
        print("Consider changing the torch.device to cuda instead of cpu. Sometimes, the conversion only works on cpu, and vice-versa.")
        # Source for the cpu vs cuda device thing: https://github.com/pytorch/vision/issues/1706
        print("Make sure that the input function is adequate for your model.")
"torchvision.transforms.functional.to_tensor",
"onnxruntime.InferenceSession",
"numpy.random.randint",
"onnx.load",
"model_ignore.model_sol.Model"
] | [((1323, 1353), 'onnxruntime.InferenceSession', 'ort.InferenceSession', (['filename'], {}), '(filename)\n', (1343, 1353), True, 'import onnxruntime as ort\n'), ((1151, 1170), 'onnx.load', 'onnx.load', (['filename'], {}), '(filename)\n', (1160, 1170), False, 'import onnx\n'), ((2365, 2372), 'model_ignore.model_sol.Model', 'Model', ([], {}), '()\n', (2370, 2372), False, 'from model_ignore.model_sol import Model\n'), ((2790, 2830), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(224, 224, 3)'], {}), '(0, 255, (224, 224, 3))\n', (2807, 2830), True, 'import numpy as np\n'), ((2907, 2919), 'torchvision.transforms.functional.to_tensor', 'to_tensor', (['i'], {}), '(i)\n', (2916, 2919), False, 'from torchvision.transforms.functional import to_tensor\n')] |
import numpy as np
import torch
from sklearn.preprocessing import normalize
from torch_geometric.datasets import Planetoid
def get_dataset(dataset):
    """Fetch the named Planetoid citation dataset, cached under ./dataset."""
    return Planetoid('./dataset', dataset)
def data_preprocessing(dataset):
    """Attach dense adjacency matrices to *dataset* in place.

    Sets:
      - dataset.adj_label: dense adjacency built from dataset.edge_index
        (shares storage with adj, so it also receives the self-loops — see note)
      - dataset.adj: row-normalized (L1) transition matrix of adjacency + I

    @ In, dataset, object with .edge_index (2 x E LongTensor) and .x (N x F)
    @ Out, dataset, the same object, mutated
    """
    num_nodes = dataset.x.shape[0]
    dataset.adj = torch.sparse_coo_tensor(
        dataset.edge_index,
        torch.ones(dataset.edge_index.shape[1]),
        torch.Size([num_nodes, num_nodes]),
    ).to_dense()
    dataset.adj_label = dataset.adj
    # In-place add: adj_label shares storage with adj here, so the label matrix
    # also receives the self-loops (this mirrors the original behaviour).
    dataset.adj += torch.eye(num_nodes)
    # Row-wise L1 normalization done directly in torch, removing the previous
    # torch -> sklearn/numpy -> torch round-trip. Entries are non-negative and
    # each row sum is >= 1 thanks to the self-loop, so no division by zero.
    row_sums = dataset.adj.sum(dim=1, keepdim=True)
    dataset.adj = (dataset.adj / row_sums).to(dtype=torch.float)
    return dataset
def get_M(adj, t=2):
    """Compute the t-order proximity matrix M = (B + B^2 + ... + B^t) / t,
    where B is the column-wise L1-normalized transition matrix of *adj*.

    @ In, adj, torch.Tensor, dense adjacency matrix
    @ In, t, int, transition order (generalizes the previously hard-coded 2)
    @ Out, torch.Tensor, averaged multi-hop transition probabilities
    """
    adj_numpy = adj.cpu().numpy()
    # Column-wise L1 normalization without sklearn; all-zero columns are left
    # untouched, matching sklearn.preprocessing.normalize(norm="l1", axis=0).
    col_norms = np.abs(adj_numpy).sum(axis=0, keepdims=True)
    col_norms[col_norms == 0] = 1.0
    tran_prob = adj_numpy / col_norms
    # Accumulate successive powers instead of recomputing matrix_power each step.
    power = tran_prob.copy()
    acc = tran_prob.copy()
    for _ in range(t - 1):
        power = power @ tran_prob
        acc += power
    return torch.Tensor(acc / t)
| [
"torch.eye",
"torch.Tensor",
"torch.from_numpy",
"numpy.linalg.matrix_power",
"torch_geometric.datasets.Planetoid",
"sklearn.preprocessing.normalize",
"torch.Size",
"torch.ones"
] | [((167, 198), 'torch_geometric.datasets.Planetoid', 'Planetoid', (['"""./dataset"""', 'dataset'], {}), "('./dataset', dataset)\n", (176, 198), False, 'from torch_geometric.datasets import Planetoid\n'), ((491, 520), 'torch.eye', 'torch.eye', (['dataset.x.shape[0]'], {}), '(dataset.x.shape[0])\n', (500, 520), False, 'import torch\n'), ((539, 572), 'sklearn.preprocessing.normalize', 'normalize', (['dataset.adj'], {'norm': '"""l1"""'}), "(dataset.adj, norm='l1')\n", (548, 572), False, 'from sklearn.preprocessing import normalize\n'), ((752, 791), 'sklearn.preprocessing.normalize', 'normalize', (['adj_numpy'], {'norm': '"""l1"""', 'axis': '(0)'}), "(adj_numpy, norm='l1', axis=0)\n", (761, 791), False, 'from sklearn.preprocessing import normalize\n'), ((890, 911), 'torch.Tensor', 'torch.Tensor', (['M_numpy'], {}), '(M_numpy)\n', (902, 911), False, 'import torch\n'), ((591, 620), 'torch.from_numpy', 'torch.from_numpy', (['dataset.adj'], {}), '(dataset.adj)\n', (607, 620), False, 'import torch\n'), ((324, 363), 'torch.ones', 'torch.ones', (['dataset.edge_index.shape[1]'], {}), '(dataset.edge_index.shape[1])\n', (334, 363), False, 'import torch\n'), ((365, 417), 'torch.Size', 'torch.Size', (['[dataset.x.shape[0], dataset.x.shape[0]]'], {}), '([dataset.x.shape[0], dataset.x.shape[0]])\n', (375, 417), False, 'import torch\n'), ((811, 847), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['tran_prob', 'i'], {}), '(tran_prob, i)\n', (833, 847), True, 'import numpy as np\n')] |
import tempfile
import os
import numpy
import numpy.testing
import h5py
from deeprank.tools.sparse import FLANgrid
def test_preserved():
    """Round-trip a dense grid through FLANgrid sparse storage and HDF5 and
    check that the values survive within the sparsification tolerance *beta*."""
    beta = 1E-2
    data = numpy.array([[0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.1, 0.0],
                        [0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0]])
    spg = FLANgrid()
    spg.from_dense(data, beta=beta)
    hdf5_file, hdf5_path = tempfile.mkstemp()
    os.close(hdf5_file)
    try:
        with h5py.File(hdf5_path, 'w') as f5:
            f5.create_dataset("index", data=spg.index, compression='gzip', compression_opts=9)
            f5.create_dataset("value", data=spg.value, compression='gzip', compression_opts=9)
        with h5py.File(hdf5_path, 'r') as f5:
            spg_retrieve = FLANgrid(True, f5['index'][:], f5['value'][:], data.shape)
        retrieved_data = spg_retrieve.to_dense()
        assert numpy.all(numpy.abs(data - retrieved_data) < beta), \
            "{} is not the same as {}".format(data, retrieved_data)
    finally:
        # FIX: the temporary file used to be left behind after every run
        os.remove(hdf5_path)
| [
"numpy.abs",
"deeprank.tools.sparse.FLANgrid",
"os.close",
"h5py.File",
"numpy.array",
"tempfile.mkstemp"
] | [((184, 290), 'numpy.array', 'numpy.array', (['[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.1, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, \n 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.1, 0.0], [0.0, 0.0, 0.0, \n 0.0], [0.0, 0.0, 0.0, 0.0]])\n', (195, 290), False, 'import numpy\n'), ((374, 384), 'deeprank.tools.sparse.FLANgrid', 'FLANgrid', ([], {}), '()\n', (382, 384), False, 'from deeprank.tools.sparse import FLANgrid\n'), ((452, 470), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (468, 470), False, 'import tempfile\n'), ((476, 495), 'os.close', 'os.close', (['hdf5_file'], {}), '(hdf5_file)\n', (484, 495), False, 'import os\n'), ((506, 531), 'h5py.File', 'h5py.File', (['hdf5_path', '"""w"""'], {}), "(hdf5_path, 'w')\n", (515, 531), False, 'import h5py\n'), ((735, 760), 'h5py.File', 'h5py.File', (['hdf5_path', '"""r"""'], {}), "(hdf5_path, 'r')\n", (744, 760), False, 'import h5py\n'), ((792, 850), 'deeprank.tools.sparse.FLANgrid', 'FLANgrid', (['(True)', "f5['index'][:]", "f5['value'][:]", 'data.shape'], {}), "(True, f5['index'][:], f5['value'][:], data.shape)\n", (800, 850), False, 'from deeprank.tools.sparse import FLANgrid\n'), ((923, 955), 'numpy.abs', 'numpy.abs', (['(data - retrieved_data)'], {}), '(data - retrieved_data)\n', (932, 955), False, 'import numpy\n')] |
#!/usr/bin/env python3
import gym
import ptan
import numpy as np
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
#from tensorboardX import SummaryWriter
from lib.SummaryWriter import SummaryWriter
from lib import common
from shohdi_lib import ShohdiExperienceReplayBuffer
SAVE_STATES_IMG = False        # when True, dump per-state distribution plots
SAVE_TRANSITIONS_IMG = False   # when True, dump per-transition projection plots
if SAVE_STATES_IMG or SAVE_TRANSITIONS_IMG:
    import matplotlib as mpl
    mpl.use("Agg")  # headless backend: figures are only written to disk
    import matplotlib.pylab as plt
# Support of the categorical value distribution (C51): N_ATOMS atoms spaced
# DELTA_Z apart on [Vmin, Vmax].
Vmax = 10
Vmin = -10
N_ATOMS = 51
DELTA_Z = (Vmax - Vmin) / (N_ATOMS - 1)
# NOTE(review): presumably consumed by the training loop (not visible here):
# number of held-out states to evaluate, and how often — confirm against main.
STATES_TO_EVALUATE = 1000
EVAL_EVERY_FRAME = 100
class DistributionalDQN(nn.Module):
    """Atari DQN variant (C51) whose head predicts, for every action, a
    categorical distribution over N_ATOMS value atoms instead of a scalar Q.

    The module layout (conv/fc Sequential indices, buffer name "supports")
    is kept exactly as before so checkpoints remain loadable.
    """

    def __init__(self, input_shape, n_actions):
        super(DistributionalDQN, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1),
            nn.ReLU()
        )
        conv_out_size = self._get_conv_out(input_shape)
        self.fc = nn.Sequential(
            nn.Linear(conv_out_size, 512),
            nn.ReLU(),
            nn.Linear(512, n_actions * N_ATOMS)
        )
        # atom locations of the value distribution support
        self.register_buffer("supports", torch.arange(Vmin, Vmax+DELTA_Z, DELTA_Z))
        self.softmax = nn.Softmax(dim=1)

    def _get_conv_out(self, shape):
        """Size of the flattened conv output, probed with a dummy forward pass."""
        dummy = self.conv(torch.zeros(1, *shape))
        return int(np.prod(dummy.size()))

    def forward(self, x):
        """Return raw (pre-softmax) logits of shape (batch, n_actions, N_ATOMS)."""
        n = x.size()[0]
        scaled = x.float() / 256   # byte frames -> [0, 1) floats
        flat = self.conv(scaled).view(n, -1)
        return self.fc(flat).view(n, -1, N_ATOMS)

    def both(self, x):
        """Return (raw logits, expected Q-values) in one forward pass."""
        raw = self(x)
        expected = (self.apply_softmax(raw) * self.supports).sum(dim=2)
        return raw, expected

    def qvals(self, x):
        """Expected Q-values only (mean of each action's distribution)."""
        _, qv = self.both(x)
        return qv

    def apply_softmax(self, t):
        """Softmax over the atom dimension, preserving the input shape."""
        return self.softmax(t.view(-1, N_ATOMS)).view(t.size())
def calc_values_of_states(states, net, device="cpu"):
    """Average, over the given states, of the best action's Q-value.

    States are processed in 64 roughly equal chunks to bound memory use; the
    result is the mean of the per-chunk means.
    """
    chunk_means = [
        net.qvals(torch.tensor(chunk).to(device)).max(1)[0].mean().item()
        for chunk in np.array_split(states, 64)
    ]
    return np.mean(chunk_means)
def save_state_images(frame_idx, states, net, device="cpu", max_states=200):
    """Render the per-action categorical distributions predicted for each
    state and save one png per state under states/, stopping after
    *max_states* states. Requires the optional matplotlib import at the top
    of the file to be enabled.
    """
    support = np.arange(Vmin, Vmax + DELTA_Z, DELTA_Z)
    ofs = 0
    for chunk in np.array_split(states, 64):
        chunk_v = torch.tensor(chunk).to(device)
        probs = net.apply_softmax(net(chunk_v)).data.cpu().numpy()
        n_items, n_actions, _ = probs.shape
        for item in range(n_items):
            plt.clf()
            for act in range(n_actions):
                plt.subplot(n_actions, 1, act + 1)
                plt.bar(support, probs[item, act], width=0.5)
            plt.savefig("states/%05d_%08d.png" % (ofs + item, frame_idx))
        ofs += n_items
        if ofs >= max_states:
            break
def save_transition_images(batch_size, predicted, projected, next_distr, dones, rewards, save_prefix):
    """For every transition in the batch, plot the predicted, Bellman-projected
    and next-state distributions stacked vertically, and save one png per
    transition. The filename suffix encodes a non-zero reward and/or episode
    end. Requires the optional matplotlib import to be enabled.
    """
    support = np.arange(Vmin, Vmax + DELTA_Z, DELTA_Z)
    panels = (
        (1, predicted, "Predicted"),
        (2, projected, "Projected"),
        (3, next_distr, "Next state"),
    )
    for idx in range(batch_size):
        plt.clf()
        for row, distr, title in panels:
            plt.subplot(3, 1, row)
            plt.bar(support, distr[idx], width=0.5)
            plt.title(title)
        suffix = ""
        if rewards[idx] != 0.0:
            suffix = suffix + "_%.0f" % rewards[idx]
        if dones[idx]:
            suffix = suffix + "_done"
        plt.savefig("%s_%02d%s.png" % (save_prefix, idx, suffix))
def calc_loss(batch, net, tgt_net, gamma, device="cpu", save_prefix=None):
    """Compute the categorical (C51-style) cross-entropy loss for a batch.

    Args:
        batch -- list of transitions, unpacked via ``common.unpack_batch``
        net -- online network producing per-action atom logits
        tgt_net -- target network used for the Bellman backup
        gamma -- discount factor
        device -- torch device string
        save_prefix -- when given, debug images of the distributions are saved

    Returns:
        Scalar tensor: mean over the batch of the cross-entropy between the
        projected target distribution and the network's log-softmax output.
    """
    states, actions, rewards, dones, next_states = common.unpack_batch(batch)
    batch_size = len(batch)
    states_v = torch.tensor(states).to(device)
    actions_v = torch.tensor(actions).to(device)
    next_states_v = torch.tensor(next_states).to(device)
    # next state distribution: pick the greedy action from the expected
    # Q-values, then take that action's atom distribution from the target net
    next_distr_v, next_qvals_v = tgt_net.both(next_states_v)
    next_actions = next_qvals_v.max(1)[1].data.cpu().numpy()
    next_distr = tgt_net.apply_softmax(next_distr_v).data.cpu().numpy()
    next_best_distr = next_distr[range(batch_size), next_actions]
    # BUGFIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the equivalent dtype
    dones = dones.astype(bool)
    # project our distribution using Bellman update
    proj_distr = common.distr_projection(next_best_distr, rewards, dones, Vmin, Vmax, N_ATOMS, gamma)
    # calculate net output
    distr_v = net(states_v)
    state_action_values = distr_v[range(batch_size), actions_v.data]
    state_log_sm_v = F.log_softmax(state_action_values, dim=1)
    proj_distr_v = torch.tensor(proj_distr).to(device)
    if save_prefix is not None:
        pred = F.softmax(state_action_values, dim=1).data.cpu().numpy()
        save_transition_images(batch_size, pred, proj_distr, next_best_distr, dones, rewards, save_prefix)
    # cross-entropy: -sum_i target_i * log(pred_i), averaged over the batch
    loss_v = -state_log_sm_v * proj_distr_v
    return loss_v.sum(dim=1).mean()
if __name__ == "__main__":
    # Training entry point: distributional DQN (C51) on Pong.
    params = common.HYPERPARAMS['pong']
    # params['epsilon_frames'] *= 2
    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", default=False, action="store_true", help="Enable cuda")
    args = parser.parse_args()
    device = torch.device("cuda" if args.cuda else "cpu")
    # Environment with the standard DQN wrappers (frame stacking etc.)
    env = gym.make(params['env_name'])
    env = ptan.common.wrappers.wrap_dqn(env)
    writer = SummaryWriter(comment="-" + params['run_name'] + "-distrib")
    # Online net, periodically-synced target net, epsilon-greedy policy
    net = DistributionalDQN(env.observation_space.shape, env.action_space.n).to(device)
    tgt_net = ptan.agent.TargetNet(net)
    selector = ptan.actions.EpsilonGreedyActionSelector(epsilon=params['epsilon_start'])
    epsilon_tracker = common.EpsilonTracker(selector, params)
    # The agent acts on expected Q-values derived from the distribution
    agent = ptan.agent.DQNAgent(lambda x: net.qvals(x), selector, device=device)
    exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=params['gamma'], steps_count=1)
    buffer = ShohdiExperienceReplayBuffer.ShohdiExperienceReplayBuffer(exp_source, buffer_size=params['replay_size'],name='test_dqn_sample')
    optimizer = optim.Adam(net.parameters(), lr=params['learning_rate'])
    frame_idx = 0
    eval_states = None
    prev_save = 0
    save_prefix = None
    with common.RewardTracker(writer, params['stop_reward']) as reward_tracker:
        while True:
            frame_idx += 1
            # collect one new transition into the replay buffer
            buffer.populate(1)
            epsilon_tracker.frame(frame_idx)
            new_rewards = exp_source.pop_total_rewards()
            if new_rewards:
                # stop training once the tracker reports the reward goal
                if reward_tracker.reward(new_rewards[0], frame_idx, selector.epsilon):
                    break
            # warm up the buffer before learning starts
            if len(buffer) < params['replay_initial']:
                continue
            # fix a sample of states once, for periodic value evaluation
            if eval_states is None:
                eval_states = buffer.sample(STATES_TO_EVALUATE)
                eval_states = [np.array(transition.state, copy=False) for transition in eval_states]
                eval_states = np.array(eval_states, copy=False)
            optimizer.zero_grad()
            batch = buffer.sample(params['batch_size'])
            save_prefix = None
            if SAVE_TRANSITIONS_IMG:
                # only dump debug images for batches with terminal states or
                # non-zero rewards, at most once per 30k frames
                interesting = any(map(lambda s: s.last_state is None or s.reward != 0.0, batch))
                if interesting and frame_idx // 30000 > prev_save:
                    save_prefix = "images/img_%08d" % frame_idx
                    prev_save = frame_idx // 30000
            loss_v = calc_loss(batch, net, tgt_net.target_model, gamma=params['gamma'],
                               device=device, save_prefix=save_prefix)
            loss_v.backward()
            optimizer.step()
            # periodic target-network sync
            if frame_idx % params['target_net_sync'] == 0:
                tgt_net.sync()
            # periodic evaluation of the fixed state sample
            if frame_idx % EVAL_EVERY_FRAME == 0:
                mean_val = calc_values_of_states(eval_states, net, device=device)
                writer.add_scalar("values_mean", mean_val, frame_idx)
            if SAVE_STATES_IMG and frame_idx % 10000 == 0:
                save_state_images(frame_idx, eval_states, net, device=device)
| [
"torch.nn.ReLU",
"matplotlib.pylab.savefig",
"numpy.array_split",
"numpy.array",
"gym.make",
"numpy.arange",
"torch.arange",
"torch.nn.functional.softmax",
"numpy.mean",
"ptan.actions.EpsilonGreedyActionSelector",
"matplotlib.pylab.clf",
"argparse.ArgumentParser",
"matplotlib.pylab.title",
... | [((473, 487), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (480, 487), True, 'import matplotlib as mpl\n'), ((2209, 2235), 'numpy.array_split', 'np.array_split', (['states', '(64)'], {}), '(states, 64)\n', (2223, 2235), True, 'import numpy as np\n'), ((2462, 2480), 'numpy.mean', 'np.mean', (['mean_vals'], {}), '(mean_vals)\n', (2469, 2480), True, 'import numpy as np\n'), ((2580, 2620), 'numpy.arange', 'np.arange', (['Vmin', '(Vmax + DELTA_Z)', 'DELTA_Z'], {}), '(Vmin, Vmax + DELTA_Z, DELTA_Z)\n', (2589, 2620), True, 'import numpy as np\n'), ((2638, 2664), 'numpy.array_split', 'np.array_split', (['states', '(64)'], {}), '(states, 64)\n', (2652, 2664), True, 'import numpy as np\n'), ((4222, 4248), 'lib.common.unpack_batch', 'common.unpack_batch', (['batch'], {}), '(batch)\n', (4241, 4248), False, 'from lib import common\n'), ((4827, 4915), 'lib.common.distr_projection', 'common.distr_projection', (['next_best_distr', 'rewards', 'dones', 'Vmin', 'Vmax', 'N_ATOMS', 'gamma'], {}), '(next_best_distr, rewards, dones, Vmin, Vmax,\n N_ATOMS, gamma)\n', (4850, 4915), False, 'from lib import common\n'), ((5058, 5099), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['state_action_values'], {'dim': '(1)'}), '(state_action_values, dim=1)\n', (5071, 5099), True, 'import torch.nn.functional as F\n'), ((5565, 5590), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5588, 5590), False, 'import argparse\n'), ((5725, 5769), 'torch.device', 'torch.device', (["('cuda' if args.cuda else 'cpu')"], {}), "('cuda' if args.cuda else 'cpu')\n", (5737, 5769), False, 'import torch\n'), ((5781, 5809), 'gym.make', 'gym.make', (["params['env_name']"], {}), "(params['env_name'])\n", (5789, 5809), False, 'import gym\n'), ((5820, 5854), 'ptan.common.wrappers.wrap_dqn', 'ptan.common.wrappers.wrap_dqn', (['env'], {}), '(env)\n', (5849, 5854), False, 'import ptan\n'), ((5869, 5929), 'lib.SummaryWriter.SummaryWriter', 'SummaryWriter', ([], {'comment': 
"('-' + params['run_name'] + '-distrib')"}), "(comment='-' + params['run_name'] + '-distrib')\n", (5882, 5929), False, 'from lib.SummaryWriter import SummaryWriter\n'), ((6033, 6058), 'ptan.agent.TargetNet', 'ptan.agent.TargetNet', (['net'], {}), '(net)\n', (6053, 6058), False, 'import ptan\n'), ((6074, 6147), 'ptan.actions.EpsilonGreedyActionSelector', 'ptan.actions.EpsilonGreedyActionSelector', ([], {'epsilon': "params['epsilon_start']"}), "(epsilon=params['epsilon_start'])\n", (6114, 6147), False, 'import ptan\n'), ((6170, 6209), 'lib.common.EpsilonTracker', 'common.EpsilonTracker', (['selector', 'params'], {}), '(selector, params)\n', (6191, 6209), False, 'from lib import common\n'), ((6309, 6404), 'ptan.experience.ExperienceSourceFirstLast', 'ptan.experience.ExperienceSourceFirstLast', (['env', 'agent'], {'gamma': "params['gamma']", 'steps_count': '(1)'}), "(env, agent, gamma=params['gamma'],\n steps_count=1)\n", (6350, 6404), False, 'import ptan\n'), ((6414, 6546), 'shohdi_lib.ShohdiExperienceReplayBuffer.ShohdiExperienceReplayBuffer', 'ShohdiExperienceReplayBuffer.ShohdiExperienceReplayBuffer', (['exp_source'], {'buffer_size': "params['replay_size']", 'name': '"""test_dqn_sample"""'}), "(exp_source,\n buffer_size=params['replay_size'], name='test_dqn_sample')\n", (6471, 6546), False, 'from shohdi_lib import ShohdiExperienceReplayBuffer\n'), ((1400, 1417), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (1410, 1417), True, 'import torch.nn as nn\n'), ((3470, 3479), 'matplotlib.pylab.clf', 'plt.clf', ([], {}), '()\n', (3477, 3479), True, 'import matplotlib.pylab as plt\n'), ((3492, 3532), 'numpy.arange', 'np.arange', (['Vmin', '(Vmax + DELTA_Z)', 'DELTA_Z'], {}), '(Vmin, Vmax + DELTA_Z, DELTA_Z)\n', (3501, 3532), True, 'import numpy as np\n'), ((3541, 3561), 'matplotlib.pylab.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (3552, 3561), True, 'import matplotlib.pylab as plt\n'), ((3570, 3613), 'matplotlib.pylab.bar', 
'plt.bar', (['p', 'predicted[batch_idx]'], {'width': '(0.5)'}), '(p, predicted[batch_idx], width=0.5)\n', (3577, 3613), True, 'import matplotlib.pylab as plt\n'), ((3622, 3644), 'matplotlib.pylab.title', 'plt.title', (['"""Predicted"""'], {}), "('Predicted')\n", (3631, 3644), True, 'import matplotlib.pylab as plt\n'), ((3653, 3673), 'matplotlib.pylab.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (3664, 3673), True, 'import matplotlib.pylab as plt\n'), ((3682, 3725), 'matplotlib.pylab.bar', 'plt.bar', (['p', 'projected[batch_idx]'], {'width': '(0.5)'}), '(p, projected[batch_idx], width=0.5)\n', (3689, 3725), True, 'import matplotlib.pylab as plt\n'), ((3734, 3756), 'matplotlib.pylab.title', 'plt.title', (['"""Projected"""'], {}), "('Projected')\n", (3743, 3756), True, 'import matplotlib.pylab as plt\n'), ((3765, 3785), 'matplotlib.pylab.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (3776, 3785), True, 'import matplotlib.pylab as plt\n'), ((3794, 3838), 'matplotlib.pylab.bar', 'plt.bar', (['p', 'next_distr[batch_idx]'], {'width': '(0.5)'}), '(p, next_distr[batch_idx], width=0.5)\n', (3801, 3838), True, 'import matplotlib.pylab as plt\n'), ((3847, 3870), 'matplotlib.pylab.title', 'plt.title', (['"""Next state"""'], {}), "('Next state')\n", (3856, 3870), True, 'import matplotlib.pylab as plt\n'), ((4030, 4093), 'matplotlib.pylab.savefig', 'plt.savefig', (["('%s_%02d%s.png' % (save_prefix, batch_idx, suffix))"], {}), "('%s_%02d%s.png' % (save_prefix, batch_idx, suffix))\n", (4041, 4093), True, 'import matplotlib.pylab as plt\n'), ((6708, 6759), 'lib.common.RewardTracker', 'common.RewardTracker', (['writer', "params['stop_reward']"], {}), "(writer, params['stop_reward'])\n", (6728, 6759), False, 'from lib import common\n'), ((832, 886), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_shape[0]', '(32)'], {'kernel_size': '(8)', 'stride': '(4)'}), '(input_shape[0], 32, kernel_size=8, stride=4)\n', (841, 886), True, 'import torch.nn as 
nn\n'), ((900, 909), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (907, 909), True, 'import torch.nn as nn\n'), ((923, 965), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)'], {'kernel_size': '(4)', 'stride': '(2)'}), '(32, 64, kernel_size=4, stride=2)\n', (932, 965), True, 'import torch.nn as nn\n'), ((979, 988), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (986, 988), True, 'import torch.nn as nn\n'), ((1002, 1044), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(3)', 'stride': '(1)'}), '(64, 64, kernel_size=3, stride=1)\n', (1011, 1044), True, 'import torch.nn as nn\n'), ((1058, 1067), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1065, 1067), True, 'import torch.nn as nn\n'), ((1180, 1209), 'torch.nn.Linear', 'nn.Linear', (['conv_out_size', '(512)'], {}), '(conv_out_size, 512)\n', (1189, 1209), True, 'import torch.nn as nn\n'), ((1223, 1232), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1230, 1232), True, 'import torch.nn as nn\n'), ((1246, 1281), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(n_actions * N_ATOMS)'], {}), '(512, n_actions * N_ATOMS)\n', (1255, 1281), True, 'import torch.nn as nn\n'), ((1334, 1377), 'torch.arange', 'torch.arange', (['Vmin', '(Vmax + DELTA_Z)', 'DELTA_Z'], {}), '(Vmin, Vmax + DELTA_Z, DELTA_Z)\n', (1346, 1377), False, 'import torch\n'), ((1477, 1499), 'torch.zeros', 'torch.zeros', (['(1)', '*shape'], {}), '(1, *shape)\n', (1488, 1499), False, 'import torch\n'), ((2901, 2910), 'matplotlib.pylab.clf', 'plt.clf', ([], {}), '()\n', (2908, 2910), True, 'import matplotlib.pylab as plt\n'), ((3105, 3171), 'matplotlib.pylab.savefig', 'plt.savefig', (["('states/%05d_%08d.png' % (ofs + batch_idx, frame_idx))"], {}), "('states/%05d_%08d.png' % (ofs + batch_idx, frame_idx))\n", (3116, 3171), True, 'import matplotlib.pylab as plt\n'), ((4293, 4313), 'torch.tensor', 'torch.tensor', (['states'], {}), '(states)\n', (4305, 4313), False, 'import torch\n'), ((4341, 4362), 'torch.tensor', 'torch.tensor', (['actions'], {}), 
'(actions)\n', (4353, 4362), False, 'import torch\n'), ((4394, 4419), 'torch.tensor', 'torch.tensor', (['next_states'], {}), '(next_states)\n', (4406, 4419), False, 'import torch\n'), ((5119, 5143), 'torch.tensor', 'torch.tensor', (['proj_distr'], {}), '(proj_distr)\n', (5131, 5143), False, 'import torch\n'), ((2256, 2275), 'torch.tensor', 'torch.tensor', (['batch'], {}), '(batch)\n', (2268, 2275), False, 'import torch\n'), ((2685, 2704), 'torch.tensor', 'torch.tensor', (['batch'], {}), '(batch)\n', (2697, 2704), False, 'import torch\n'), ((2977, 3020), 'matplotlib.pylab.subplot', 'plt.subplot', (['num_actions', '(1)', '(action_idx + 1)'], {}), '(num_actions, 1, action_idx + 1)\n', (2988, 3020), True, 'import matplotlib.pylab as plt\n'), ((3035, 3092), 'matplotlib.pylab.bar', 'plt.bar', (['p', 'action_prob[batch_idx, action_idx]'], {'width': '(0.5)'}), '(p, action_prob[batch_idx, action_idx], width=0.5)\n', (3042, 3092), True, 'import matplotlib.pylab as plt\n'), ((7414, 7447), 'numpy.array', 'np.array', (['eval_states'], {'copy': '(False)'}), '(eval_states, copy=False)\n', (7422, 7447), True, 'import numpy as np\n'), ((7314, 7352), 'numpy.array', 'np.array', (['transition.state'], {'copy': '(False)'}), '(transition.state, copy=False)\n', (7322, 7352), True, 'import numpy as np\n'), ((5203, 5240), 'torch.nn.functional.softmax', 'F.softmax', (['state_action_values'], {'dim': '(1)'}), '(state_action_values, dim=1)\n', (5212, 5240), True, 'import torch.nn.functional as F\n')] |
import numpy as np
import random
import pandas
# R matrix
# read the reward matrix from the appropriate folder
# NOTE(review): hard-coded absolute Windows path — adjust per machine
d = pandas.read_csv("C:\\R tables\\0.csv",header = None,index_col=None)
R = np.asarray(d)
# Q matrix (11 states x 11 actions), initialised to all zeros
Q = np.matrix(np.zeros([11,11]))
# gamma (learning parameter / discount factor)
gamma = 0.8
# Initial state. (Usually to be chosen at random)
avilable_states = [0,1,2,3,4,5,6,7,8,9,10]
initial_state = random.choice(avilable_states)
# This function returns all avilable actions in the state given as argument
def avilable_actions(state):
    """Return the indices of all actions allowed from *state*.

    An action is allowed when its entry in the module-level reward
    matrix ``R`` is non-negative.
    """
    allowed = np.where(R[state,] >= 0)[0]
    return allowed
# Get avilable actions in the current state
avilable_act = avilable_actions(initial_state)
# This function chooses at random which action to be performed within the range
# of all the avilable actions
def sample_next_action(avilable_actions_range):
    """Pick one action uniformly at random from *avilable_actions_range*.

    BUGFIX: the original sampled from the module-level global
    ``avilable_act`` and ignored its own argument, silently coupling the
    function to external state; it now uses the argument it is given.
    """
    next_action = int(np.random.choice(avilable_actions_range, 1))
    return next_action
# Sample next action to be performed
action = sample_next_action(avilable_act)
# This function updates the Q matrix according to the path selected and the Q
# learning algorithm
def update(current_state, action, gamma):
    """Apply one Q-learning backup for taking *action* from *current_state*.

    Mutates the module-level matrix ``Q`` in place with
    Q(s, a) = R(s, a) + gamma * max Q(action, ·).

    NOTE(review): the row indexed for the max is ``action`` itself, i.e. the
    action index doubles as the resulting state — confirm this matches the
    R-matrix encoding.
    """
    # np.where on a row of the 2-D np.matrix Q returns (rows, cols); [1] keeps the column indices
    max_index = np.where(Q[action,] == np.max(Q[action,]))[1]
    if max_index.shape[0] > 1:
        # several equally good entries: break the tie at random
        max_index = int(np.random.choice(max_index, size=1))
    else:
        max_index = int(max_index)
    max_value = Q[action, max_index]
    # Q learning formula
    Q[current_state, action] = R[current_state, action] + gamma * max_value
# Update Q matrix once from the randomly chosen initial state
update(initial_state, action, gamma)
#_____________________________________________________________________________________
# Training
# Train over 10,000 iterations. (Re-iterate the process above from random states.)
for i in range(10000):
    current_state = np.random.randint(0, int(Q.shape[0]))
    avilable_act = avilable_actions(current_state)
    action = sample_next_action(avilable_act)
    update(current_state, action, gamma)
# Normalize the "Trained" Q matrix to a 0-100 scale
print("Trained Q Matrix: ")
Q = Q / np.max(Q) * 100
print(Q)
#_____________________________________________________________________________________
# Testing
# Goal State = 0
# Best sequence path starting from 2 -> 2,1,0
current_state = 2
steps = [current_state]
# change the goal value depending upon the R matrix you are using:
# for instance for the 0.csv r matrix the value in the while will be 0, 1 for 1.csv and so on...
while current_state != 0:
    # greedy walk: always step to the column with the largest Q value,
    # breaking ties at random
    next_step_index = np.where(Q[current_state,] == np.max(Q[current_state,]))[1]
    if next_step_index.shape[0] > 1:
        next_step_index = int(np.random.choice(next_step_index, size=1))
    else:
        next_step_index = int(next_step_index)
    steps.append(next_step_index)
    current_state = next_step_index
# Print selected sequence of steps
print("Selected path : ")
print(steps)
# save the Q matrix generated corresponding to the R matrix as a .csv file
pandas.DataFrame(Q).to_csv("C:\\Q table\\Qtable(9).csv",header = None,index = None)
| [
"random.choice",
"pandas.read_csv",
"numpy.where",
"numpy.random.choice",
"numpy.asarray",
"numpy.max",
"numpy.zeros",
"pandas.DataFrame"
] | [((110, 177), 'pandas.read_csv', 'pandas.read_csv', (['"""C:\\\\R tables\\\\0.csv"""'], {'header': 'None', 'index_col': 'None'}), "('C:\\\\R tables\\\\0.csv', header=None, index_col=None)\n", (125, 177), False, 'import pandas\n'), ((182, 195), 'numpy.asarray', 'np.asarray', (['d'], {}), '(d)\n', (192, 195), True, 'import numpy as np\n'), ((394, 424), 'random.choice', 'random.choice', (['avilable_states'], {}), '(avilable_states)\n', (407, 424), False, 'import random\n'), ((222, 240), 'numpy.zeros', 'np.zeros', (['[11, 11]'], {}), '([11, 11])\n', (230, 240), True, 'import numpy as np\n'), ((575, 607), 'numpy.where', 'np.where', (['(current_state_row >= 0)'], {}), '(current_state_row >= 0)\n', (583, 607), True, 'import numpy as np\n'), ((899, 932), 'numpy.random.choice', 'np.random.choice', (['avilable_act', '(1)'], {}), '(avilable_act, 1)\n', (915, 932), True, 'import numpy as np\n'), ((1993, 2002), 'numpy.max', 'np.max', (['Q'], {}), '(Q)\n', (1999, 2002), True, 'import numpy as np\n'), ((2843, 2862), 'pandas.DataFrame', 'pandas.DataFrame', (['Q'], {}), '(Q)\n', (2859, 2862), False, 'import pandas\n'), ((1286, 1321), 'numpy.random.choice', 'np.random.choice', (['max_index'], {'size': '(1)'}), '(max_index, size=1)\n', (1302, 1321), True, 'import numpy as np\n'), ((2540, 2581), 'numpy.random.choice', 'np.random.choice', (['next_step_index'], {'size': '(1)'}), '(next_step_index, size=1)\n', (2556, 2581), True, 'import numpy as np\n'), ((1216, 1234), 'numpy.max', 'np.max', (['Q[action,]'], {}), '(Q[action,])\n', (1222, 1234), True, 'import numpy as np\n'), ((2451, 2476), 'numpy.max', 'np.max', (['Q[current_state,]'], {}), '(Q[current_state,])\n', (2457, 2476), True, 'import numpy as np\n')] |
import parsers
import utils
import tensorflow.keras as K
from collections import namedtuple
import numpy as np
from sklearn.utils import shuffle
from tensorflow.python.keras.utils import Sequence
from tensorflow.python.keras.utils.data_utils import Sequence
def __len__(training_file_path, batch_size):
    """Return how many full batches the training file can supply."""
    total_sentences = parsers.TrainingParser(training_file_path).count()
    return total_sentences // batch_size
def get(batch_size, training_file_path, antivocab, output_vocab, PADDING_SIZE = 50, gold_file_path = None):
    """
    Batch processing generator: loops forever (while True), re-parsing the
    training file on every pass and yielding padded batches.

    In training mode (gold_file_path given) each yielded item is a shuffled
    (sentences, labels) pair; otherwise each item is a
    (sentences, candidates) pair for evaluation.

    param batch_size: number of sentences per yielded batch
    param training_file_path: path parsed by parsers.TrainingParser
    param antivocab: passed through to prepare_sentence
    param output_vocab: passed through to prepare_sentence
    param PADDING_SIZE: fixed sequence length enforced by apply_padding
    param gold_file_path: path parsed by parsers.GoldParser (enables training mode)
    return: generator object
    """
    while True:
        batch = {"sentences" : [], "candidates" : []}
        training_data_flow = parsers.TrainingParser(training_file_path )
        if gold_file_path:
            gold_data_flow = parsers.GoldParser(gold_file_path)
            batch.update({"labels" : []})
        for batch_count, sentence in enumerate(training_data_flow.parse(), start = 1):
            #training mode
            if gold_file_path:
                labels = gold_data_flow.parse()
                output = prepare_sentence(sentence, antivocab, output_vocab, labels)
                batch['sentences'].append(output['sentence'])
                batch['candidates'].append(output['candidates'])
                batch['labels'].append(output['labels'])
            #evaluation mode
            else:
                output = prepare_sentence(sentence, antivocab, output_vocab)
                batch['sentences'].append(output['sentence'])
                batch['candidates'].append(output['candidates'])
            # a full batch has accumulated: pad every field, yield, reset
            if int(batch_count)%int(batch_size)==0:
                for key in batch.keys():
                    batch[key] = apply_padding(batch, key, maxlen = PADDING_SIZE, value = 1)
                #TO DO:
                if gold_file_path:
                    x, y = batch['sentences'], np.expand_dims(batch['labels'], axis=-1)
                    x, y = shuffle(x, y)
                    yield x, y
                else:
                    yield batch['sentences'], batch['candidates']
                batch = {"sentences" : [], "candidates" : []}
                if gold_file_path:
                    batch.update({"labels" : []})
        # flush the remainder that did not fill a whole batch
        # NOTE(review): this also runs when the file length is an exact multiple
        # of batch_size, yielding an empty batch — confirm that is intended
        if batch_count>0:
            for key in batch.keys():
                batch[key] = apply_padding(batch, key, maxlen = PADDING_SIZE, value = 1)
            batch_count = 0
            if gold_file_path:
                x, y = batch['sentences'], np.expand_dims(batch['labels'], axis=-1)
                x, y = shuffle(x, y)
                yield x, y
            else:
                yield batch['sentences'], batch['candidates']
def apply_padding(output, key, maxlen=50, value=1):
    """Pad the sequence list stored under ``output[key]`` to length ``maxlen``.

    For the 'candidates' key each sentence is a list of per-token candidate
    lists, so padding appends ``[value]`` singleton lists; the elements are
    rewritten in place and the same list object is returned. For every
    other key Keras' ``pad_sequences`` is used (post-padding, pre-truncation).

    param output: dict of batched fields
    param key: which field of *output* to pad
    param maxlen: target length
    param value: padding value
    return: padded list of lists (or numpy array from pad_sequences)
    """
    seqs = output[key]
    if key == 'candidates':
        for idx in range(len(seqs)):
            seqs[idx] = seqs[idx] + [[value]] * (maxlen - len(seqs[idx]))
        return seqs
    return K.preprocessing.sequence.pad_sequences(seqs, truncating='pre', padding='post', maxlen=maxlen, value=value)
def prepare_sentence(sentence, antivocab, output_vocab, labels=None):
    """
    Prepares an output sentence consisting of the sentence itself along with labels and candidates.

    FIX: removed the unused local ``records = namedtuple(...)`` — it was never
    referenced.

    param sentence: iterable of (id_, lemma, pos, instance) tuples
    param antivocab: passed through to utils.map_word_from_dict
    param output_vocab: mapping from sense string to output index; must contain "<UNK>"
    param labels: optional iterator of gold-label records (training mode),
                  each with ``.id_`` and ``.senses`` attributes
    return output: dict with keys: sentence, labels, candidates, all list type objects
    """
    output = {"sentence" : [], "labels" : [], "candidates": []}
    for entry in sentence:
        id_, lemma, pos, instance = entry
        output_word = utils.map_word_from_dict(lemma, pos, antivocab, output_vocab, instance)
        output['sentence'].append(output_word)
        if id_ is None:
            # not an annotated instance: the token itself is its own label and candidate
            output['labels'].append(output_word)
            candidates = [output_word]
        else:
            if labels is not None:
                current_label = labels.__next__()
                assert current_label.id_ == id_, "ID mismatch"
                sense = current_label.senses[0]
                sense = output_vocab[sense] if sense in output_vocab else output_vocab["<UNK>"]
                output['labels'].append(sense)
            # candidate senses for this lemma/POS, mapped into output-vocab ids
            candidates = utils.candidate_synsets(lemma, pos)
            candidates = [utils.map_word_from_dict(c, "X", antivocab, output_vocab, instance=True) for c in candidates]
        output['candidates'].append(candidates)
    return output
"collections.namedtuple",
"parsers.GoldParser",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"utils.candidate_synsets",
"sklearn.utils.shuffle",
"utils.map_word_from_dict",
"numpy.expand_dims",
"parsers.TrainingParser"
] | [((3912, 3960), 'collections.namedtuple', 'namedtuple', (['"""Training"""', '"""id_ lemma pos instance"""'], {}), "('Training', 'id_ lemma pos instance')\n", (3922, 3960), False, 'from collections import namedtuple\n'), ((903, 945), 'parsers.TrainingParser', 'parsers.TrainingParser', (['training_file_path'], {}), '(training_file_path)\n', (925, 945), False, 'import parsers\n'), ((3397, 3504), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'K.preprocessing.sequence.pad_sequences', (['x'], {'truncating': '"""pre"""', 'padding': '"""post"""', 'maxlen': 'maxlen', 'value': 'value'}), "(x, truncating='pre', padding='post',\n maxlen=maxlen, value=value)\n", (3435, 3504), True, 'import tensorflow.keras as K\n'), ((4135, 4206), 'utils.map_word_from_dict', 'utils.map_word_from_dict', (['lemma', 'pos', 'antivocab', 'output_vocab', 'instance'], {}), '(lemma, pos, antivocab, output_vocab, instance)\n', (4159, 4206), False, 'import utils\n'), ((1003, 1037), 'parsers.GoldParser', 'parsers.GoldParser', (['gold_file_path'], {}), '(gold_file_path)\n', (1021, 1037), False, 'import parsers\n'), ((4795, 4830), 'utils.candidate_synsets', 'utils.candidate_synsets', (['lemma', 'pos'], {}), '(lemma, pos)\n', (4818, 4830), False, 'import utils\n'), ((315, 357), 'parsers.TrainingParser', 'parsers.TrainingParser', (['training_file_path'], {}), '(training_file_path)\n', (337, 357), False, 'import parsers\n'), ((2803, 2816), 'sklearn.utils.shuffle', 'shuffle', (['x', 'y'], {}), '(x, y)\n', (2810, 2816), False, 'from sklearn.utils import shuffle\n'), ((4861, 4933), 'utils.map_word_from_dict', 'utils.map_word_from_dict', (['c', '"""X"""', 'antivocab', 'output_vocab'], {'instance': '(True)'}), "(c, 'X', antivocab, output_vocab, instance=True)\n", (4885, 4933), False, 'import utils\n'), ((2178, 2191), 'sklearn.utils.shuffle', 'shuffle', (['x', 'y'], {}), '(x, y)\n', (2185, 2191), False, 'from sklearn.utils import shuffle\n'), ((2739, 2779), 'numpy.expand_dims', 'np.expand_dims', 
(["batch['labels']"], {'axis': '(-1)'}), "(batch['labels'], axis=-1)\n", (2753, 2779), True, 'import numpy as np\n'), ((2110, 2150), 'numpy.expand_dims', 'np.expand_dims', (["batch['labels']"], {'axis': '(-1)'}), "(batch['labels'], axis=-1)\n", (2124, 2150), True, 'import numpy as np\n')] |
from __future__ import print_function, division
from glob import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import TA_functions as taf
'''
This script crates the plots in DeltaV2-DeltaV3 space, that compare the 3 tests ran.
TEST1 - Average positions P1 and P2, transform to V2-V3 space, and compare to average
reference positions (V2-V3 space)
TEST2 - Transform individual positions P1 and P2 to V2-V3 space, average V2-V3 space
positions, and compare to average reference positions.
TEST3 - Transform P1 and P2 individually to V2-V3 space and compare star by star and
position by position.
'''
def v2theta_plot(case, meanV2, theta, save_plot=False, show_plot=False, destination=None):
    """
    This function creates the plot in V2-theta space of the 3 tests: averaging in pixel space, averaging on sky,
    and no averaging.
    Args:
        case        -- string, for example '491Scene1_rapid_real_bgFrac0.3'
        meanV2      -- list of 3 numpy arrays of Delta-V2 values for Tests 1, 2, and 3
        theta       -- list of 3 numpy arrays of theta values for Tests 1, 2, and 3
        save_plot   -- True or False
        show_plot   -- True or False
        destination -- string, destination directory (defaults to results_path)
    Returns:
        Nothing; the figure is saved and/or shown as requested.
    """
    # Set the paths
    results_path = os.path.abspath('../plots4presentationIST')
    # check if the plot is for an Nk set (arrays rather than a single float)
    basename = case
    if not isinstance(meanV2, float):
        basename = case+'_'+str(len(meanV2[0]))+'samples'
    # Make the plot of V2-THETA
    plot_title = r'Residual Mean Calculated Angle, $\theta$'
    fig1 = plt.figure(1, figsize=(12, 10))
    ax1 = fig1.add_subplot(111)
    plt.suptitle(plot_title, fontsize=18, y=0.96)
    plt.title(basename)
    plt.xlabel(r'$\Delta$V2')
    plt.ylabel(r'$\theta$')
    xmin, xmax = -0.01, 0.01
    ymin, ymax = -40.0, 40.0
    plt.xlim(xmin, xmax)
    plt.ylim(ymin, ymax)
    # zero reference lines (drawn past the limits so they always span the axes)
    plt.hlines(0.0, xmin, xmax*2, colors='k', linestyles='dashed')
    plt.vlines(0.0, ymin, ymax*2, colors='k', linestyles='dashed')
    plt.plot(meanV2[0], theta[0], 'b^', ms=10, alpha=0.7, label='Avg in Pixel Space')
    plt.plot(meanV2[1], theta[1], 'go', ms=10, alpha=0.7, label='Avg in Sky')
    plt.plot(meanV2[2], theta[2], 'r*', ms=13, alpha=0.7, label='No Avg')
    # Shrink current axis by 10%
    box = ax1.get_position()
    ax1.set_position([box.x0, box.y0, box.width * 0.85, box.height])
    ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5)) # put legend out of the plot box
    # NOTE(review): r'\mean' is not a LaTeX command — probably intended
    # something like r'$\bar{V2}$'; confirm how this renders
    if isinstance(meanV2, float):
        textinfig = r'V2$\mean$ = %0.2f $\theta$ = %0.2f' % (meanV2, theta)
        ax1.annotate(textinfig, xy=(1.02, 0.35), xycoords='axes fraction' )
    if save_plot:
        if destination is not None:
            fig_name = os.path.join(destination, 'thetaV2_'+basename+'.jpg')
        else:
            fig_name = os.path.join(results_path, 'thetaV2_'+basename+'.jpg')
        fig1.savefig(fig_name)
        print ("\n Plot saved: ", fig_name)
    if show_plot:
        plt.show()
    else:
        plt.close('all')
def v3theta_plot(case, meanV3, theta, save_plot=False, show_plot=False, destination=None):
    """
    This function creates the plot in V3-theta space of the 3 tests: averaging in pixel space, averaging on sky,
    and no averaging.
    Args:
        case        -- string, for example '491Scene1_rapid_real_bgFrac0.3'
        meanV3      -- list of 3 numpy arrays of Delta-V3 values for Tests 1, 2, and 3
        theta       -- list of 3 numpy arrays of theta values for Tests 1, 2, and 3
        save_plot   -- True or False
        show_plot   -- True or False
        destination -- string, destination directory (defaults to results_path)
    Returns:
        Nothing; the figure is saved and/or shown as requested.
    """
    # Set the paths
    results_path = os.path.abspath('../plots4presentationIST')
    # check if the plot is for an Nk set (arrays rather than a single float)
    basename = case
    if not isinstance(meanV3, float):
        basename = case+'_'+str(len(meanV3[0]))+'samples'
    # Make the plot of V3-THETA (theta on the x-axis, Delta-V3 on the y-axis)
    plot_title = r'Residual Mean Calculated Angle, $\theta$'
    fig1 = plt.figure(1, figsize=(12, 10))
    ax1 = fig1.add_subplot(111)
    plt.suptitle(plot_title, fontsize=18, y=0.96)
    plt.title(basename)
    plt.xlabel(r'$\theta$')
    plt.ylabel(r'$\Delta$V3')
    xmin, xmax = -40.0, 40.0
    ymin, ymax = -0.02, 0.02
    plt.xlim(xmin, xmax)
    plt.ylim(ymin, ymax)
    plt.hlines(0.0, xmin, xmax*2, colors='k', linestyles='dashed')
    plt.vlines(0.0, ymin, ymax*2, colors='k', linestyles='dashed')
    plt.plot(theta[0], meanV3[0], 'b^', ms=10, alpha=0.7, label='Avg in Pixel Space')
    plt.plot(theta[1], meanV3[1], 'go', ms=10, alpha=0.7, label='Avg in Sky')
    plt.plot(theta[2], meanV3[2], 'r*', ms=13, alpha=0.7, label='No Avg')
    # Shrink current axis by 10%
    box = ax1.get_position()
    ax1.set_position([box.x0, box.y0, box.width * 0.85, box.height])
    ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5)) # put legend out of the plot box
    # BUGFIX: this check previously tested `meanV2`, a name that does not exist
    # in this function and raised NameError on every call; it must test `meanV3`.
    if isinstance(meanV3, float):
        textinfig = r'V3$\mean$ = %0.2f $\theta$ = %0.2f' % (meanV3, theta)
        ax1.annotate(textinfig, xy=(1.02, 0.35), xycoords='axes fraction' )
    if save_plot:
        if destination is not None:
            fig_name = os.path.join(destination, 'thetaV3_'+basename+'.jpg')
        else:
            fig_name = os.path.join(results_path, 'thetaV3_'+basename+'.jpg')
        fig1.savefig(fig_name)
        print ("\n Plot saved: ", fig_name)
    if show_plot:
        plt.show()
    else:
        plt.close('all')
def theta_plot(case, theta, save_plot=False, show_plot=False, destination=None, print_side_values=None):
    """
    This function creates the plot of theta (per sample) for the 3 tests: averaging in
    pixel space, averaging on sky, and no averaging.
    Args:
        case        -- string, for example '491Scene1_rapid_real_bgFrac0.3'
        theta       -- list of 3 numpy arrays of theta for Tests 1, 2, and 3
        save_plot   -- True or False
        show_plot   -- True or False
        destination -- string, destination directory (defaults to results_path)
        print_side_values -- optional sequence of 6 floats: sigma and mu of
                             theta for each of the 3 tests, annotated beside the plot
    Returns:
        Nothing; the figure is saved and/or shown as requested.
    """
    # Set the paths
    results_path = os.path.abspath('../plots4presentationIST')
    # check if the plot is for an Nk set
    basename = case
    #if not isinstance(theta, float):
    #    basename = case+'_'+str(len(theta[0]))+'samples'
    # Make the plot of THETA
    plot_title = r'Residual Mean Calculated Angle, $\theta$'
    fig1 = plt.figure(1, figsize=(12, 10))
    ax1 = fig1.add_subplot(111)
    plt.suptitle(plot_title, fontsize=18, y=0.96)
    plt.title(basename)
    plt.xlabel('Sample Number')
    plt.ylabel(r'$\theta$ [marcsec]')
    xmin, xmax = -500.0, 5500.0
    #ymin, ymax = -40.0, 40.0
    # y-limits stretched 20% past the extremes of the No-Avg test
    ymin, ymax = min(theta[2])+min(theta[2])*0.2, max(theta[2])+max(theta[2])*0.2
    plt.xlim(xmin, xmax)
    plt.ylim(ymin, ymax)
    #ax = plt.gca()
    # recompute the ax.dataLim
    #ax.relim()
    # update ax.viewLim using the new dataLim
    #ax.autoscale_view()
    plt.hlines(0.0, xmin, xmax, colors='k', linestyles='dashed')
    plt.vlines(0.0, ymin, ymax, colors='k', linestyles='dashed')
    # plotted against the implicit sample index (0..N-1)
    plt.plot(theta[0], 'b^', ms=10, alpha=0.7, label='Avg in Pixel Space')
    plt.plot(theta[1], 'go', ms=10, alpha=0.7, label='Avg in Sky')
    plt.plot(theta[2], 'r*', ms=13, alpha=0.7, label='No Avg')
    # Shrink current axis by 20%
    box = ax1.get_position()
    ax1.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    ax1.legend(loc='center left', bbox_to_anchor=(1, 0.6)) # put legend out of the plot box
    if print_side_values is not None:
        # standard deviations and means of theta-axis for the 3 tests
        textinfig0 = '{:<13}'.format('Theta Standard Deviations and Means')
        textinfig1 = r'$\sigma(AvgPix)$={:<6.2f} $\mu(AvgPix)$={:<6.2f}'.format(print_side_values[0], print_side_values[1])
        textinfig2 = r'$\sigma(AvgSky)$={:<6.2f} $\mu(AvgSky)$={:<6.2f}'.format(print_side_values[2], print_side_values[3])
        textinfig3 = r'$ \sigma(NoAvg)$={:<6.2f} $\mu(NoAvg)$={:<6.2f}'.format(print_side_values[4], print_side_values[5])
        ax1.annotate(textinfig0, xy=(1.02, 0.48), xycoords='axes fraction' )
        ax1.annotate(textinfig1, xy=(1.02, 0.45), xycoords='axes fraction' )
        ax1.annotate(textinfig2, xy=(1.02, 0.42), xycoords='axes fraction' )
        ax1.annotate(textinfig3, xy=(1.02, 0.39), xycoords='axes fraction' )
    if save_plot:
        if destination is not None:
            fig_name = os.path.join(destination, basename+'_thetas.jpg')
        else:
            fig_name = os.path.join(results_path, 'theta_'+basename+'.jpg')
        fig1.savefig(fig_name)
        print ("\n Plot saved: ", fig_name)
    if show_plot:
        plt.show()
    else:
        plt.close('all')
def make_plot(cwincase, arrx, arry, xlabel, ylabel, plot_title=None, labels_list=None, xlims=None, ylims=None,
              print_side_string = None, print_side_values=None,
              save_plot=False, show_plot=True, destination=None, star_sample=None, square=True):
    '''
    This function creates a plot of the given arrays for the 3 tests.
    Args:
        cwincase: string, for example '491Scene1_rapid_real_bgFrac0.3_Nsigma2' (this will be the subtitle)
        arrx: list of 3 numpy arrays
        arry: list of 3 numpy arrays
        xlabel: string, name of x-axis
        ylabel: string, name of y-axis
        plot_title: string, title of the plot
        labels_list: list of 3 strings
        xlims: list, limits of x-axis
        ylims: list, limits of y-axis
        print_side_string: list, strings to print on the side (sigma or mu)
        print_side_values: list, values to print on the side (standard deviations or means)
        save_plot: True or False
        show_plot: True or False
        destination: path and name of the resulting plot
        star_sample: list of star-number lists; outlier points are annotated
            with their star number (None disables the annotations)
        square: True or False, force x- and y-axis to share the larger of the
            two symmetric ranges
    Returns:
        Nothing
    '''
    fig1 = plt.figure(1, figsize=(12, 10))
    ax1 = fig1.add_subplot(111)
    plt.suptitle(plot_title, fontsize=18, y=0.96)
    plt.title(cwincase)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    # Default limits: symmetric around zero, 20% beyond the largest magnitude
    # of the first test's data.
    if xlims is None:
        x = np.abs(arrx[0])
        xmax = max(x)+max(x)*0.2
        xlims = [-1*xmax, xmax]
    if ylims is None:
        y = np.abs(arry[0])
        ymax = max(y)+max(y)*0.2
        ylims = [-1*ymax, ymax]
    # Compare which one is larger and use that one
    if square:
        if xlims[1] > ylims[1]:
            ylims = xlims
        else:
            xlims = ylims
    plt.xlim(xlims[0], xlims[1])
    plt.ylim(ylims[0], ylims[1])
    # Dashed cross-hairs through the origin.
    plt.hlines(0.0, xlims[0], xlims[1], colors='k', linestyles='dashed')
    plt.vlines(0.0, ylims[0], ylims[1], colors='k', linestyles='dashed')
    plt.plot(arrx[0], arry[0], 'b^', ms=10, alpha=0.5, label=labels_list[0])
    # Tests 2 and 3 are only plotted when all three test arrays are given.
    if len(arrx) != 1:
        plt.plot(arrx[1], arry[1], 'go', ms=10, alpha=0.5, label=labels_list[1])
        plt.plot(arrx[2], arry[2], 'r*', ms=13, alpha=0.5, label=labels_list[2])
    if star_sample is not None:
        if len(arrx) == 3:
            stars_sample1, stars_sample2, stars_sample3 = star_sample
            # double the length of the list for test 3 because position 2 comes after position 1
            new_star_sample3 = []
            for position in range(2):
                for st in stars_sample3:
                    new_star_sample3.append(st)
        else:
            stars_sample1 = star_sample[0]
        # Points outside this +/-0.05 band get annotated with their star number.
        x_reject, y_reject = [-0.05, 0.05], [-0.05, 0.05]
        # for test1 and 2
        for si, xi, yi in zip(stars_sample1, arrx[0], arry[0]):
            if yi >= y_reject[1] or yi <= y_reject[0] or xi >= x_reject[1] or xi <= x_reject[0]:
                si = int(si)
                subxcoord = 5
                subycoord = 0
                side = 'left'
                plt.annotate('{}'.format(si), xy=(xi,yi), xytext=(subxcoord, subycoord), ha=side, textcoords='offset points')
        # NOTE(review): the inner condition below compares len(arrx) (== 1 in
        # this branch) against 2*len(stars_sample1), so it can essentially
        # never be True; it was probably meant to be len(arrx[0]). Also
        # stars_sample3 is only bound in the len(arrx) == 3 branch above, so
        # reaching the inner loop here would raise NameError — TODO confirm
        # the intended behavior before relying on this path.
        if len(arrx) == 1:
            if len(arrx) == 2*len(stars_sample1): # then we are dealing with TEST3 data
                new_star_sample3 = []
                for position in range(2):
                    for st in stars_sample3:
                        new_star_sample3.append(st)
                for si, xi, yi in zip(new_star_sample3, arrx[0], arry[0]):
                    if yi >= y_reject[1] or yi <= y_reject[0] or xi >= x_reject[1] or xi <= x_reject[0]:
                        si = int(si)
                        subxcoord = 5
                        subycoord = 0
                        side = 'left'
                        plt.annotate('{}'.format(si), xy=(xi,yi), xytext=(subxcoord, subycoord), ha=side, textcoords='offset points')
        else:
            # for test3
            for si, xi, yi in zip(new_star_sample3, arrx[2], arry[2]):
                if yi >= y_reject[1] or yi <= y_reject[0] or xi >= x_reject[1] or xi <= x_reject[0]:
                    si = int(si)
                    subxcoord = 5
                    subycoord = 0
                    side = 'left'
                    plt.annotate('{}'.format(si), xy=(xi,yi), xytext=(subxcoord, subycoord), ha=side, textcoords='offset points')
    # Shrink current axis by 20%
    box = ax1.get_position()
    ax1.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    ax1.legend(loc='center left', bbox_to_anchor=(1, 0.7)) # put legend out of the plot box
    if print_side_values is not None:
        # standard deviations and means of x-axis for the 3 tests
        textinfig0 = '{:<13}'.format(print_side_string[0])
        ax1.annotate(textinfig0, xy=(1.02, 0.58), xycoords='axes fraction' )
        # standard deviations and means of y-axis for the 3 tests
        textinfig0 = r'{:<13}'.format(print_side_string[1])
        ax1.annotate(textinfig0, xy=(1.02, 0.43), xycoords='axes fraction' )
        if len(arrx) == 3:
            #print_side_values = [0 T1sigmaV2, 1 T1meanV2, 2 T2sigmaV2, 3 T2meanV2, 4 T3sigmaV2, 5 T3meanV2,
            #                     6 T1sigmaV3, 7 T1meanV3, 8 T2sigmaV3, 9 T2meanV3, 10 T3sigmaV3, 11 T3meanV3]
            # standard deviations and means of x-axis for the 3 tests
            textinfig1 = r'$\sigma(AvgPix)$={:<6.2f}   $\mu(AvgPix)$={:<6.2f}'.format(print_side_values[0], print_side_values[1])
            textinfig2 = r'$\sigma(AvgSky)$={:<6.2f}   $\mu(AvgSky)$={:<6.2f}'.format(print_side_values[2], print_side_values[3])
            textinfig3 = r'$ \sigma(NoAvg)$={:<6.2f}    $\mu(NoAvg)$={:<6.2f}'.format(print_side_values[4], print_side_values[5])
            ax1.annotate(textinfig1, xy=(1.02, 0.55), xycoords='axes fraction' )
            ax1.annotate(textinfig2, xy=(1.02, 0.52), xycoords='axes fraction' )
            ax1.annotate(textinfig3, xy=(1.02, 0.49), xycoords='axes fraction' )
            # standard deviations and means of y-axis for the 3 tests
            textinfig1 = r'$\sigma(AvgPix)$={:<6.2f}   $\mu(AvgPix)$={:<6.2f}'.format(print_side_values[6], print_side_values[7])
            textinfig2 = r'$\sigma(AvgSky)$={:<6.2f}   $\mu(AvgSky)$={:<6.2f}'.format(print_side_values[8], print_side_values[9])
            textinfig3 = r' $\sigma(NoAvg)$={:<6.2f}    $\mu(NoAvg)$={:<6.2f}'.format(print_side_values[10], print_side_values[11])
            ax1.annotate(textinfig1, xy=(1.02, 0.40), xycoords='axes fraction' )
            ax1.annotate(textinfig2, xy=(1.02, 0.37), xycoords='axes fraction' )
            ax1.annotate(textinfig3, xy=(1.02, 0.34), xycoords='axes fraction' )
        else:
            # standard deviations and means of x-axis
            textinfig1 = r'$\sigma$={:<6.2f}   $\mu$={:<6.2f}'.format(print_side_values[0], print_side_values[1])
            ax1.annotate(textinfig1, xy=(1.02, 0.55), xycoords='axes fraction' )
            # standard deviations and means of y-axis
            textinfig1 = r'$\sigma$={:<6.2f}   $\mu$={:<6.2f}'.format(print_side_values[2], print_side_values[3])
            ax1.annotate(textinfig1, xy=(1.02, 0.40), xycoords='axes fraction' )
    if save_plot:
        fig1.savefig(destination)
        print ("\n Plot saved: ", destination)
    if show_plot:
        plt.show()
    else:
        plt.close('all')
def get_stdevmeans4print_side_values(muV2, muV3):
    """Build the flat value list that make_plot prints beside the axes.

    Args:
        muV2: list of 3 arrays with the V2 values of the 3 tests.
        muV3: list of 3 arrays with the V3 values of the 3 tests.

    Returns:
        list: [sd_V2_t1, mean_V2_t1, sd_V2_t2, mean_V2_t2, sd_V2_t3,
        mean_V2_t3, sd_V3_t1, mean_V3_t1, sd_V3_t2, mean_V3_t2, sd_V3_t3,
        mean_V3_t3] — V2 statistics first, then V3, interleaved as
        (stdev, mean) pairs per test.
    """
    # taf.find_std returns a (stdev, mean) pair for each array.
    v2_stats = [taf.find_std(mv2) for mv2 in muV2]
    v3_stats = [taf.find_std(mv3) for mv3 in muV3]
    print_side_values = []
    for stats in (v2_stats, v3_stats):
        # Exactly three tests are expected, matching the original layout.
        for test_index in range(3):
            stdev, mean = stats[test_index]
            print_side_values.append(stdev)
            print_side_values.append(mean)
    return print_side_values
#######################################################################################################################
if __name__ == '__main__':
    #### Set parameters
    centroid_windows = [3, 5, 7]
    min_elements = 4
    Nsigma2plot = 2.5
    stars_in_sample = 5
    case = '2DetsScene1_rapid_real_bgFrac0.3_thres01'
    save_plot = False
    show_plot = True
    milliarcsec = True # arcsec if set to False
    used_abs_threshold = True # only plot least squares routine results if set to False
    good_and_ugly_stars = True # only plot good stars if set to False
    ######################################################
    # general path to text files
    star_sample_dir = repr(stars_in_sample)+'_star_sample'
    type_of_stars = 'only_good_stars'
    if good_and_ugly_stars:
        type_of_stars = 'good_and_uglies'
    gen_path = os.path.abspath('../resultsXrandomstars/'+type_of_stars+'/'+star_sample_dir)
    # The abs-threshold results live in a subdirectory whose name depends on
    # whether the default min_elements (4) was used.
    if used_abs_threshold and min_elements==4:
        gen_path += '/abs_threshold'
    elif used_abs_threshold and min_elements !=4:
        gen_path += '/diff_min_elements_abs_threshold'
    #results_path = os.path.abspath('../plots4presentationIST')
    results_path = gen_path
    if good_and_ugly_stars:
        results_path = gen_path
    print (gen_path)
    # Loop over centroid_windows
    if min_elements != 4:
        case += '_minele'+repr(min_elements)
    for cwin in centroid_windows:
        # load the data from the 3 tests (one TEST* text file per test)
        test_files_list = glob(os.path.join(gen_path, 'TEST*'+case+'*_Nsigma'+repr(Nsigma2plot)+'*'+repr(cwin)+'.txt'))
        # Column layout of the text files:
        #            0        1        2         3         4       5       6         7         8
        # data = sample, sigmaV2, sigmaV3, sigmaTheta, meanV2, meanV3, meanTheta, LastIter, RejStars
        dataT1 = np.loadtxt(test_files_list[0], comments='#', unpack=True)
        dataT2 = np.loadtxt(test_files_list[1], comments='#', unpack=True)
        dataT3 = np.loadtxt(test_files_list[2], comments='#', unpack=True)
        # compact variables and convert to milli arcsec
        conversion = 1.0
        if milliarcsec:
            conversion = 1000.0
        muV2 = [dataT1[4]*conversion, dataT2[4]*conversion, dataT3[4]*conversion]
        muV3 = [dataT1[5]*conversion, dataT2[5]*conversion, dataT3[5]*conversion]
        sigmaV2 = [dataT1[1]*conversion, dataT2[1]*conversion, dataT3[1]*conversion]
        sigmaV3 = [dataT1[2]*conversion, dataT2[2]*conversion, dataT3[2]*conversion]
        theta = [dataT1[6]*conversion, dataT2[6]*conversion, dataT3[6]*conversion]
        cwincase = case+'_CentroidWin'+repr(cwin)+'_'+str(stars_in_sample)+'star'+str(len(dataT1[0]))+'samples'
        if used_abs_threshold:
            cwincase += '_withAbsThres'
        # calculate mean of the means and standard deviation of the means
        print_side_values = get_stdevmeans4print_side_values(muV2, muV3)
        cwincase += '_Nsigma'+repr(Nsigma2plot)
        # Means plot
        xlabel, ylabel = r'$\Delta$V2 [marcsecs]', r'$\Delta$V3 [marcsecs]'
        plot_title = 'Mean Residual Values'# for '+repr(len(sigmaV2[0]))+' samples of '+repr(stars_in_sample)+' stars'
        labels_list = ['Avg in Pixel Space', 'Avg in Sky', 'No Avg']
        #xlims, ylims = [-10.0, 10.0], [-10.0, 10.0]
        #xlims, ylims = [-1100.0, 1100.0], [-1100.0, 1100.0]
        xlims, ylims = None, None
        print_side_string = [r'$\Delta$V2', r'$\Delta$V3']
        destination = os.path.join(results_path, cwincase+'_means.jpg')
        make_plot(cwincase, muV2, muV3, xlabel, ylabel, plot_title=plot_title, labels_list=labels_list,
                  xlims=xlims, ylims=ylims, print_side_string = print_side_string, print_side_values=print_side_values,
                  save_plot=save_plot, show_plot=show_plot, destination=destination)
        # calculate mean of the sigmas and standard deviation of the sigmas
        print_side_values = get_stdevmeans4print_side_values(sigmaV2, sigmaV3)
        # Standard deviations plot
        xlabel, ylabel = r'$\Delta$V2 [marcsecs]', r'$\Delta$V3 [marcsecs]'
        plot_title = 'Standard Deviations'
        labels_list = ['Avg in Pixel Space', 'Avg in Sky', 'No Avg']
        #xlims, ylims = [-5.0, 50.0], [-5.0, 50.0]
        #xlims, ylims = None, None
        print_side_string = [r'$\Delta$V2', r'$\Delta$V3']
        destination = os.path.join(results_path, cwincase+'_stdevs.jpg')
        make_plot(cwincase, sigmaV2, sigmaV3, xlabel, ylabel, plot_title=plot_title, labels_list=labels_list,
                  xlims=xlims, ylims=ylims, print_side_string = print_side_string, print_side_values=print_side_values,
                  save_plot=save_plot, show_plot=show_plot, destination=destination)
        # calculate mean of the thetas and standard deviation of the thetas
        theta_stdevs, theta_means = [], []
        for th in theta:
            sd, m = taf.find_std(th)
            theta_stdevs.append(sd)
            theta_means.append(m)
        print_side_values = [theta_stdevs[0], theta_stdevs[1], theta_stdevs[2],
                             theta_means[0], theta_means[1], theta_means[2]]
        # Thetas plot
        #v2theta_plot(cwincase, muV2, theta, save_plot=save_plot, show_plot=show_plot, destination=None)
        #v3theta_plot(cwincase, muV3, theta, save_plot=save_plot, show_plot=show_plot, destination=None)
        destination = results_path
        theta_plot(cwincase, theta, save_plot=save_plot, show_plot=show_plot,
                   destination=destination, print_side_values=print_side_values)
| [
"numpy.abs",
"matplotlib.pyplot.vlines",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.path.join",
"matplotlib.pyplot.hlines",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"TA_functions.find_std",
"os.path.abspath",
"matplotl... | [((1400, 1443), 'os.path.abspath', 'os.path.abspath', (['"""../plots4presentationIST"""'], {}), "('../plots4presentationIST')\n", (1415, 1443), False, 'import os\n'), ((1707, 1738), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(12, 10)'}), '(1, figsize=(12, 10))\n', (1717, 1738), True, 'import matplotlib.pyplot as plt\n'), ((1775, 1820), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['plot_title'], {'fontsize': '(18)', 'y': '(0.96)'}), '(plot_title, fontsize=18, y=0.96)\n', (1787, 1820), True, 'import matplotlib.pyplot as plt\n'), ((1825, 1844), 'matplotlib.pyplot.title', 'plt.title', (['basename'], {}), '(basename)\n', (1834, 1844), True, 'import matplotlib.pyplot as plt\n'), ((1849, 1874), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\Delta$V2"""'], {}), "('$\\\\Delta$V2')\n", (1859, 1874), True, 'import matplotlib.pyplot as plt\n'), ((1879, 1902), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\theta$"""'], {}), "('$\\\\theta$')\n", (1889, 1902), True, 'import matplotlib.pyplot as plt\n'), ((1965, 1985), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (1973, 1985), True, 'import matplotlib.pyplot as plt\n'), ((1990, 2010), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ymin', 'ymax'], {}), '(ymin, ymax)\n', (1998, 2010), True, 'import matplotlib.pyplot as plt\n'), ((2015, 2079), 'matplotlib.pyplot.hlines', 'plt.hlines', (['(0.0)', 'xmin', '(xmax * 2)'], {'colors': '"""k"""', 'linestyles': '"""dashed"""'}), "(0.0, xmin, xmax * 2, colors='k', linestyles='dashed')\n", (2025, 2079), True, 'import matplotlib.pyplot as plt\n'), ((2082, 2146), 'matplotlib.pyplot.vlines', 'plt.vlines', (['(0.0)', 'ymin', '(ymax * 2)'], {'colors': '"""k"""', 'linestyles': '"""dashed"""'}), "(0.0, ymin, ymax * 2, colors='k', linestyles='dashed')\n", (2092, 2146), True, 'import matplotlib.pyplot as plt\n'), ((2149, 2235), 'matplotlib.pyplot.plot', 'plt.plot', (['meanV2[0]', 'theta[0]', '"""b^"""'], {'ms': '(10)', 
'alpha': '(0.7)', 'label': '"""Avg in Pixel Space"""'}), "(meanV2[0], theta[0], 'b^', ms=10, alpha=0.7, label=\n 'Avg in Pixel Space')\n", (2157, 2235), True, 'import matplotlib.pyplot as plt\n'), ((2235, 2308), 'matplotlib.pyplot.plot', 'plt.plot', (['meanV2[1]', 'theta[1]', '"""go"""'], {'ms': '(10)', 'alpha': '(0.7)', 'label': '"""Avg in Sky"""'}), "(meanV2[1], theta[1], 'go', ms=10, alpha=0.7, label='Avg in Sky')\n", (2243, 2308), True, 'import matplotlib.pyplot as plt\n'), ((2313, 2382), 'matplotlib.pyplot.plot', 'plt.plot', (['meanV2[2]', 'theta[2]', '"""r*"""'], {'ms': '(13)', 'alpha': '(0.7)', 'label': '"""No Avg"""'}), "(meanV2[2], theta[2], 'r*', ms=13, alpha=0.7, label='No Avg')\n", (2321, 2382), True, 'import matplotlib.pyplot as plt\n'), ((3885, 3928), 'os.path.abspath', 'os.path.abspath', (['"""../plots4presentationIST"""'], {}), "('../plots4presentationIST')\n", (3900, 3928), False, 'import os\n'), ((4192, 4223), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(12, 10)'}), '(1, figsize=(12, 10))\n', (4202, 4223), True, 'import matplotlib.pyplot as plt\n'), ((4260, 4305), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['plot_title'], {'fontsize': '(18)', 'y': '(0.96)'}), '(plot_title, fontsize=18, y=0.96)\n', (4272, 4305), True, 'import matplotlib.pyplot as plt\n'), ((4310, 4329), 'matplotlib.pyplot.title', 'plt.title', (['basename'], {}), '(basename)\n', (4319, 4329), True, 'import matplotlib.pyplot as plt\n'), ((4334, 4357), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta$"""'], {}), "('$\\\\theta$')\n", (4344, 4357), True, 'import matplotlib.pyplot as plt\n'), ((4362, 4387), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\Delta$V3"""'], {}), "('$\\\\Delta$V3')\n", (4372, 4387), True, 'import matplotlib.pyplot as plt\n'), ((4450, 4470), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (4458, 4470), True, 'import matplotlib.pyplot as plt\n'), ((4475, 4495), 'matplotlib.pyplot.ylim', 
'plt.ylim', (['ymin', 'ymax'], {}), '(ymin, ymax)\n', (4483, 4495), True, 'import matplotlib.pyplot as plt\n'), ((4500, 4564), 'matplotlib.pyplot.hlines', 'plt.hlines', (['(0.0)', 'xmin', '(xmax * 2)'], {'colors': '"""k"""', 'linestyles': '"""dashed"""'}), "(0.0, xmin, xmax * 2, colors='k', linestyles='dashed')\n", (4510, 4564), True, 'import matplotlib.pyplot as plt\n'), ((4567, 4631), 'matplotlib.pyplot.vlines', 'plt.vlines', (['(0.0)', 'ymin', '(ymax * 2)'], {'colors': '"""k"""', 'linestyles': '"""dashed"""'}), "(0.0, ymin, ymax * 2, colors='k', linestyles='dashed')\n", (4577, 4631), True, 'import matplotlib.pyplot as plt\n'), ((4634, 4720), 'matplotlib.pyplot.plot', 'plt.plot', (['theta[0]', 'meanV3[0]', '"""b^"""'], {'ms': '(10)', 'alpha': '(0.7)', 'label': '"""Avg in Pixel Space"""'}), "(theta[0], meanV3[0], 'b^', ms=10, alpha=0.7, label=\n 'Avg in Pixel Space')\n", (4642, 4720), True, 'import matplotlib.pyplot as plt\n'), ((4720, 4793), 'matplotlib.pyplot.plot', 'plt.plot', (['theta[1]', 'meanV3[1]', '"""go"""'], {'ms': '(10)', 'alpha': '(0.7)', 'label': '"""Avg in Sky"""'}), "(theta[1], meanV3[1], 'go', ms=10, alpha=0.7, label='Avg in Sky')\n", (4728, 4793), True, 'import matplotlib.pyplot as plt\n'), ((4798, 4867), 'matplotlib.pyplot.plot', 'plt.plot', (['theta[2]', 'meanV3[2]', '"""r*"""'], {'ms': '(13)', 'alpha': '(0.7)', 'label': '"""No Avg"""'}), "(theta[2], meanV3[2], 'r*', ms=13, alpha=0.7, label='No Avg')\n", (4806, 4867), True, 'import matplotlib.pyplot as plt\n'), ((6280, 6323), 'os.path.abspath', 'os.path.abspath', (['"""../plots4presentationIST"""'], {}), "('../plots4presentationIST')\n", (6295, 6323), False, 'import os\n'), ((6584, 6615), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(12, 10)'}), '(1, figsize=(12, 10))\n', (6594, 6615), True, 'import matplotlib.pyplot as plt\n'), ((6652, 6697), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['plot_title'], {'fontsize': '(18)', 'y': '(0.96)'}), '(plot_title, fontsize=18, 
y=0.96)\n', (6664, 6697), True, 'import matplotlib.pyplot as plt\n'), ((6702, 6721), 'matplotlib.pyplot.title', 'plt.title', (['basename'], {}), '(basename)\n', (6711, 6721), True, 'import matplotlib.pyplot as plt\n'), ((6726, 6753), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sample Number"""'], {}), "('Sample Number')\n", (6736, 6753), True, 'import matplotlib.pyplot as plt\n'), ((6758, 6792), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\theta$ [marcsec]"""'], {}), "('$\\\\theta$ [marcsec]')\n", (6768, 6792), True, 'import matplotlib.pyplot as plt\n'), ((6941, 6961), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (6949, 6961), True, 'import matplotlib.pyplot as plt\n'), ((6966, 6986), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ymin', 'ymax'], {}), '(ymin, ymax)\n', (6974, 6986), True, 'import matplotlib.pyplot as plt\n'), ((7129, 7189), 'matplotlib.pyplot.hlines', 'plt.hlines', (['(0.0)', 'xmin', 'xmax'], {'colors': '"""k"""', 'linestyles': '"""dashed"""'}), "(0.0, xmin, xmax, colors='k', linestyles='dashed')\n", (7139, 7189), True, 'import matplotlib.pyplot as plt\n'), ((7194, 7254), 'matplotlib.pyplot.vlines', 'plt.vlines', (['(0.0)', 'ymin', 'ymax'], {'colors': '"""k"""', 'linestyles': '"""dashed"""'}), "(0.0, ymin, ymax, colors='k', linestyles='dashed')\n", (7204, 7254), True, 'import matplotlib.pyplot as plt\n'), ((7259, 7329), 'matplotlib.pyplot.plot', 'plt.plot', (['theta[0]', '"""b^"""'], {'ms': '(10)', 'alpha': '(0.7)', 'label': '"""Avg in Pixel Space"""'}), "(theta[0], 'b^', ms=10, alpha=0.7, label='Avg in Pixel Space')\n", (7267, 7329), True, 'import matplotlib.pyplot as plt\n'), ((7334, 7396), 'matplotlib.pyplot.plot', 'plt.plot', (['theta[1]', '"""go"""'], {'ms': '(10)', 'alpha': '(0.7)', 'label': '"""Avg in Sky"""'}), "(theta[1], 'go', ms=10, alpha=0.7, label='Avg in Sky')\n", (7342, 7396), True, 'import matplotlib.pyplot as plt\n'), ((7401, 7459), 'matplotlib.pyplot.plot', 'plt.plot', (['theta[2]', 
'"""r*"""'], {'ms': '(13)', 'alpha': '(0.7)', 'label': '"""No Avg"""'}), "(theta[2], 'r*', ms=13, alpha=0.7, label='No Avg')\n", (7409, 7459), True, 'import matplotlib.pyplot as plt\n'), ((10034, 10065), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(12, 10)'}), '(1, figsize=(12, 10))\n', (10044, 10065), True, 'import matplotlib.pyplot as plt\n'), ((10102, 10147), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['plot_title'], {'fontsize': '(18)', 'y': '(0.96)'}), '(plot_title, fontsize=18, y=0.96)\n', (10114, 10147), True, 'import matplotlib.pyplot as plt\n'), ((10152, 10171), 'matplotlib.pyplot.title', 'plt.title', (['cwincase'], {}), '(cwincase)\n', (10161, 10171), True, 'import matplotlib.pyplot as plt\n'), ((10176, 10194), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (10186, 10194), True, 'import matplotlib.pyplot as plt\n'), ((10199, 10217), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (10209, 10217), True, 'import matplotlib.pyplot as plt\n'), ((10616, 10644), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlims[0]', 'xlims[1]'], {}), '(xlims[0], xlims[1])\n', (10624, 10644), True, 'import matplotlib.pyplot as plt\n'), ((10649, 10677), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylims[0]', 'ylims[1]'], {}), '(ylims[0], ylims[1])\n', (10657, 10677), True, 'import matplotlib.pyplot as plt\n'), ((10682, 10750), 'matplotlib.pyplot.hlines', 'plt.hlines', (['(0.0)', 'xlims[0]', 'xlims[1]'], {'colors': '"""k"""', 'linestyles': '"""dashed"""'}), "(0.0, xlims[0], xlims[1], colors='k', linestyles='dashed')\n", (10692, 10750), True, 'import matplotlib.pyplot as plt\n'), ((10755, 10823), 'matplotlib.pyplot.vlines', 'plt.vlines', (['(0.0)', 'ylims[0]', 'ylims[1]'], {'colors': '"""k"""', 'linestyles': '"""dashed"""'}), "(0.0, ylims[0], ylims[1], colors='k', linestyles='dashed')\n", (10765, 10823), True, 'import matplotlib.pyplot as plt\n'), ((10828, 10900), 'matplotlib.pyplot.plot', 'plt.plot', 
(['arrx[0]', 'arry[0]', '"""b^"""'], {'ms': '(10)', 'alpha': '(0.5)', 'label': 'labels_list[0]'}), "(arrx[0], arry[0], 'b^', ms=10, alpha=0.5, label=labels_list[0])\n", (10836, 10900), True, 'import matplotlib.pyplot as plt\n'), ((17678, 17764), 'os.path.abspath', 'os.path.abspath', (["('../resultsXrandomstars/' + type_of_stars + '/' + star_sample_dir)"], {}), "('../resultsXrandomstars/' + type_of_stars + '/' +\n star_sample_dir)\n", (17693, 17764), False, 'import os\n'), ((3121, 3131), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3129, 3131), True, 'import matplotlib.pyplot as plt\n'), ((3150, 3166), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3159, 3166), True, 'import matplotlib.pyplot as plt\n'), ((5606, 5616), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5614, 5616), True, 'import matplotlib.pyplot as plt\n'), ((5635, 5651), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5644, 5651), True, 'import matplotlib.pyplot as plt\n'), ((8866, 8876), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8874, 8876), True, 'import matplotlib.pyplot as plt\n'), ((8895, 8911), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (8904, 8911), True, 'import matplotlib.pyplot as plt\n'), ((10252, 10267), 'numpy.abs', 'np.abs', (['arrx[0]'], {}), '(arrx[0])\n', (10258, 10267), True, 'import numpy as np\n'), ((10367, 10382), 'numpy.abs', 'np.abs', (['arry[0]'], {}), '(arry[0])\n', (10373, 10382), True, 'import numpy as np\n'), ((10932, 11004), 'matplotlib.pyplot.plot', 'plt.plot', (['arrx[1]', 'arry[1]', '"""go"""'], {'ms': '(10)', 'alpha': '(0.5)', 'label': 'labels_list[1]'}), "(arrx[1], arry[1], 'go', ms=10, alpha=0.5, label=labels_list[1])\n", (10940, 11004), True, 'import matplotlib.pyplot as plt\n'), ((11013, 11085), 'matplotlib.pyplot.plot', 'plt.plot', (['arrx[2]', 'arry[2]', '"""r*"""'], {'ms': '(13)', 'alpha': '(0.5)', 'label': 'labels_list[2]'}), "(arrx[2], 
arry[2], 'r*', ms=13, alpha=0.5, label=labels_list[2])\n", (11021, 11085), True, 'import matplotlib.pyplot as plt\n'), ((16191, 16201), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16199, 16201), True, 'import matplotlib.pyplot as plt\n'), ((16220, 16236), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (16229, 16236), True, 'import matplotlib.pyplot as plt\n'), ((16402, 16419), 'TA_functions.find_std', 'taf.find_std', (['mv2'], {}), '(mv2)\n', (16414, 16419), True, 'import TA_functions as taf\n'), ((16494, 16511), 'TA_functions.find_std', 'taf.find_std', (['mv3'], {}), '(mv3)\n', (16506, 16511), True, 'import TA_functions as taf\n'), ((18631, 18688), 'numpy.loadtxt', 'np.loadtxt', (['test_files_list[0]'], {'comments': '"""#"""', 'unpack': '(True)'}), "(test_files_list[0], comments='#', unpack=True)\n", (18641, 18688), True, 'import numpy as np\n'), ((18706, 18763), 'numpy.loadtxt', 'np.loadtxt', (['test_files_list[1]'], {'comments': '"""#"""', 'unpack': '(True)'}), "(test_files_list[1], comments='#', unpack=True)\n", (18716, 18763), True, 'import numpy as np\n'), ((18781, 18838), 'numpy.loadtxt', 'np.loadtxt', (['test_files_list[2]'], {'comments': '"""#"""', 'unpack': '(True)'}), "(test_files_list[2], comments='#', unpack=True)\n", (18791, 18838), True, 'import numpy as np\n'), ((20288, 20339), 'os.path.join', 'os.path.join', (['results_path', "(cwincase + '_means.jpg')"], {}), "(results_path, cwincase + '_means.jpg')\n", (20300, 20339), False, 'import os\n'), ((21194, 21246), 'os.path.join', 'os.path.join', (['results_path', "(cwincase + '_stdevs.jpg')"], {}), "(results_path, cwincase + '_stdevs.jpg')\n", (21206, 21246), False, 'import os\n'), ((2874, 2931), 'os.path.join', 'os.path.join', (['destination', "('thetaV2_' + basename + '.jpg')"], {}), "(destination, 'thetaV2_' + basename + '.jpg')\n", (2886, 2931), False, 'import os\n'), ((2965, 3023), 'os.path.join', 'os.path.join', (['results_path', "('thetaV2_' + basename + 
'.jpg')"], {}), "(results_path, 'thetaV2_' + basename + '.jpg')\n", (2977, 3023), False, 'import os\n'), ((5359, 5416), 'os.path.join', 'os.path.join', (['destination', "('thetaV3_' + basename + '.jpg')"], {}), "(destination, 'thetaV3_' + basename + '.jpg')\n", (5371, 5416), False, 'import os\n'), ((5450, 5508), 'os.path.join', 'os.path.join', (['results_path', "('thetaV3_' + basename + '.jpg')"], {}), "(results_path, 'thetaV3_' + basename + '.jpg')\n", (5462, 5508), False, 'import os\n'), ((8625, 8676), 'os.path.join', 'os.path.join', (['destination', "(basename + '_thetas.jpg')"], {}), "(destination, basename + '_thetas.jpg')\n", (8637, 8676), False, 'import os\n'), ((8712, 8768), 'os.path.join', 'os.path.join', (['results_path', "('theta_' + basename + '.jpg')"], {}), "(results_path, 'theta_' + basename + '.jpg')\n", (8724, 8768), False, 'import os\n'), ((21726, 21742), 'TA_functions.find_std', 'taf.find_std', (['th'], {}), '(th)\n', (21738, 21742), True, 'import TA_functions as taf\n')] |
import numdifftools as nd
import numpy as np
import pandas as pd
from estimagic.decorators import numpy_interface
from estimagic.differentiation import differentiation_auxiliary as aux
def gradient(
    func,
    params,
    method="central",
    extrapolation=True,
    func_kwargs=None,
    step_options=None,
):
    """Estimate the gradient of *func* at *params*.

    Args:
        func (function): A function that maps params into a float.
        params (DataFrame): see :ref:`params`
        method (str): Finite difference scheme; one of "central" (default,
            highest accuracy), "forward" or "backward".
        extrapolation (bool): If True, use numdifftools with Richardson
            extrapolation; otherwise use a plain finite difference.
        func_kwargs (dict): Additional keyword arguments passed to func.
        step_options (dict): Options for the numdifftools step generator.
            See :ref:`step_options`

    Returns:
        Series: The index is the index of params, the values contain the
        estimated gradient.
    """
    if method not in ["central", "forward", "backward"]:
        raise ValueError("Method has to be in ['central', 'forward', 'backward']")
    step_options = {} if step_options is None else step_options
    func_kwargs = {} if func_kwargs is None else func_kwargs
    # Wrap func so it can be evaluated on a flat numpy array of values.
    wrapped = _create_internal_func(func, params, func_kwargs)
    x0 = params["value"].to_numpy()
    if extrapolation:
        differentiator = nd.Gradient(wrapped, method=method, **step_options)
        grad_np = differentiator(x0)
    else:
        grad_np = _no_extrapolation_gradient(wrapped, x0, method)
    return pd.Series(data=grad_np, index=params.index, name="gradient")
def _no_extrapolation_gradient(internal_func, params_value, method):
    """Compute a plain finite difference gradient (no Richardson extrapolation).

    Args:
        internal_func (function): Maps a 1d numpy array of parameter values
            into a float.
        params_value (np.ndarray): 1d array of parameter values.
        method (str): Name of the finite difference helper in aux
            ("central", "forward" or "backward").

    Returns:
        np.ndarray: 1d array with the estimated partial derivatives.
    """
    finite_diff = getattr(aux, method)
    f_x0 = internal_func(params_value)
    # Rule-of-thumb stepsize scale: square root of machine epsilon.
    sqrt_eps = np.sqrt(np.finfo(float).eps)
    out = np.empty_like(params_value)
    for pos, value in enumerate(params_value):
        step = (1 + abs(value)) * sqrt_eps
        out[pos] = finite_diff(internal_func, f_x0, params_value, pos, step) / step
    return out
def jacobian(
    func,
    params,
    method="central",
    extrapolation=True,
    func_kwargs=None,
    step_options=None,
):
    """Estimate the jacobian of *func* at *params*.

    Args:
        func (function): A function that maps params into a numpy array or
            pd.Series.
        params (DataFrame): see :ref:`params`
        method (string): Finite difference scheme; one of "central" (default,
            highest accuracy), "forward" or "backward".
        extrapolation (bool): If True, use numdifftools with Richardson
            extrapolation; otherwise use a plain finite difference.
        func_kwargs (dict): Additional keyword arguments passed to func.
        step_options (dict): Options for the numdifftools step generator.
            See :ref:`step_options`

    Returns:
        DataFrame: If func returns a Series, the index is the index of this
        Series, otherwise 0,1,2,... The columns are the index of params.
    """
    if method not in ["central", "forward", "backward"]:
        raise ValueError("Method has to be in ['central', 'forward', 'backward']")
    step_options = {} if step_options is None else step_options
    func_kwargs = {} if func_kwargs is None else func_kwargs
    # Evaluate once at the start point; only used to pick the output index.
    f_x0 = func(params, **func_kwargs)
    wrapped = _create_internal_func(func, params, func_kwargs)
    x0 = params["value"].to_numpy()
    if extrapolation:
        differentiator = nd.Jacobian(wrapped, method=method, **step_options)
        jac_np = differentiator(x0)
    else:
        jac_np = _no_extrapolation_jacobian(wrapped, x0, method)
    if isinstance(f_x0, pd.Series):
        return pd.DataFrame(index=f_x0.index, columns=params.index, data=jac_np)
    return pd.DataFrame(columns=params.index, data=jac_np)
def _no_extrapolation_jacobian(internal_func, params_value, method):
    """Compute a plain finite difference jacobian (no Richardson extrapolation).

    Args:
        internal_func (function): Maps a 1d numpy array of parameter values
            into a numpy array of function values.
        params_value (np.ndarray): 1d array of parameter values.
        method (str): Name of the finite difference helper in aux
            ("central", "forward" or "backward").

    Returns:
        np.ndarray: 2d array of shape (n_outputs, n_params).
    """
    finite_diff = getattr(aux, method)
    f_x0_np = internal_func(params_value)
    jac = np.empty((len(f_x0_np), len(params_value)))
    # Rule-of-thumb stepsize scale: square root of machine epsilon.
    sqrt_eps = np.sqrt(np.finfo(float).eps)
    for col, value in enumerate(params_value):
        step = (1 + abs(value)) * sqrt_eps
        jac[:, col] = finite_diff(internal_func, f_x0_np, params_value, col, step) / step
    return jac
def hessian(
    func,
    params,
    method="central",
    extrapolation=True,
    func_kwargs=None,
    step_options=None,
):
    """Estimate the hessian of *func* at *params*.

    Args:
        func (function): A function that maps params into a float.
        params (DataFrame): see :ref:`params`
        method (string): Finite difference scheme; only "central" is
            supported.
        extrapolation (bool): If True, use numdifftools with Richardson
            extrapolation; otherwise use a plain finite difference.
        func_kwargs (dict): Additional keyword arguments passed to func.
        step_options (dict): Options for the numdifftools step generator.
            See :ref:`step_options`

    Returns:
        DataFrame: Index and columns are the index of params; the data is
        the estimated hessian.
    """
    if method != "central":
        raise ValueError("Only the method 'central' is supported.")
    step_options = {} if step_options is None else step_options
    func_kwargs = {} if func_kwargs is None else func_kwargs
    wrapped = _create_internal_func(func, params, func_kwargs)
    x0 = params["value"].to_numpy()
    if extrapolation:
        differentiator = nd.Hessian(wrapped, method=method, **step_options)
        hess_np = differentiator(x0)
    else:
        hess_np = _no_extrapolation_hessian(wrapped, x0, method)
    return pd.DataFrame(data=hess_np, index=params.index, columns=params.index)
def _no_extrapolation_hessian(internal_func, params_value, method):
    """Approximate the hessian of *internal_func* at *params_value* with
    nested central finite differences (no Richardson extrapolation).

    Args:
        internal_func (callable): maps a parameter vector to a float.
        params_value (np.ndarray): point at which to differentiate.
        method (str): name of a finite-difference helper defined in ``aux``.

    Returns:
        np.ndarray: square array of shape (len(params_value),) * 2.
    """
    finite_diff = getattr(aux, method)
    # Rule-of-thumb scale for second derivatives; hoisted out of the loops.
    cbrt_eps = np.cbrt(np.finfo(float).eps)
    hess = np.empty((len(params_value), len(params_value)))
    for i, val_1 in enumerate(params_value):
        h_1 = (1.0 + abs(val_1)) * cbrt_eps
        for j, val_2 in enumerate(params_value):
            h_2 = (1.0 + abs(val_2)) * cbrt_eps
            params_r = params_value.copy()
            params_r[j] += h_2
            # Calculate the first derivative w.r.t. var_1 at (params + h_2) with
            # the central method. This is not the right f_x0, but the real one
            # isn't needed for the central method, so None is passed.
            f_plus = finite_diff(internal_func, None, params_r, i, h_1)
            params_l = params_value.copy()
            params_l[j] -= h_2
            # Same trick for (params - h_2).
            f_minus = finite_diff(internal_func, None, params_l, i, h_1)
            f_diff = (f_plus - f_minus) / (2.0 * h_1 * h_2)
            # BUG FIX: the original assigned hess[i, j] twice in a row;
            # the redundant duplicate assignment was removed.
            hess[i, j] = f_diff
    return hess
def _create_internal_func(func, params, func_kwargs):
    """Wrap *func* behind the ``numpy_interface`` decorator so it accepts
    a plain numpy parameter vector; *func_kwargs* are forwarded on each call.
    """
    @numpy_interface(params)
    def internal_func(param_vector):
        return func(param_vector, **func_kwargs)

    return internal_func
| [
"pandas.Series",
"numdifftools.Hessian",
"numdifftools.Gradient",
"estimagic.decorators.numpy_interface",
"numpy.empty_like",
"numdifftools.Jacobian",
"pandas.DataFrame",
"numpy.finfo"
] | [((1674, 1734), 'pandas.Series', 'pd.Series', ([], {'data': 'grad_np', 'index': 'params.index', 'name': '"""gradient"""'}), "(data=grad_np, index=params.index, name='gradient')\n", (1683, 1734), True, 'import pandas as pd\n'), ((1817, 1844), 'numpy.empty_like', 'np.empty_like', (['params_value'], {}), '(params_value)\n', (1830, 1844), True, 'import numpy as np\n'), ((5814, 5882), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'hess_np', 'index': 'params.index', 'columns': 'params.index'}), '(data=hess_np, index=params.index, columns=params.index)\n', (5826, 5882), True, 'import pandas as pd\n'), ((7194, 7217), 'estimagic.decorators.numpy_interface', 'numpy_interface', (['params'], {}), '(params)\n', (7209, 7217), False, 'from estimagic.decorators import numpy_interface\n'), ((3802, 3867), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'f_x0.index', 'columns': 'params.index', 'data': 'jac_np'}), '(index=f_x0.index, columns=params.index, data=jac_np)\n', (3814, 3867), True, 'import pandas as pd\n'), ((3893, 3940), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'params.index', 'data': 'jac_np'}), '(columns=params.index, data=jac_np)\n', (3905, 3940), True, 'import pandas as pd\n'), ((1477, 1534), 'numdifftools.Gradient', 'nd.Gradient', (['internal_func'], {'method': 'method'}), '(internal_func, method=method, **step_options)\n', (1488, 1534), True, 'import numdifftools as nd\n'), ((3587, 3644), 'numdifftools.Jacobian', 'nd.Jacobian', (['internal_func'], {'method': 'method'}), '(internal_func, method=method, **step_options)\n', (3598, 3644), True, 'import numdifftools as nd\n'), ((5641, 5697), 'numdifftools.Hessian', 'nd.Hessian', (['internal_func'], {'method': 'method'}), '(internal_func, method=method, **step_options)\n', (5651, 5697), True, 'import numdifftools as nd\n'), ((2003, 2018), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (2011, 2018), True, 'import numpy as np\n'), ((4287, 4302), 'numpy.finfo', 'np.finfo', (['float'], {}), 
'(float)\n', (4295, 4302), True, 'import numpy as np\n'), ((6140, 6155), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (6148, 6155), True, 'import numpy as np\n'), ((6257, 6272), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (6265, 6272), True, 'import numpy as np\n')] |
import os
import numpy as np
import paramiko
def connect(server="172.16.31.10", username="rutherford"):
    """Connect to *server* via ssh and open an sftp session.

    Args:
        server (str): hostname or IP address to connect to.
        username (str): login name for the ssh session.

    Returns:
        tuple: (sftp, ssh) — the open SFTP client and the SSHClient that
        owns it (keep the SSHClient alive while using the sftp object).
    """
    ssh = paramiko.SSHClient()
    ssh.load_host_keys(
        os.path.expanduser(os.path.join("~", ".ssh", "known_hosts"))
    )
    # BUG FIX: SSHClient.connect(hostname, port=22, username=None, ...) --
    # the original passed *username* positionally, so it was interpreted as
    # the port number. It must be passed by keyword.
    ssh.connect(server, username=username)
    sftp = ssh.open_sftp()
    return sftp, ssh
# vectorized haversine function
def haversine(lat1, lon1, lat2, lon2, to_radians=True, earth_radius=6371):
    """
    Great-circle distance between two points on the earth.

    Slightly modified version of http://stackoverflow.com/a/29546836/2901002.
    Inputs may be scalars or equal-length numeric arrays, expressed in
    decimal degrees (default) or radians (``to_radians=False``).
    """
    if to_radians:
        lat1, lon1, lat2, lon2 = np.radians([lat1, lon1, lat2, lon2])
    # Haversine formula: hav(c) = hav(dlat) + cos(lat1)*cos(lat2)*hav(dlon)
    lat_term = np.sin((lat2 - lat1) / 2.0) ** 2
    lon_term = np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2.0) ** 2
    hav = lat_term + lon_term
    return 2 * earth_radius * np.arcsin(np.sqrt(hav))
| [
"numpy.radians",
"numpy.sqrt",
"os.path.join",
"numpy.cos",
"numpy.sin",
"paramiko.SSHClient"
] | [((159, 179), 'paramiko.SSHClient', 'paramiko.SSHClient', ([], {}), '()\n', (177, 179), False, 'import paramiko\n'), ((822, 858), 'numpy.radians', 'np.radians', (['[lat1, lon1, lat2, lon2]'], {}), '([lat1, lon1, lat2, lon2])\n', (832, 858), True, 'import numpy as np\n'), ((231, 271), 'os.path.join', 'os.path.join', (['"""~"""', '""".ssh"""', '"""known_hosts"""'], {}), "('~', '.ssh', 'known_hosts')\n", (243, 271), False, 'import os\n'), ((878, 905), 'numpy.sin', 'np.sin', (['((lat2 - lat1) / 2.0)'], {}), '((lat2 - lat1) / 2.0)\n', (884, 905), True, 'import numpy as np\n'), ((1031, 1041), 'numpy.sqrt', 'np.sqrt', (['a'], {}), '(a)\n', (1038, 1041), True, 'import numpy as np\n'), ((921, 933), 'numpy.cos', 'np.cos', (['lat1'], {}), '(lat1)\n', (927, 933), True, 'import numpy as np\n'), ((936, 948), 'numpy.cos', 'np.cos', (['lat2'], {}), '(lat2)\n', (942, 948), True, 'import numpy as np\n'), ((951, 978), 'numpy.sin', 'np.sin', (['((lon2 - lon1) / 2.0)'], {}), '((lon2 - lon1) / 2.0)\n', (957, 978), True, 'import numpy as np\n')] |
from typing import Tuple, Dict, List
import numpy as np
from graph_nets.graphs import GraphsTuple
from .tf_tools import graphs_tuple_to_data_dicts, data_dicts_to_graphs_tuple
MIN_STD = 1E-6
class Standardizer:
    """Namespace of helpers for (de)standardizing arrays with mean/std."""

    @staticmethod
    def compute_mean_std(a: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Return column-wise mean and std of *a*; std is floored at MIN_STD."""
        mean = np.mean(a, axis=0)
        std = np.maximum(np.std(a, axis=0), MIN_STD)
        return mean, std

    @staticmethod
    def _standardize(a: np.ndarray, mean: np.ndarray, std: np.ndarray) -> np.ndarray:
        """Map *a* into zero-mean / unit-std coordinates."""
        shifted = a - mean
        return shifted / std

    @staticmethod
    def _destandardize(a: np.ndarray, mean: np.ndarray, std: np.ndarray) -> np.ndarray:
        """Invert :meth:`_standardize`."""
        scaled = a * std
        return scaled + mean
class ArrayStandardizer(Standardizer):
    """Standardizer bound to a fixed mean/std pair for plain arrays."""

    def __init__(
        self,
        mean: np.ndarray = np.array([0]),
        std: np.ndarray = np.array([1]),
    ):
        self.mean = mean
        self.std = std

    def standardize(self, array) -> np.ndarray:
        """Shift/scale *array* into standardized coordinates."""
        return self._standardize(a=array, mean=self.mean, std=self.std)

    def destandardize(self, array) -> np.ndarray:
        """Undo :meth:`standardize`."""
        return self._destandardize(a=array, mean=self.mean, std=self.std)

    @classmethod
    def from_array(cls, array: np.ndarray):
        """Build a standardizer whose statistics are estimated from *array*."""
        mean, std = cls.compute_mean_std(a=array)
        return cls(mean=mean, std=std)
class GraphStandardizer(Standardizer):
    """Standardizer holding separate mean/std statistics for the globals,
    nodes and edges fields of a ``GraphsTuple``."""

    def __init__(
        self,
        global_mean: np.ndarray = np.array([0]),
        global_std: np.ndarray = np.array([1]),
        nodes_mean: np.ndarray = np.array([0]),
        nodes_std: np.ndarray = np.array([1]),
        edges_mean: np.ndarray = np.array([0]),
        edges_std: np.ndarray = np.array([1]),
    ):
        self.global_mean = global_mean
        self.global_std = global_std
        self.nodes_mean = nodes_mean
        self.nodes_std = nodes_std
        self.edges_mean = edges_mean
        self.edges_std = edges_std

    def _field_stats(self):
        """(mean, std) pair keyed by GraphsTuple field name."""
        return {
            "globals": (self.global_mean, self.global_std),
            "nodes": (self.nodes_mean, self.nodes_std),
            "edges": (self.edges_mean, self.edges_std),
        }

    def standardize_graphs_tuple(self, graphs: GraphsTuple) -> GraphsTuple:
        """Return *graphs* with each field standardized by its own stats."""
        out = graphs
        for field, (mean, std) in self._field_stats().items():
            value = self._standardize(getattr(graphs, field), mean=mean, std=std)
            out = out.replace(**{field: value})
        return out

    def standardize_data_dict(self, d: Dict) -> Dict:
        as_tuple = data_dicts_to_graphs_tuple([d])
        return graphs_tuple_to_data_dicts(self.standardize_graphs_tuple(as_tuple))[0]

    def destandardize_graphs_tuple(self, graphs: GraphsTuple) -> GraphsTuple:
        """Invert :meth:`standardize_graphs_tuple`."""
        out = graphs
        for field, (mean, std) in self._field_stats().items():
            value = self._destandardize(getattr(graphs, field), mean=mean, std=std)
            out = out.replace(**{field: value})
        return out

    def destandardize_data_dicts(self, d: Dict) -> Dict:
        as_tuple = data_dicts_to_graphs_tuple([d])
        return graphs_tuple_to_data_dicts(self.destandardize_graphs_tuple(as_tuple))[0]

    @classmethod
    def from_graphs_tuple(cls, graphs_tuple: GraphsTuple):
        """Estimate the per-field statistics directly from *graphs_tuple*."""
        global_mean, global_std = cls.compute_mean_std(graphs_tuple.globals)
        nodes_mean, nodes_std = cls.compute_mean_std(graphs_tuple.nodes)
        edges_mean, edges_std = cls.compute_mean_std(graphs_tuple.edges)
        return cls(
            global_mean=global_mean,
            global_std=global_std,
            nodes_mean=nodes_mean,
            nodes_std=nodes_std,
            edges_mean=edges_mean,
            edges_std=edges_std,
        )

    @classmethod
    def from_data_dicts(cls, dicts: List[Dict]):
        """Estimate statistics from a list of graph data dicts."""
        return cls.from_graphs_tuple(data_dicts_to_graphs_tuple(dicts))
| [
"numpy.array",
"numpy.mean",
"numpy.std"
] | [((802, 815), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (810, 815), True, 'import numpy as np\n'), ((847, 860), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (855, 860), True, 'import numpy as np\n'), ((1421, 1434), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1429, 1434), True, 'import numpy as np\n'), ((1473, 1486), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1481, 1486), True, 'import numpy as np\n'), ((1525, 1538), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1533, 1538), True, 'import numpy as np\n'), ((1576, 1589), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1584, 1589), True, 'import numpy as np\n'), ((1628, 1641), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1636, 1641), True, 'import numpy as np\n'), ((1679, 1692), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1687, 1692), True, 'import numpy as np\n'), ((336, 354), 'numpy.mean', 'np.mean', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (343, 354), True, 'import numpy as np\n'), ((379, 396), 'numpy.std', 'np.std', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (385, 396), True, 'import numpy as np\n')] |
# TAREFAS:
# 1. Abrir todas as imagens em uma pasta (todas as imagens de uma pasta)
# 2. Redimensionar o tamanho de todas as imagem (por exemplo, para 600x600)
# 3. Rotacionar toda as imagens em 90, 180 ou 270 graus;
# 4. Transformar as imagens para tons de cinza (grayscale);
# 5. Transformar todas as imagens em array (numpy)
# ORIENTAÇÕES ADICIONAIS:
# 1. Podem trabalhar com as imagens que forem obtidas do vídeo ou com as 4 imagens que estão contidas na pasta.
# Adicionalmente, podem fazer os mesmos experimentos com algum outro conjunto de imagens que desejem.
# 2. Abaixo estão alguns trechos de código que podem ajudar.
from PIL import Image
import os
import glob
import numpy as np
# Create the three output folders. exist_ok=True removes the
# check-then-create race of the original os.path.exists guard.
try:
    for _folder in ('images/resized_images',
                    'images/rotated_images',
                    'images/grayscale_images'):
        os.makedirs(_folder, exist_ok=True)
# if not created then raise error
except OSError:
    print('Error: Creating directory of data')
# Load every jpg in the images folder, then convert each one to a numpy array.
print("\n========== Adicionando imagens a um array e printando seu tamanho ==========\n")
image_list = [Image.open(path) for path in glob.glob('images/*.jpg')]
print(image_list)
print("\n========== Transformando todas as imagens em array (numpy) ==========\n")
numpy_list = []
for pil_image in image_list:
    numpy_list.append(np.array(pil_image))
print(len(numpy_list))
print("\n========== Printando a imagem que está em RBG e verificando seu tamanho ==========\n")
# First image as raw RGB pixel data, its array shape, and the PIL object.
print(numpy_list[0])
print(numpy_list[0].shape)
print(image_list[0])
print("\n========== Transformando o tamanho da imagem para 600x600 e salvando em uma pasta ==========\n")
# Resize every loaded image to 600x600 and save each copy as <index>.jpg.
resized_image_list = [original.resize((600, 600)) for original in image_list]
for (i, resized) in enumerate(resized_image_list):
    resized.save('{}{}{}'.format('C:/Users/Pedro/PycharmProjects/machine-learning/ddd/images/resized_images/', i + 1,
                                 '.jpg'))
# Compare the size of the first image before and after resizing.
print(image_list[0].size)
print(resized_image_list[0].size)
print("\n========== Rotacionando todas as imagens em 90, 180, 270 e 360 graus ==========\n")
# Rotate every image by 90, 180, 270 and 360 degrees.
# BUG FIX: the original re-saved the entire accumulated list after every
# rotation pass, rewriting each file up to 4 times (quadratic disk I/O).
# Each rotated image is now written exactly once, with the same filenames
# (1.jpg .. N.jpg) and the same final content on disk.
rotated_image_list = []
for turn in range(1, 5):
    angle = 90 * turn
    for img in image_list:
        rotated = img.rotate(angle)
        rotated_image_list.append(rotated)
        rotated.save('{}{}{}'.format('C:/Users/Pedro/PycharmProjects/machine-learning/ddd/images/rotated_images/',
                                     len(rotated_image_list), '.jpg'))
print("\n========== Transformando as imagens para tons de cinza (grayscale) ==========\n")
# Convert every image to grayscale-with-alpha ('LA') and save as <index>.png.
grayscale_image_list = [original.convert('LA') for original in image_list]
for (i, gray) in enumerate(grayscale_image_list):
    gray.save('{}{}{}'.format('C:/Users/Pedro/PycharmProjects/machine-learning/ddd/images/grayscale_images/', i + 1,
                              '.png'))
| [
"os.path.exists",
"PIL.Image.open",
"os.makedirs",
"numpy.array",
"glob.glob"
] | [((1240, 1265), 'glob.glob', 'glob.glob', (['"""images/*.jpg"""'], {}), "('images/*.jpg')\n", (1249, 1265), False, 'import glob\n'), ((1277, 1297), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (1287, 1297), False, 'from PIL import Image\n'), ((1442, 1457), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1450, 1457), True, 'import numpy as np\n'), ((735, 774), 'os.path.exists', 'os.path.exists', (['"""images/resized_images"""'], {}), "('images/resized_images')\n", (749, 774), False, 'import os\n'), ((784, 820), 'os.makedirs', 'os.makedirs', (['"""images/resized_images"""'], {}), "('images/resized_images')\n", (795, 820), False, 'import os\n'), ((832, 871), 'os.path.exists', 'os.path.exists', (['"""images/rotated_images"""'], {}), "('images/rotated_images')\n", (846, 871), False, 'import os\n'), ((881, 917), 'os.makedirs', 'os.makedirs', (['"""images/rotated_images"""'], {}), "('images/rotated_images')\n", (892, 917), False, 'import os\n'), ((929, 970), 'os.path.exists', 'os.path.exists', (['"""images/grayscale_images"""'], {}), "('images/grayscale_images')\n", (943, 970), False, 'import os\n'), ((980, 1018), 'os.makedirs', 'os.makedirs', (['"""images/grayscale_images"""'], {}), "('images/grayscale_images')\n", (991, 1018), False, 'import os\n')] |
import torch
from config import config as cfg
import backbones
import logging
import losses
import os
import torch
import torch.distributed as dist
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data.distributed
from utils.utils_logging import AverageMeter, init_logging
from utils.utils_callbacks import CallBackVerification, CallBackLogging, CallBackModelCheckpoint
from eval_local import CallBack_LocalVerifi
from tqdm import tqdm
import gc
import copy
import pickle
import numpy as np
from torch.utils.data import Dataset, DataLoader
from dataset import MXFaceDataset_Subset,MXFaceDataset_Combine,DataLoaderX
from functools import reduce
from backbones import BottleBlock
class BCE_module(nn.Module):
    """Auxiliary binary-cross-entropy head on top of backbone features.

    A converter maps the feature, then cosine similarities against a learned
    class matrix are sharpened by g(x) = 2*((x+1)/2)**t - 1, margin-shifted
    by ``m`` and scaled by ``r``.
    """

    def __init__(self, hidden, n_class, converter_layer=1, m=0.4, r=30.0, t=3):
        super(BCE_module, self).__init__()
        if converter_layer == 1:
            # Single linear converter, initialized to the identity map.
            layer = nn.Linear(hidden, hidden)
            nn.init.eye_(layer.weight)
            nn.init.constant_(layer.bias, 0.0)
            self.converter = nn.Sequential(layer)
        else:
            self.converter = BottleBlock(hidden, 4)
        self.weight = nn.Parameter(torch.normal(0, 0.01, (n_class, hidden)))
        self.bias = nn.Parameter(torch.zeros(n_class))
        # Sharpening function applied to cosine similarities.
        self.g_func = lambda x: (2 * (((x + 1) / 2).pow(t)) - 1)
        self.n_class = n_class
        self.hidden = hidden
        self.m = m
        self.r = r

    def forward(self, x, labels):
        """Return (margin-adjusted logits, boolean ground-truth mask)."""
        feat = self.converter(x)
        cosine = torch.matmul(F.normalize(feat), F.normalize(self.weight).t())
        # Build a one-hot mask; labels outside [0, n_class) are routed to a
        # dummy overflow column that is dropped, leaving an all-negative row.
        clamped = labels.clone()
        clamped[clamped >= self.n_class] = self.n_class
        gt = torch.zeros(len(x), self.n_class + 1, device=x.device).bool()
        gt[torch.arange(len(x)), clamped] = True
        gt = gt[:, :-1]
        # Positive entries get a negative margin, negatives a positive one.
        cosine[gt] = self.r * (self.g_func(cosine[gt]) - self.m)
        cosine[~gt] = self.r * (self.g_func(cosine[~gt]) + self.m)
        cosine += self.bias.unsqueeze(0)
        return cosine, gt

    def initialize(self, fc):
        """Copy a pre-trained classifier matrix into the BCE weight."""
        self.weight.data = fc.clone()
class FC_module(nn.Module):
    """Cosine classifier head with optional appended pre-trained classes."""

    def __init__(self, hidden, n_class, output_dir):
        super(FC_module, self).__init__()
        self.fc = nn.Parameter(torch.normal(0, 0.01, (n_class, hidden)))
        self.output_dir = output_dir
        self.n_class = n_class

    def forward(self, x, normalize_feat=True):
        """Cosine similarities between (optionally normalized) x and fc rows."""
        features = F.normalize(x) if normalize_feat else x
        return torch.matmul(features, F.normalize(self.fc).t())

    def update_from_tensor(self, fc):
        """Overwrite the classifier weights in place with a copy of *fc*."""
        self.fc.data = fc.clone()

    def update_with_pretrain(self, pretrain_fc):
        """Append pre-trained class rows below the local ones."""
        self.fc = nn.Parameter(torch.cat([self.fc.data, pretrain_fc], dim=0))

    def remove_pretrain(self):
        """Drop the appended pre-trained rows, keeping only local classes."""
        self.fc.data = self.fc.data[0:self.n_class]

    def get_pretrain_fc(self):
        """Return the appended pre-trained rows (empty if none appended)."""
        return self.fc.data[self.n_class:]
class Branch_model(nn.Module):
    """Backbone followed by a cosface head and an auxiliary BCE head."""

    def __init__(self, backbone, fc_module, bce_module):
        super(Branch_model, self).__init__()
        self.backbone = backbone
        self.fc_module = fc_module
        self.bce_module = bce_module

    def forward(self, imgs, labels, contrastive=False, detach=False):
        """Return cosface logits plus BCE logits/targets; with
        ``contrastive=True`` the raw feature is appended to the tuple."""
        feature = self.backbone(imgs)
        cosface_logits = self.fc_module(feature)
        # Optionally cut the gradient so the BCE head does not train the backbone.
        bce_input = feature.detach() if detach else feature
        bce_logits, bce_gts = self.bce_module(bce_input, labels)
        if contrastive:
            return cosface_logits, bce_logits, bce_gts, feature
        return cosface_logits, bce_logits, bce_gts
class Sequential_model(nn.Module):
    """Backbone followed by a single classifier head."""

    def __init__(self, backbone, fc_module):
        super(Sequential_model, self).__init__()
        self.backbone = backbone
        self.fc_module = fc_module

    def forward(self, imgs, contrastive=False):
        """Return logits; with ``contrastive=True`` also the raw feature."""
        feature = self.backbone(imgs)
        logits = self.fc_module(feature)
        return (logits, feature) if contrastive else logits
class Client(object):
def __init__(self, cid, args, data):
self.cid = cid
self.args = args
self.num_classes = data.train_class_sizes[self.cid]
self.local_epoch = args.local_epoch
self.dataset_size = data.train_dataset_sizes[self.cid]
self.train_loader = data.train_loaders[self.cid]
# The global base ID for each client (ex. local: 0-99, global: 300-399 )
self.ID_base = data.train_loaders[self.cid].dataset.ID_base
self.target_ID = list(range(self.ID_base,self.ID_base+self.num_classes))
if hasattr(data, 'test_loaders'):
self.test_loaders = data.test_loaders[self.cid]
if hasattr(data, 'public_train_loader'):
self.public_num_classes = data.public_train_loader.dataset.num_classes
# add margin
self.margin_softmax = eval("losses.{}".format(args.loss))(s=30,m=0.4)
if self.args.BCE_local:
self.bce_module = BCE_module(512, self.num_classes,cfg.converter_layer)
self.bce_loss = losses.BCE_loss()
### distributed
self.rank = 0 #dist.get_rank()
self.local_rank = 0 #args.local_rank
self.dropout = 0.4 if cfg.dataset is "webface" else 0
self.backbone_state_dict = None
### Create directory
self.client_output = os.path.join(args.output_dir,'clients','client_%d'%(self.cid))
### FC module, on cpu
self.fc_module = FC_module(512,self.num_classes,self.client_output)
### contrastive backbone (bb)
if self.args.contrastive_bb:
self.last_model = eval("backbones.{}".format(self.args.network))(False, dropout=self.dropout, fp16=cfg.fp16)
self.con_criterion = nn.CosineSimilarity(dim=1)
self.temperature = 0.5
self.logger = logging.getLogger('FL_face.client')
    def data_update_fc(self,fed_model_state_dict,norm_before_avg,fc_name='center_features',save_to_disk=False):
        """Re-initialize the local FC weights as per-class mean features.

        Runs the (frozen) federated backbone over this client's data,
        averages the embeddings per class, and writes the result into
        ``self.fc_module``.

        Args:
            fed_model_state_dict: backbone state dict received from the server.
            norm_before_avg (bool): L2-normalize each embedding before averaging.
            fc_name (str): file stem used when persisting the class centers.
            save_to_disk (bool): also save the centers under the client folder.
        """
        with torch.no_grad():
            ### Creating model
            backbone = eval("backbones.{}".format(self.args.network))(False, dropout=self.dropout, fp16=cfg.fp16)
            backbone.load_state_dict(fed_model_state_dict)
            backbone = nn.DataParallel(backbone)
            backbone.to(self.local_rank)
            backbone.eval()
            ### Local forward data
            # Accumulate a running feature sum and sample count per class.
            init_fc = torch.zeros_like(self.fc_module.fc.data).to(self.local_rank)
            num_samples = torch.zeros(self.num_classes).to(self.local_rank)
            for step, (img, label) in enumerate(self.test_loaders):
                features = backbone(img)
                if norm_before_avg:
                    features = F.normalize(features)
                u_label = torch.unique(label)
                for l in u_label:
                    init_fc[l:l+1,:] += torch.sum(features[label==l,:],dim=0)
                    num_samples[l] += torch.sum(label==l)
            ### average features
            init_fc /= num_samples.unsqueeze(1)
            init_fc = init_fc.cpu()
            if save_to_disk:
                torch.save(init_fc,os.path.join(self.client_output,fc_name+'.pth'))
            self.fc_module.update_from_tensor(init_fc)
        del backbone,num_samples
## feature-based
    def choose_hard_negative_2(self,public_train_loader,pretrained_label,pretrained_feats,threshold=0.2):
        """Feature-based hard-negative mining against a public dataset.

        Forwards this client's data through the current local backbone and
        keeps every public image whose precomputed feature has cosine
        similarity above *threshold* with any local feature. Returns a
        deep-copied loader whose dataset is restricted to those images.

        Args:
            public_train_loader: loader over the shared public dataset.
            pretrained_label: per-image labels of the public dataset.
            pretrained_feats: precomputed (normalized) public features.
            threshold (float): similarity cutoff for "hard negative".
        """
        public_loader_subset = copy.deepcopy(public_train_loader)
        ## forward local data
        local_feats = []
        backbone = eval("backbones.{}".format(self.args.network))(False, dropout=self.dropout, fp16=cfg.fp16)
        backbone.load_state_dict(self.backbone_state_dict)
        backbone.eval()
        backbone.to(self.local_rank)
        #
        with torch.no_grad():
            for step, (img, label) in enumerate(self.test_loaders):
                features = F.normalize(backbone(img.to(self.local_rank)))
                local_feats.append(features.cpu())
        local_feats = torch.cat(local_feats,dim=0)
        backbone = backbone.cpu()
        # Pairwise cosine similarity: local x public.
        similarity = torch.matmul(local_feats,pretrained_feats.t())
        unique_idx = []
        times = 100 ## prevent out of mem of RAM (when torch.where())
        batch = len(similarity)//times +1
        for i in range(times):
            unique_idx.append(torch.where(similarity[i*batch:(i+1)*batch]>threshold)[1].numpy())
        ## do Union
        unique_idx = sorted(reduce(np.union1d,unique_idx))
        ## update "imgidx" in dataset
        # imgidx is 1-based in the mxnet record, hence the +1.
        public_loader_subset.dataset.imgidx = np.array(unique_idx)+1
        num_ID = len(torch.unique(pretrained_label[unique_idx]))
        self.logger.info('%d imgs (%d ID) are hard negative with similarity > %.2f'%(len(unique_idx),num_ID,threshold))
        del backbone,similarity
        gc.collect()
        torch.cuda.empty_cache()
        return public_loader_subset
## FC-based
    def choose_hard_negative(self,pretrain_fc,public_train_loader,pretrain_label,self_fc,threshold=0.2,batch_size=128):
        """FC-based hard-negative mining against a public dataset.

        Compares the local classifier rows (*self_fc*) with the pre-trained
        classifier rows (*pretrain_fc*) by cosine similarity; public IDs
        with similarity above *threshold* are marked as hard negatives and
        the returned loader is restricted to their images.

        Returns:
            tuple: (pretrain_fc_subset, public_loader_subset). Note that the
            full *pretrain_fc* is currently returned unmodified.

        Raises:
            NotImplementedError: if *threshold* is given as an int (top-k
            selection is not implemented).
        """
        public_loader_subset = copy.deepcopy(public_train_loader)
        similarity = torch.matmul(F.normalize(self_fc),F.normalize(pretrain_fc).t())
        # n_class x 6000
        if isinstance(threshold, float):
            IDs = torch.unique(torch.where(similarity>threshold)[1]).numpy()
        if isinstance(threshold, int):
            # IDs = torch.argsort(simlarity,descending=True)[:threshold].numpy()
            raise NotImplementedError
        self.logger.info('%d ID are hard negative'%(len(IDs)))
        self.HN_ID = IDs
        ## return all FC
        pretrain_fc_subset = pretrain_fc
        # pretrain_fc_subset = pretrain_fc[IDs]
        # relabel_dict = dict()
        # for i in range(len(IDs)):
        #     relabel_dict[IDs[i]] = i
        left_img_idx = []
        pretrain_label = pretrain_label.numpy()
        # Keep every public image whose label belongs to a hard-negative ID.
        for i in range(len(pretrain_label)):
            if pretrain_label[i] in IDs:
                # imgidx start from 1
                left_img_idx.append(i+1)
        # alter dataset
        public_loader_subset.dataset.imgidx = np.array(left_img_idx)
        # class order is the same.
        # imgrec = public_loader_subset.dataset.imgrec
        # imgidx = np.array(left_img_idx)
        # num_classes = len(IDs)
        # transform = public_loader_subset.dataset.transform
        # tmp_dataset = MXFaceDataset_Subset(imgrec, imgidx, num_classes, relabel_dict, transform)
        # public_loader_subset = DataLoader(tmp_dataset,batch_size=batch_size,shuffle=True,num_workers=2,\
        #     pin_memory=True,drop_last=True)
        return pretrain_fc_subset,public_loader_subset
def reweight_cosface(self,logits,labels):
# logits : (B, C)
# labels : B
with torch.no_grad():
idx_bool = torch.ones(logits.shape).bool()
idx_bool[torch.arange(len(labels)),labels] = False
tmp = logits.detach().clone()[idx_bool].reshape(len(labels),logits.shape[1]-1)[:,:self.num_classes].repeat(1,self.args.num_client-1)
logits = torch.cat([logits,tmp],dim=1)
# with torch.no_grad():
# idx_bool = torch.zeros(logits.shape).bool()
# for i in range(len(labels)):
# idx = torch.randperm(4000-self.num_classes)
# idx_bool[i,idx] = True
# balance = logits.detach().clone()[idx_bool].reshape(len(labels),4000-self.num_classes)
# logits = torch.cat([logits,balance],dim=1)
return logits
    def train_with_public_data(self,start_epoch=0,callback_verification=None,\
            public_train_loader=None,pretrained_fc=None,choose_hard_negative=False,\
            pretrained_label=None,pretrained_feats=None):
        """Local training on the client data combined with a public dataset.

        Optionally mines hard negatives from the public set, appends the
        pre-trained classifier rows to the local FC head, then trains the
        backbone (+ optional BCE head and contrastive regularizer) for
        ``self.local_epoch`` epochs. Afterwards the model is moved back to
        CPU, optionally verified, and checkpointed under the client folder.

        Args:
            start_epoch (int): global epoch offset used for logging/lr.
            callback_verification: optional local verification callback.
            public_train_loader: loader over the shared public dataset.
            pretrained_fc: classifier rows of the public classes to append.
            choose_hard_negative (bool): restrict the public data via
                :meth:`choose_hard_negative_2` before combining.
            pretrained_label / pretrained_feats: public labels/features used
                for hard-negative mining.
        """
        ### Create hard negative dataloader
        if choose_hard_negative:
            public_loader_subset = self.choose_hard_negative_2(public_train_loader, pretrained_label, pretrained_feats,\
                                                threshold=cfg.HN_threshold)
        else:
            public_loader_subset = public_train_loader
        ### combine dataloader
        if self.args.combine_dataset:
            combine_dataset = MXFaceDataset_Combine(self.train_loader.dataset, public_loader_subset.dataset)
            combine_loader = DataLoader(combine_dataset,batch_size=cfg.com_batch_size,shuffle=True,num_workers=6,pin_memory=True,drop_last=True)
            ### Update dataset size, for FedAvg
            self.dataset_size = len(combine_dataset)
        else:
            raise NotImplementedError()
        ### Create backbone, load weight, put GPU
        backbone = eval("backbones.{}".format(self.args.network))(False, dropout=self.dropout, fp16=cfg.fp16)
        backbone.load_state_dict(self.backbone_state_dict)
        backbone.train()
        backbone.to(self.local_rank)
        ### Update self FC module and put FC to gpu
        self.fc_module.update_with_pretrain(pretrained_fc)
        self.fc_module.train()
        self.fc_module.to(self.local_rank)
        if self.args.BCE_local:
            ### Create BCE model
            self.bce_module.train()
            self.bce_module.to(self.local_rank)
            model = Branch_model(backbone, self.fc_module, self.bce_module)
        else:
            model = Sequential_model(backbone, self.fc_module)
        ### Contrastive backbone, to parallel
        # global_model is a frozen copy of the received federated backbone;
        # last_model is the backbone from the previous local round.
        if self.args.contrastive_bb:
            with torch.no_grad():
                global_model = nn.DataParallel(copy.deepcopy(backbone).to(self.local_rank)).eval()
                self.last_model = nn.DataParallel(self.last_model.to(self.local_rank)).eval()
        ### first local test
        if callback_verification is not None and start_epoch == 0:
            self.logger.info('Pretrain Local testing')
            callback_verification.veri_test(backbone, -1, self.target_ID, self.cid)
        opt = torch.optim.SGD(params=model.parameters(),lr=cfg.lr,momentum=0.9,weight_decay=cfg.weight_decay)
        ### For different lr of backbone & BCE
        # if self.args.BCE_local:
        #     opt = torch.optim.SGD([{'params':model.backbone.parameters()},{'params':model.fc_module.parameters()}],
        #                 lr=cfg.lr,momentum=0.9,weight_decay=cfg.weight_decay)
        #     opt_bce = torch.optim.SGD(params=model.bce_module.parameters(),lr=cfg.lr_func(start_epoch)*10*cfg.lr,\
        #                 momentum=0.9,weight_decay=cfg.weight_decay)
        # else:
        #     raise NotImplementedError()
        model = torch.nn.DataParallel(model)
        schler = torch.optim.lr_scheduler.StepLR(opt,cfg.train_decay,gamma=0.1)
        loss_meter = AverageMeter()
        cos_meter = AverageMeter()
        con_meter = AverageMeter()
        bce_meter = AverageMeter()
        ### Start train w/ combine
        for epoch in range(start_epoch, start_epoch+self.local_epoch):
            self.logger.info('Epoch %d,Total Epoch %d, Total step : %d, lr=%.4f'%(epoch,start_epoch+self.local_epoch,\
                            len(combine_loader),schler.get_last_lr()[0]))
            pbar = tqdm(total=len(combine_loader),ncols=120,leave=True)
            if self.args.BCE_local: ### train with BCE loss
                for step, (imgs, labels) in enumerate(combine_loader):
                    opt.zero_grad()
                    # opt_bce.zero_grad()
                    imgs = imgs.to(self.local_rank)
                    labels = labels.to(self.local_rank)
                    ### train w/ contrastive
                    if self.args.contrastive_bb:
                        with torch.no_grad():
                            global_feats = global_model(imgs)
                            last_feats = self.last_model(imgs)
                        cos_logits, bce_logits ,bce_gts , feats = model(imgs,labels,contrastive=True,detach=self.args.BCE_detach)
                        # Contrastive
                        # MOON-style: pull toward the global model, push away
                        # from the previous local model.
                        pos_sim = self.con_criterion(feats,global_feats)/self.temperature
                        neg_sim = self.con_criterion(feats,last_feats)/self.temperature
                        con_label = torch.zeros(len(labels),device=pos_sim.device).long()
                        con_loss = F.cross_entropy(torch.stack([pos_sim,neg_sim],dim=1), con_label)
                        # Cosface
                        cos_logits = self.margin_softmax(cos_logits,labels)
                        if self.args.reweight_cosface:
                            cos_logits = self.reweight_cosface(cos_logits, labels)
                        cos_loss = F.cross_entropy(cos_logits, labels)
                        # bce loss
                        bce_loss = self.bce_loss(bce_logits,bce_gts)
                        loss = cos_loss + 10 * bce_loss + cfg.mu * con_loss
                        con_meter.update(con_loss.item(),1)
                    else:
                        cos_logits, bce_logits ,bce_gts = model(imgs,labels,contrastive=False,detach=self.args.BCE_detach)
                        # Cosface
                        cos_logits = self.margin_softmax(cos_logits,labels)
                        if self.args.reweight_cosface:
                            cos_logits = self.reweight_cosface(cos_logits, labels)
                        cos_loss = F.cross_entropy(cos_logits, labels)
                        # bce loss
                        bce_loss = self.bce_loss(bce_logits,bce_gts)
                        loss = cos_loss + 10 * bce_loss
                    loss.backward()
                    opt.step()
                    # opt_bce.step()
                    loss_meter.update(loss.item(),1)
                    cos_meter.update(cos_loss.item(),1)
                    bce_meter.update(bce_loss.item(),1)
                    if step > 10 and step % 40 == 0:
                        pbar.set_postfix(loss='%.3f,%.3f,%.3f,%.3f'%(loss_meter.avg,cos_meter.avg,con_meter.avg,bce_meter.avg))
                        self.logger.debug('Step %d, Loss : %.3f,%.3f,%.3f,%.3f'%(step,loss_meter.avg,cos_meter.avg,con_meter.avg,bce_meter.avg))
                    pbar.update(1)
            else:
                # Same loop without the BCE head.
                for step, (imgs, labels) in enumerate(combine_loader):
                    opt.zero_grad()
                    imgs = imgs.to(self.local_rank)
                    labels = labels.to(self.local_rank)
                    if self.args.contrastive_bb:
                        with torch.no_grad():
                            global_feats = global_model(imgs)
                            last_feats = self.last_model(imgs)
                        logits, feats = model(imgs)
                        # Contrastive
                        pos_sim = self.con_criterion(feats,global_feats)/self.temperature
                        neg_sim = self.con_criterion(feats,last_feats)/self.temperature
                        con_label = torch.zeros(len(labels),device=pos_sim.device).long()
                        con_loss = F.cross_entropy(torch.stack([pos_sim,neg_sim],dim=1), con_label)
                        #Cosface
                        logits = self.margin_softmax(logits,labels)
                        if self.args.reweight_cosface:
                            logits = self.reweight_cosface(logits, labels)
                        cos_loss = F.cross_entropy(logits, labels)
                        loss = cos_loss + cfg.mu*con_loss
                        con_meter.update(con_loss.item(),1)
                        cos_meter.update(cos_loss.item(),1)
                    else:
                        logits = model(imgs)
                        logits = self.margin_softmax(logits,labels)
                        if self.args.reweight_cosface:
                            logits = self.reweight_cosface(logits, labels)
                        loss = F.cross_entropy(logits, labels)
                    loss.backward()
                    opt.step()
                    loss_meter.update(loss.item(),1)
                    if step > 10 and step % 40 == 0:
                        pbar.set_postfix(loss='%.3f,%.3f,%.3f'%(loss_meter.avg,cos_meter.avg,con_meter.avg))
                        self.logger.debug('Step %d, Loss : %.3f,%.3f,%.3f'%(step,loss_meter.avg,cos_meter.avg,con_meter.avg))
                    pbar.update(1)
            pbar.close()
            schler.step()
        # NOTE(review): `self.rank is 0` relies on small-int identity; should be `== 0`.
        if self.rank is 0: self.logger.info("Client %d Ends: loss = %.3f"%(self.cid, loss_meter.avg))
        # ### tune BCE
        # if self.args.BCE_tune:
        #     opt_bce = torch.optim.SGD(params=self.bce_module.parameters(),lr=cfg.lr,momentum=0.9,weight_decay=cfg.weight_decay)
        #     for w in self.bce_module.parameters():
        #         opt_bce.state[w] = opt.state[w]
        #     for e in range(cfg.fine_tune_epoch):
        #         total_loss = 0
        #         pbar = tqdm(total=len(combine_loader),ncols=120,leave=True)
        #         for step, (imgs, labels) in enumerate(combine_loader):
        #             opt_bce.zero_grad()
        #             imgs = imgs.to(self.local_rank)
        #             labels = labels.to(self.local_rank)
        #             with torch.no_grad():
        #                 feature = backbone(imgs)
        #             bce_logits,bce_gts = self.bce_module(feature,labels)
        #             bce_loss = 10*self.bce_loss(bce_logits,bce_gts)
        #             bce_loss.backward()
        #             total_loss += bce_loss.item()
        #             opt_bce.step()
        #             pbar.update(1)
        #         pbar.close()
        #         print(total_loss/len(combine_loader))
        ### To CPU
        backbone = backbone.cpu()
        self.fc_module = self.fc_module.cpu()
        if self.args.BCE_local:
            self.bce_module = self.bce_module.cpu()
        if self.args.contrastive_bb:
            global_model = global_model.module.cpu()
            self.last_model = self.last_model.module.cpu()
        ### Local test
        if callback_verification is not None:
            self.logger.info("Client %d Local Testing"%(self.cid))
            os.system('mkdir -p %s'%(self.client_output))
            if self.args.BCE_local:
                # Verification runs through the BCE converter as well.
                backbone_converter = nn.Sequential(backbone,self.bce_module.converter)
                callback_verification.veri_test(backbone_converter, epoch, self.target_ID,self.cid)
                torch.save(backbone.state_dict(),os.path.join(self.client_output,'backbone.pth'))
                torch.save(self.bce_module.state_dict(),os.path.join(self.client_output,'bce_module.pth'))
            else:
                callback_verification.veri_test(backbone, epoch, self.target_ID,self.cid)
                torch.save(backbone.state_dict(),os.path.join(self.client_output,'backbone.pth'))
        else: ##others just save model
            os.system('mkdir -p %s'%(self.client_output))
            if self.args.BCE_local:
                torch.save(backbone.state_dict(),os.path.join(self.client_output,'backbone.pth'))
                torch.save(self.bce_module.state_dict(),os.path.join(self.client_output,'bce_module.pth'))
            else:
                torch.save(backbone.state_dict(),os.path.join(self.client_output,'backbone.pth'))
        self.backbone_state_dict = backbone.state_dict()
        if self.args.contrastive_bb:
            self.last_model.load_state_dict(backbone.state_dict())
            del global_model
        self.loss_meter = loss_meter
        # delete garbage
        del backbone
        gc.collect()
        torch.cuda.empty_cache()
def train(self,start_epoch=0,callback_verification=None):
# put model to gpu
backbone = eval("backbones.{}".format(self.args.network))(False, dropout=self.dropout, fp16=cfg.fp16)
backbone.load_state_dict(self.backbone_state_dict)
backbone.to(self.local_rank)
backbone.train()
self.fc_module.to(self.local_rank)
self.fc_module.train()
model = nn.DataParallel(nn.Sequential(backbone,self.fc_module))
# first test
if callback_verification is not None and start_epoch == 0:
self.logger.info('Pretrain Local testing')
callback_verification.veri_test(backbone, -1, self.target_ID, self.cid)
opt = torch.optim.SGD(
params=model.parameters(),
lr=cfg.lr_func(start_epoch)*cfg.lr,momentum=0.9,weight_decay=cfg.weight_decay)
loss_meter = AverageMeter()
for epoch in range(start_epoch, start_epoch+self.local_epoch):
self.logger.info('Epoch %d, Total step : %d'%(epoch, len(self.train_loader)))
pbar = tqdm(total=len(self.train_loader),ncols=120,leave=True)
for step, (imgs, labels) in enumerate(self.train_loader):
opt.zero_grad()
if len(imgs) ==1:
imgs = torch.cat([imgs,imgs],dim=0)
labels = torch.cat([labels,labels])
imgs = imgs.to(self.local_rank)
labels = labels.to(self.local_rank)
logits = model(imgs)
logits = self.margin_softmax(logits,labels)
loss = F.cross_entropy(logits, labels)
# pos cosine loss
# output = F.relu(0.9 - self.fc_module(features))**2
# loss = torch.mean(output)
loss.backward()
opt.step()
loss_meter.update(loss.item(), 1)
pbar.update(1)
if step > 10 and step % 50 == 0:
pbar.set_postfix(loss='%.3f'%loss_meter.avg)
pbar.close()
if self.rank is 0: self.logger.info("Client %d Ends: loss = %.3f"%(self.cid, loss_meter.avg))
backbone = backbone.cpu()
self.fc_module = self.fc_module.cpu()
self.loss_meter = loss_meter
# callback
if callback_verification is not None:
self.logger.info("Client %d Local Testing"%(self.cid))
os.system('mkdir -p %s'%(self.client_output))
callback_verification.veri_test(backbone, epoch, self.target_ID,self.cid)
torch.save(backbone.state_dict(),os.path.join(self.client_output,'backbone.pth'))
self.backbone_state_dict = backbone.state_dict()
# delete garbage
del backbone
gc.collect()
def get_train_loss(self):
return self.loss_meter.avg
def get_model(self):
return self.backbone_state_dict
def get_global_fc(self):
return self.fc_module.get_pretrain_fc()
def get_model_path(self):
return os.path.join(self.client_output,'backbone.pth')
def get_data_size(self):
return self.dataset_size
| [
"logging.getLogger",
"torch.nn.init.eye_",
"torch.nn.init.constant_",
"torch.nn.Sequential",
"numpy.array",
"torch.normal",
"torch.sum",
"copy.deepcopy",
"torch.unique",
"dataset.MXFaceDataset_Combine",
"os.system",
"torch.zeros_like",
"functools.reduce",
"utils.utils_logging.AverageMeter"... | [((5494, 5558), 'os.path.join', 'os.path.join', (['args.output_dir', '"""clients"""', "('client_%d' % self.cid)"], {}), "(args.output_dir, 'clients', 'client_%d' % self.cid)\n", (5506, 5558), False, 'import os\n'), ((5980, 6015), 'logging.getLogger', 'logging.getLogger', (['"""FL_face.client"""'], {}), "('FL_face.client')\n", (5997, 6015), False, 'import logging\n'), ((7586, 7620), 'copy.deepcopy', 'copy.deepcopy', (['public_train_loader'], {}), '(public_train_loader)\n', (7599, 7620), False, 'import copy\n'), ((8992, 9004), 'gc.collect', 'gc.collect', ([], {}), '()\n', (9002, 9004), False, 'import gc\n'), ((9013, 9037), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (9035, 9037), False, 'import torch\n'), ((9247, 9281), 'copy.deepcopy', 'copy.deepcopy', (['public_train_loader'], {}), '(public_train_loader)\n', (9260, 9281), False, 'import copy\n'), ((10306, 10328), 'numpy.array', 'np.array', (['left_img_idx'], {}), '(left_img_idx)\n', (10314, 10328), True, 'import numpy as np\n'), ((14870, 14898), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (14891, 14898), False, 'import torch\n'), ((14917, 14981), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['opt', 'cfg.train_decay'], {'gamma': '(0.1)'}), '(opt, cfg.train_decay, gamma=0.1)\n', (14948, 14981), False, 'import torch\n'), ((15001, 15015), 'utils.utils_logging.AverageMeter', 'AverageMeter', ([], {}), '()\n', (15013, 15015), False, 'from utils.utils_logging import AverageMeter, init_logging\n'), ((15036, 15050), 'utils.utils_logging.AverageMeter', 'AverageMeter', ([], {}), '()\n', (15048, 15050), False, 'from utils.utils_logging import AverageMeter, init_logging\n'), ((15071, 15085), 'utils.utils_logging.AverageMeter', 'AverageMeter', ([], {}), '()\n', (15083, 15085), False, 'from utils.utils_logging import AverageMeter, init_logging\n'), ((15106, 15120), 'utils.utils_logging.AverageMeter', 
'AverageMeter', ([], {}), '()\n', (15118, 15120), False, 'from utils.utils_logging import AverageMeter, init_logging\n'), ((23876, 23888), 'gc.collect', 'gc.collect', ([], {}), '()\n', (23886, 23888), False, 'import gc\n'), ((23897, 23921), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (23919, 23921), False, 'import torch\n'), ((24812, 24826), 'utils.utils_logging.AverageMeter', 'AverageMeter', ([], {}), '()\n', (24824, 24826), False, 'from utils.utils_logging import AverageMeter, init_logging\n'), ((26700, 26712), 'gc.collect', 'gc.collect', ([], {}), '()\n', (26710, 26712), False, 'import gc\n'), ((26966, 27014), 'os.path.join', 'os.path.join', (['self.client_output', '"""backbone.pth"""'], {}), "(self.client_output, 'backbone.pth')\n", (26978, 27014), False, 'import os\n'), ((924, 949), 'torch.nn.Linear', 'nn.Linear', (['hidden', 'hidden'], {}), '(hidden, hidden)\n', (933, 949), True, 'import torch.nn as nn\n'), ((961, 987), 'torch.nn.init.eye_', 'nn.init.eye_', (['layer.weight'], {}), '(layer.weight)\n', (973, 987), True, 'import torch.nn as nn\n'), ((1000, 1034), 'torch.nn.init.constant_', 'nn.init.constant_', (['layer.bias', '(0.0)'], {}), '(layer.bias, 0.0)\n', (1017, 1034), True, 'import torch.nn as nn\n'), ((1100, 1125), 'torch.nn.Sequential', 'nn.Sequential', (['*converter'], {}), '(*converter)\n', (1113, 1125), True, 'import torch.nn as nn\n'), ((1169, 1191), 'backbones.BottleBlock', 'BottleBlock', (['hidden', '(4)'], {}), '(hidden, 4)\n', (1180, 1191), False, 'from backbones import BottleBlock\n'), ((1228, 1268), 'torch.normal', 'torch.normal', (['(0)', '(0.01)', '(n_class, hidden)'], {}), '(0, 0.01, (n_class, hidden))\n', (1240, 1268), False, 'import torch\n'), ((1300, 1320), 'torch.zeros', 'torch.zeros', (['n_class'], {}), '(n_class)\n', (1311, 1320), False, 'import torch\n'), ((1573, 1590), 'torch.nn.functional.normalize', 'F.normalize', (['feat'], {}), '(feat)\n', (1584, 1590), True, 'import torch.nn.functional as F\n'), 
((2317, 2357), 'torch.normal', 'torch.normal', (['(0)', '(0.01)', '(n_class, hidden)'], {}), '(0, 0.01, (n_class, hidden))\n', (2329, 2357), False, 'import torch\n'), ((2820, 2865), 'torch.cat', 'torch.cat', (['[self.fc.data, pretrain_fc]'], {'dim': '(0)'}), '([self.fc.data, pretrain_fc], dim=0)\n', (2829, 2865), False, 'import torch\n'), ((5188, 5205), 'losses.BCE_loss', 'losses.BCE_loss', ([], {}), '()\n', (5203, 5205), False, 'import losses\n'), ((5895, 5921), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', ([], {'dim': '(1)'}), '(dim=1)\n', (5914, 5921), True, 'import torch.nn as nn\n'), ((6142, 6157), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6155, 6157), False, 'import torch\n'), ((6386, 6411), 'torch.nn.DataParallel', 'nn.DataParallel', (['backbone'], {}), '(backbone)\n', (6401, 6411), True, 'import torch.nn as nn\n'), ((7938, 7953), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7951, 7953), False, 'import torch\n'), ((8174, 8203), 'torch.cat', 'torch.cat', (['local_feats'], {'dim': '(0)'}), '(local_feats, dim=0)\n', (8183, 8203), False, 'import torch\n'), ((8628, 8658), 'functools.reduce', 'reduce', (['np.union1d', 'unique_idx'], {}), '(np.union1d, unique_idx)\n', (8634, 8658), False, 'from functools import reduce\n'), ((8743, 8763), 'numpy.array', 'np.array', (['unique_idx'], {}), '(unique_idx)\n', (8751, 8763), True, 'import numpy as np\n'), ((8787, 8829), 'torch.unique', 'torch.unique', (['pretrained_label[unique_idx]'], {}), '(pretrained_label[unique_idx])\n', (8799, 8829), False, 'import torch\n'), ((9316, 9336), 'torch.nn.functional.normalize', 'F.normalize', (['self_fc'], {}), '(self_fc)\n', (9327, 9336), True, 'import torch.nn.functional as F\n'), ((11004, 11019), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11017, 11019), False, 'import torch\n'), ((11305, 11336), 'torch.cat', 'torch.cat', (['[logits, tmp]'], {'dim': '(1)'}), '([logits, tmp], dim=1)\n', (11314, 11336), False, 'import torch\n'), ((12487, 12565), 
'dataset.MXFaceDataset_Combine', 'MXFaceDataset_Combine', (['self.train_loader.dataset', 'public_loader_subset.dataset'], {}), '(self.train_loader.dataset, public_loader_subset.dataset)\n', (12508, 12565), False, 'from dataset import MXFaceDataset_Subset, MXFaceDataset_Combine, DataLoaderX\n'), ((12595, 12719), 'torch.utils.data.DataLoader', 'DataLoader', (['combine_dataset'], {'batch_size': 'cfg.com_batch_size', 'shuffle': '(True)', 'num_workers': '(6)', 'pin_memory': '(True)', 'drop_last': '(True)'}), '(combine_dataset, batch_size=cfg.com_batch_size, shuffle=True,\n num_workers=6, pin_memory=True, drop_last=True)\n', (12605, 12719), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((22441, 22486), 'os.system', 'os.system', (["('mkdir -p %s' % self.client_output)"], {}), "('mkdir -p %s' % self.client_output)\n", (22450, 22486), False, 'import os\n'), ((23172, 23217), 'os.system', 'os.system', (["('mkdir -p %s' % self.client_output)"], {}), "('mkdir -p %s' % self.client_output)\n", (23181, 23217), False, 'import os\n'), ((24352, 24391), 'torch.nn.Sequential', 'nn.Sequential', (['backbone', 'self.fc_module'], {}), '(backbone, self.fc_module)\n', (24365, 24391), True, 'import torch.nn as nn\n'), ((26362, 26407), 'os.system', 'os.system', (["('mkdir -p %s' % self.client_output)"], {}), "('mkdir -p %s' % self.client_output)\n", (26371, 26407), False, 'import os\n'), ((2530, 2544), 'torch.nn.functional.normalize', 'F.normalize', (['x'], {}), '(x)\n', (2541, 2544), True, 'import torch.nn.functional as F\n'), ((6901, 6920), 'torch.unique', 'torch.unique', (['label'], {}), '(label)\n', (6913, 6920), False, 'import torch\n'), ((13753, 13768), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13766, 13768), False, 'import torch\n'), ((22560, 22610), 'torch.nn.Sequential', 'nn.Sequential', (['backbone', 'self.bce_module.converter'], {}), '(backbone, self.bce_module.converter)\n', (22573, 22610), True, 'import torch.nn as nn\n'), ((25540, 25571), 
'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'labels'], {}), '(logits, labels)\n', (25555, 25571), True, 'import torch.nn.functional as F\n'), ((26539, 26587), 'os.path.join', 'os.path.join', (['self.client_output', '"""backbone.pth"""'], {}), "(self.client_output, 'backbone.pth')\n", (26551, 26587), False, 'import os\n'), ((1592, 1616), 'torch.nn.functional.normalize', 'F.normalize', (['self.weight'], {}), '(self.weight)\n', (1603, 1616), True, 'import torch.nn.functional as F\n'), ((6539, 6579), 'torch.zeros_like', 'torch.zeros_like', (['self.fc_module.fc.data'], {}), '(self.fc_module.fc.data)\n', (6555, 6579), False, 'import torch\n'), ((6626, 6655), 'torch.zeros', 'torch.zeros', (['self.num_classes'], {}), '(self.num_classes)\n', (6637, 6655), False, 'import torch\n'), ((6853, 6874), 'torch.nn.functional.normalize', 'F.normalize', (['features'], {}), '(features)\n', (6864, 6874), True, 'import torch.nn.functional as F\n'), ((6995, 7036), 'torch.sum', 'torch.sum', (['features[label == l, :]'], {'dim': '(0)'}), '(features[label == l, :], dim=0)\n', (7004, 7036), False, 'import torch\n'), ((7071, 7092), 'torch.sum', 'torch.sum', (['(label == l)'], {}), '(label == l)\n', (7080, 7092), False, 'import torch\n'), ((7285, 7335), 'os.path.join', 'os.path.join', (['self.client_output', "(fc_name + '.pth')"], {}), "(self.client_output, fc_name + '.pth')\n", (7297, 7335), False, 'import os\n'), ((9337, 9361), 'torch.nn.functional.normalize', 'F.normalize', (['pretrain_fc'], {}), '(pretrain_fc)\n', (9348, 9361), True, 'import torch.nn.functional as F\n'), ((11044, 11068), 'torch.ones', 'torch.ones', (['logits.shape'], {}), '(logits.shape)\n', (11054, 11068), False, 'import torch\n'), ((22759, 22807), 'os.path.join', 'os.path.join', (['self.client_output', '"""backbone.pth"""'], {}), "(self.client_output, 'backbone.pth')\n", (22771, 22807), False, 'import os\n'), ((22864, 22914), 'os.path.join', 'os.path.join', (['self.client_output', 
'"""bce_module.pth"""'], {}), "(self.client_output, 'bce_module.pth')\n", (22876, 22914), False, 'import os\n'), ((23072, 23120), 'os.path.join', 'os.path.join', (['self.client_output', '"""backbone.pth"""'], {}), "(self.client_output, 'backbone.pth')\n", (23084, 23120), False, 'import os\n'), ((23303, 23351), 'os.path.join', 'os.path.join', (['self.client_output', '"""backbone.pth"""'], {}), "(self.client_output, 'backbone.pth')\n", (23315, 23351), False, 'import os\n'), ((23408, 23458), 'os.path.join', 'os.path.join', (['self.client_output', '"""bce_module.pth"""'], {}), "(self.client_output, 'bce_module.pth')\n", (23420, 23458), False, 'import os\n'), ((23526, 23574), 'os.path.join', 'os.path.join', (['self.client_output', '"""backbone.pth"""'], {}), "(self.client_output, 'backbone.pth')\n", (23538, 23574), False, 'import os\n'), ((24714, 24738), 'config.config.lr_func', 'cfg.lr_func', (['start_epoch'], {}), '(start_epoch)\n', (24725, 24738), True, 'from config import config as cfg\n'), ((25235, 25265), 'torch.cat', 'torch.cat', (['[imgs, imgs]'], {'dim': '(0)'}), '([imgs, imgs], dim=0)\n', (25244, 25265), False, 'import torch\n'), ((25293, 25320), 'torch.cat', 'torch.cat', (['[labels, labels]'], {}), '([labels, labels])\n', (25302, 25320), False, 'import torch\n'), ((2545, 2565), 'torch.nn.functional.normalize', 'F.normalize', (['self.fc'], {}), '(self.fc)\n', (2556, 2565), True, 'import torch.nn.functional as F\n'), ((2621, 2641), 'torch.nn.functional.normalize', 'F.normalize', (['self.fc'], {}), '(self.fc)\n', (2632, 2641), True, 'import torch.nn.functional as F\n'), ((16904, 16939), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['cos_logits', 'labels'], {}), '(cos_logits, labels)\n', (16919, 16939), True, 'import torch.nn.functional as F\n'), ((17612, 17647), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['cos_logits', 'labels'], {}), '(cos_logits, labels)\n', (17627, 17647), True, 'import torch.nn.functional as F\n'), ((19615, 19646), 
'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'labels'], {}), '(logits, labels)\n', (19630, 19646), True, 'import torch.nn.functional as F\n'), ((20149, 20180), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'labels'], {}), '(logits, labels)\n', (20164, 20180), True, 'import torch.nn.functional as F\n'), ((8513, 8575), 'torch.where', 'torch.where', (['(similarity[i * batch:(i + 1) * batch] > threshold)'], {}), '(similarity[i * batch:(i + 1) * batch] > threshold)\n', (8524, 8575), False, 'import torch\n'), ((9473, 9508), 'torch.where', 'torch.where', (['(similarity > threshold)'], {}), '(similarity > threshold)\n', (9484, 9508), False, 'import torch\n'), ((15943, 15958), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15956, 15958), False, 'import torch\n'), ((16572, 16610), 'torch.stack', 'torch.stack', (['[pos_sim, neg_sim]'], {'dim': '(1)'}), '([pos_sim, neg_sim], dim=1)\n', (16583, 16610), False, 'import torch\n'), ((18749, 18764), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18762, 18764), False, 'import torch\n'), ((19300, 19338), 'torch.stack', 'torch.stack', (['[pos_sim, neg_sim]'], {'dim': '(1)'}), '([pos_sim, neg_sim], dim=1)\n', (19311, 19338), False, 'import torch\n'), ((13817, 13840), 'copy.deepcopy', 'copy.deepcopy', (['backbone'], {}), '(backbone)\n', (13830, 13840), False, 'import copy\n')] |
import json
import os
import tempfile
import catboost as cb
import numpy as np
import utils
from config import OUTPUT_DIR
def binary_classification_simple_on_dataframe():
    """Train a distributed binary classifier on a small inline dataset and
    dump its raw/probability/class predictions to a JSON file in OUTPUT_DIR.
    """
    # mkstemp() returns (fd, path); close the descriptor immediately so the
    # open fd is not leaked (previously only the path was kept).
    fd, learn_set_path = tempfile.mkstemp(prefix='catboost_learn_set_')
    os.close(fd)
    fd, cd_path = tempfile.mkstemp(prefix='catboost_cd_')
    os.close(fd)
    try:
        utils.object_list_to_tsv(
            [
                (0.1, 0.2, 0.11, 1),
                (0.97, 0.82, 0.33, 2),
                (0.13, 0.22, 0.23, 2),
                (0.14, 0.18, 0.1, 1),
                (0.9, 0.67, 0.17, 2),
                (0.66, 0.1, 0.31, 1)
            ],
            learn_set_path
        )
        with open(cd_path, 'w') as cd:
            cd.write('3\tTarget')
        model = utils.run_dist_train(
            ['--iterations', '20',
             '--loss-function', 'Logloss',
             '--learn-set', learn_set_path,
             '--cd', cd_path
            ],
            model_class=cb.CatBoostClassifier
        )
        train_pool = cb.Pool(learn_set_path, column_description=cd_path)
        result = {}
        # Binary raw predictions come back as a flat vector; build the
        # two-column (negative-class, positive-class) representation.
        raw_predictions = np.array(model.predict(train_pool, prediction_type='RawFormulaVal'), ndmin=2).transpose()
        result['raw_prediction'] = np.hstack((np.negative(raw_predictions / 2), raw_predictions / 2)).tolist()
        result['probability'] = model.predict_proba(train_pool).tolist()
        result['prediction'] = model.predict(train_pool).tolist()
        # Use a context manager so the output handle is closed deterministically
        # (previously open() leaked the file object).
        out_path = os.path.join(OUTPUT_DIR, 'binary_classification_simple_on_dataframe_predictions.json')
        with open(out_path, 'w') as fp:
            json.dump(result, fp, allow_nan=True, indent=2)
    finally:
        os.remove(learn_set_path)
        os.remove(cd_path)
def simple_binary_classification():
    """Train a distributed binary classifier on grouped/weighted data and
    dump its raw/probability/class predictions to a JSON file in OUTPUT_DIR.
    """
    # mkstemp() returns (fd, path); close the descriptor immediately so the
    # open fd is not leaked (previously only the path was kept).
    fd, learn_set_path = tempfile.mkstemp(prefix='catboost_learn_set_')
    os.close(fd)
    fd, cd_path = tempfile.mkstemp(prefix='catboost_cd_')
    os.close(fd)
    try:
        utils.object_list_to_tsv(
            [
                (0.1, 0.2, 0.11, "0", "query0", 1.0, "site1", 0.12),
                (0.97, 0.82, 0.33, "0", "query0", 1.0, "site22", 0.18),
                (0.13, 0.22, 0.23, "1", "query1", 0.0, "Site9", 1.0),
                (0.14, 0.18, 0.1, "1", "Query 2", 0.5, "site12", 0.45),
                (0.9, 0.67, 0.17, "0", "Query 2", 0.5, "site22", 1.0),
                (0.66, 0.1, 0.31, "1", "Query 2", 0.5, "Site45", 2.0)
            ],
            learn_set_path
        )
        with open(cd_path, 'w') as cd:
            cd.write(
                "3\tTarget\n"
                + "4\tGroupId\n"
                + "5\tGroupWeight\n"
                + "6\tSubgroupId\n"
                + "7\tWeight\n"
            )
        model = utils.run_dist_train(
            ['--iterations', '20',
             '--loss-function', 'Logloss',
             '--learn-set', learn_set_path,
             '--cd', cd_path
            ],
            model_class=cb.CatBoostClassifier
        )
        train_pool = cb.Pool(learn_set_path, column_description=cd_path)
        result = {}
        # Binary raw predictions come back as a flat vector; build the
        # two-column (negative-class, positive-class) representation.
        raw_predictions = np.array(model.predict(train_pool, prediction_type='RawFormulaVal'), ndmin=2).transpose()
        result['raw_prediction'] = np.hstack((np.negative(raw_predictions / 2), raw_predictions / 2)).tolist()
        result['probability'] = model.predict_proba(train_pool).tolist()
        result['prediction'] = model.predict(train_pool).tolist()
        # Use a context manager so the output handle is closed deterministically
        # (previously open() leaked the file object).
        out_path = os.path.join(OUTPUT_DIR, 'simple_binary_classification.json')
        with open(out_path, 'w') as fp:
            json.dump(result, fp, allow_nan=True, indent=2)
    finally:
        os.remove(learn_set_path)
        os.remove(cd_path)
def binary_classification_with_target_border():
    """Train a distributed binary classifier on continuous targets binarized
    at ``--target-border 0.5`` and dump its predictions to OUTPUT_DIR.
    """
    # mkstemp() returns (fd, path); close the descriptor immediately so the
    # open fd is not leaked (previously only the path was kept).
    fd, learn_set_path = tempfile.mkstemp(prefix='catboost_learn_set_')
    os.close(fd)
    fd, cd_path = tempfile.mkstemp(prefix='catboost_cd_')
    os.close(fd)
    try:
        utils.object_list_to_tsv(
            [
                (0.1, 0.2, 0.11, 0.12),
                (0.97, 0.82, 0.33, 0.1),
                (0.13, 0.22, 0.23, 0.7),
                (0.14, 0.18, 0.1, 0.33),
                (0.9, 0.67, 0.17, 0.82),
                (0.66, 0.1, 0.31, 0.93)
            ],
            learn_set_path
        )
        with open(cd_path, 'w') as cd:
            cd.write('3\tTarget')
        model = utils.run_dist_train(
            ['--iterations', '20',
             '--target-border', '0.5',
             '--loss-function', 'Logloss',
             '--learn-set', learn_set_path,
             '--cd', cd_path
            ],
            model_class=cb.CatBoostClassifier
        )
        train_pool = cb.Pool(learn_set_path, column_description=cd_path)
        result = {}
        # Binary raw predictions come back as a flat vector; build the
        # two-column (negative-class, positive-class) representation.
        raw_predictions = np.array(model.predict(train_pool, prediction_type='RawFormulaVal'), ndmin=2).transpose()
        result['raw_prediction'] = np.hstack((np.negative(raw_predictions / 2), raw_predictions / 2)).tolist()
        result['probability'] = model.predict_proba(train_pool).tolist()
        result['prediction'] = model.predict(train_pool).tolist()
        # Use a context manager so the output handle is closed deterministically
        # (previously open() leaked the file object).
        out_path = os.path.join(OUTPUT_DIR, 'binary_classification_with_target_border.json')
        with open(out_path, 'w') as fp:
            json.dump(result, fp, allow_nan=True, indent=2)
    finally:
        os.remove(learn_set_path)
        os.remove(cd_path)
def binary_classification_with_class_weights_map():
    """Train a distributed binary classifier with per-class weights
    (``--class-weights 1,2``) and dump its predictions to OUTPUT_DIR.
    """
    # mkstemp() returns (fd, path); close the descriptor immediately so the
    # open fd is not leaked (previously only the path was kept).
    fd, learn_set_path = tempfile.mkstemp(prefix='catboost_learn_set_')
    os.close(fd)
    fd, cd_path = tempfile.mkstemp(prefix='catboost_cd_')
    os.close(fd)
    try:
        utils.object_list_to_tsv(
            [
                (0.1, 0.2, 0.11, 0),
                (0.97, 0.82, 0.33, 1),
                (0.13, 0.22, 0.23, 1),
                (0.14, 0.18, 0.1, 0),
                (0.9, 0.67, 0.17, 0),
                (0.66, 0.1, 0.31, 0)
            ],
            learn_set_path
        )
        with open(cd_path, 'w') as cd:
            cd.write('3\tTarget')
        model = utils.run_dist_train(
            ['--iterations', '20',
             '--class-weights', '1,2',
             '--loss-function', 'Logloss',
             '--learn-set', learn_set_path,
             '--cd', cd_path,
            ],
            model_class=cb.CatBoostClassifier
        )
        train_pool = cb.Pool(learn_set_path, column_description=cd_path)
        result = {}
        # Binary raw predictions come back as a flat vector; build the
        # two-column (negative-class, positive-class) representation.
        raw_predictions = np.array(model.predict(train_pool, prediction_type='RawFormulaVal'), ndmin=2).transpose()
        result['raw_prediction'] = np.hstack((np.negative(raw_predictions / 2), raw_predictions / 2)).tolist()
        result['probability'] = model.predict_proba(train_pool).tolist()
        result['prediction'] = model.predict(train_pool).tolist()
        # Use a context manager so the output handle is closed deterministically
        # (previously open() leaked the file object).
        out_path = os.path.join(OUTPUT_DIR, 'binary_classification_with_class_weights_map.json')
        with open(out_path, 'w') as fp:
            json.dump(result, fp, allow_nan=True, indent=2)
    finally:
        os.remove(learn_set_path)
        os.remove(cd_path)
def binary_classification_with_weights():
    """Train a distributed binary classifier with per-object weights and
    dump its predictions to OUTPUT_DIR.
    """
    # mkstemp() returns (fd, path); close the descriptor immediately so the
    # open fd is not leaked (previously only the path was kept).
    fd, learn_set_path = tempfile.mkstemp(prefix='catboost_learn_set_')
    os.close(fd)
    fd, cd_path = tempfile.mkstemp(prefix='catboost_cd_')
    os.close(fd)
    try:
        utils.object_list_to_tsv(
            [
                (0.1, 0.2, 0.11, 0, 1.0),
                (0.97, 0.82, 0.33, 1, 2.0),
                (0.13, 0.22, 0.23, 1, 2.0),
                (0.14, 0.18, 0.1, 0, 1.0),
                (0.9, 0.67, 0.17, 0, 1.0),
                (0.66, 0.1, 0.31, 0, 1.0)
            ],
            learn_set_path
        )
        with open(cd_path, 'w') as cd:
            cd.write(
                '3\tTarget'
                + '\n4\tWeight'
            )
        model = utils.run_dist_train(
            ['--iterations', '20',
             '--loss-function', 'Logloss',
             '--learn-set', learn_set_path,
             '--cd', cd_path,
            ],
            model_class=cb.CatBoostClassifier
        )
        train_pool = cb.Pool(learn_set_path, column_description=cd_path)
        result = {}
        # Binary raw predictions come back as a flat vector; build the
        # two-column (negative-class, positive-class) representation.
        raw_predictions = np.array(model.predict(train_pool, prediction_type='RawFormulaVal'), ndmin=2).transpose()
        result['raw_prediction'] = np.hstack((np.negative(raw_predictions / 2), raw_predictions / 2)).tolist()
        result['probability'] = model.predict_proba(train_pool).tolist()
        result['prediction'] = model.predict(train_pool).tolist()
        # Use a context manager so the output handle is closed deterministically
        # (previously open() leaked the file object).
        out_path = os.path.join(OUTPUT_DIR, 'binary_classification_with_weights.json')
        with open(out_path, 'w') as fp:
            json.dump(result, fp, allow_nan=True, indent=2)
    finally:
        os.remove(learn_set_path)
        os.remove(cd_path)
def simple_multi_classification():
    """Train a distributed multi-class classifier on grouped/weighted data
    and dump its raw/probability/class predictions to OUTPUT_DIR.
    """
    # mkstemp() returns (fd, path); close the descriptor immediately so the
    # open fd is not leaked (previously only the path was kept).
    fd, learn_set_path = tempfile.mkstemp(prefix='catboost_learn_set_')
    os.close(fd)
    fd, cd_path = tempfile.mkstemp(prefix='catboost_cd_')
    os.close(fd)
    try:
        utils.object_list_to_tsv(
            [
                (0.13, 0.22, 0.23, "1", "query1", 0.0, "Site9", 1.0),
                (0.1, 0.2, 0.11, "2", "query0", 1.0, "site1", 0.12),
                (0.97, 0.82, 0.33, "0", "query0", 1.0, "site22", 0.18),
                (0.9, 0.67, 0.17, "0", "Query 2", 0.5, "site22", 1.0),
                (0.66, 0.1, 0.31, "2", "Query 2", 0.5, "Site45", 2.0),
                (0.14, 0.18, 0.1, "1", "Query 2", 0.5, "site12", 0.45)
            ],
            learn_set_path
        )
        with open(cd_path, 'w') as cd:
            cd.write(
                "3\tTarget\n"
                + "4\tGroupId\n"
                + "5\tGroupWeight\n"
                + "6\tSubgroupId\n"
                + "7\tWeight\n"
            )
        model = utils.run_dist_train(
            ['--iterations', '20',
             '--loss-function', 'MultiClass',
             '--learn-set', learn_set_path,
             '--cd', cd_path
            ],
            model_class=cb.CatBoostClassifier
        )
        train_pool = cb.Pool(learn_set_path, column_description=cd_path)
        result = {}
        # MultiClass raw predictions are already one column per class, so no
        # reshaping is needed here (unlike the binary scenarios).
        result['raw_prediction'] = model.predict(train_pool, prediction_type='RawFormulaVal').tolist()
        result['probability'] = model.predict_proba(train_pool).tolist()
        result['prediction'] = model.predict(train_pool).tolist()
        # Use a context manager so the output handle is closed deterministically
        # (previously open() leaked the file object).
        out_path = os.path.join(OUTPUT_DIR, 'simple_multi_classification.json')
        with open(out_path, 'w') as fp:
            json.dump(result, fp, allow_nan=True, indent=2)
    finally:
        os.remove(learn_set_path)
        os.remove(cd_path)
def main():
    """Run every distributed-training scenario in sequence."""
    scenarios = (
        binary_classification_simple_on_dataframe,
        simple_binary_classification,
        binary_classification_with_target_border,
        binary_classification_with_class_weights_map,
        binary_classification_with_weights,
        simple_multi_classification,
    )
    for scenario in scenarios:
        scenario()
| [
"catboost.Pool",
"os.path.join",
"numpy.negative",
"utils.run_dist_train",
"tempfile.mkstemp",
"utils.object_list_to_tsv",
"os.remove"
] | [((197, 243), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""catboost_learn_set_"""'}), "(prefix='catboost_learn_set_')\n", (213, 243), False, 'import tempfile\n'), ((261, 300), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""catboost_cd_"""'}), "(prefix='catboost_cd_')\n", (277, 300), False, 'import tempfile\n'), ((322, 505), 'utils.object_list_to_tsv', 'utils.object_list_to_tsv', (['[(0.1, 0.2, 0.11, 1), (0.97, 0.82, 0.33, 2), (0.13, 0.22, 0.23, 2), (0.14, \n 0.18, 0.1, 1), (0.9, 0.67, 0.17, 2), (0.66, 0.1, 0.31, 1)]', 'learn_set_path'], {}), '([(0.1, 0.2, 0.11, 1), (0.97, 0.82, 0.33, 2), (0.13,\n 0.22, 0.23, 2), (0.14, 0.18, 0.1, 1), (0.9, 0.67, 0.17, 2), (0.66, 0.1,\n 0.31, 1)], learn_set_path)\n', (346, 505), False, 'import utils\n'), ((732, 898), 'utils.run_dist_train', 'utils.run_dist_train', (["['--iterations', '20', '--loss-function', 'Logloss', '--learn-set',\n learn_set_path, '--cd', cd_path]"], {'model_class': 'cb.CatBoostClassifier'}), "(['--iterations', '20', '--loss-function', 'Logloss',\n '--learn-set', learn_set_path, '--cd', cd_path], model_class=cb.\n CatBoostClassifier)\n", (752, 898), False, 'import utils\n'), ((997, 1048), 'catboost.Pool', 'cb.Pool', (['learn_set_path'], {'column_description': 'cd_path'}), '(learn_set_path, column_description=cd_path)\n', (1004, 1048), True, 'import catboost as cb\n'), ((1672, 1697), 'os.remove', 'os.remove', (['learn_set_path'], {}), '(learn_set_path)\n', (1681, 1697), False, 'import os\n'), ((1706, 1724), 'os.remove', 'os.remove', (['cd_path'], {}), '(cd_path)\n', (1715, 1724), False, 'import os\n'), ((1784, 1830), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""catboost_learn_set_"""'}), "(prefix='catboost_learn_set_')\n", (1800, 1830), False, 'import tempfile\n'), ((1848, 1887), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""catboost_cd_"""'}), "(prefix='catboost_cd_')\n", (1864, 1887), False, 'import tempfile\n'), ((1909, 2302), 
'utils.object_list_to_tsv', 'utils.object_list_to_tsv', (["[(0.1, 0.2, 0.11, '0', 'query0', 1.0, 'site1', 0.12), (0.97, 0.82, 0.33,\n '0', 'query0', 1.0, 'site22', 0.18), (0.13, 0.22, 0.23, '1', 'query1', \n 0.0, 'Site9', 1.0), (0.14, 0.18, 0.1, '1', 'Query 2', 0.5, 'site12', \n 0.45), (0.9, 0.67, 0.17, '0', 'Query 2', 0.5, 'site22', 1.0), (0.66, \n 0.1, 0.31, '1', 'Query 2', 0.5, 'Site45', 2.0)]", 'learn_set_path'], {}), "([(0.1, 0.2, 0.11, '0', 'query0', 1.0, 'site1', \n 0.12), (0.97, 0.82, 0.33, '0', 'query0', 1.0, 'site22', 0.18), (0.13, \n 0.22, 0.23, '1', 'query1', 0.0, 'Site9', 1.0), (0.14, 0.18, 0.1, '1',\n 'Query 2', 0.5, 'site12', 0.45), (0.9, 0.67, 0.17, '0', 'Query 2', 0.5,\n 'site22', 1.0), (0.66, 0.1, 0.31, '1', 'Query 2', 0.5, 'Site45', 2.0)],\n learn_set_path)\n", (1933, 2302), False, 'import utils\n'), ((2685, 2851), 'utils.run_dist_train', 'utils.run_dist_train', (["['--iterations', '20', '--loss-function', 'Logloss', '--learn-set',\n learn_set_path, '--cd', cd_path]"], {'model_class': 'cb.CatBoostClassifier'}), "(['--iterations', '20', '--loss-function', 'Logloss',\n '--learn-set', learn_set_path, '--cd', cd_path], model_class=cb.\n CatBoostClassifier)\n", (2705, 2851), False, 'import utils\n'), ((2950, 3001), 'catboost.Pool', 'cb.Pool', (['learn_set_path'], {'column_description': 'cd_path'}), '(learn_set_path, column_description=cd_path)\n', (2957, 3001), True, 'import catboost as cb\n'), ((3600, 3625), 'os.remove', 'os.remove', (['learn_set_path'], {}), '(learn_set_path)\n', (3609, 3625), False, 'import os\n'), ((3634, 3652), 'os.remove', 'os.remove', (['cd_path'], {}), '(cd_path)\n', (3643, 3652), False, 'import os\n'), ((3724, 3770), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""catboost_learn_set_"""'}), "(prefix='catboost_learn_set_')\n", (3740, 3770), False, 'import tempfile\n'), ((3788, 3827), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""catboost_cd_"""'}), "(prefix='catboost_cd_')\n", (3804, 3827), False, 
'import tempfile\n'), ((3849, 4049), 'utils.object_list_to_tsv', 'utils.object_list_to_tsv', (['[(0.1, 0.2, 0.11, 0.12), (0.97, 0.82, 0.33, 0.1), (0.13, 0.22, 0.23, 0.7),\n (0.14, 0.18, 0.1, 0.33), (0.9, 0.67, 0.17, 0.82), (0.66, 0.1, 0.31, 0.93)]', 'learn_set_path'], {}), '([(0.1, 0.2, 0.11, 0.12), (0.97, 0.82, 0.33, 0.1),\n (0.13, 0.22, 0.23, 0.7), (0.14, 0.18, 0.1, 0.33), (0.9, 0.67, 0.17, \n 0.82), (0.66, 0.1, 0.31, 0.93)], learn_set_path)\n', (3873, 4049), False, 'import utils\n'), ((4275, 4466), 'utils.run_dist_train', 'utils.run_dist_train', (["['--iterations', '20', '--target-border', '0.5', '--loss-function',\n 'Logloss', '--learn-set', learn_set_path, '--cd', cd_path]"], {'model_class': 'cb.CatBoostClassifier'}), "(['--iterations', '20', '--target-border', '0.5',\n '--loss-function', 'Logloss', '--learn-set', learn_set_path, '--cd',\n cd_path], model_class=cb.CatBoostClassifier)\n", (4295, 4466), False, 'import utils\n'), ((4579, 4630), 'catboost.Pool', 'cb.Pool', (['learn_set_path'], {'column_description': 'cd_path'}), '(learn_set_path, column_description=cd_path)\n', (4586, 4630), True, 'import catboost as cb\n'), ((5241, 5266), 'os.remove', 'os.remove', (['learn_set_path'], {}), '(learn_set_path)\n', (5250, 5266), False, 'import os\n'), ((5275, 5293), 'os.remove', 'os.remove', (['cd_path'], {}), '(cd_path)\n', (5284, 5293), False, 'import os\n'), ((5369, 5415), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""catboost_learn_set_"""'}), "(prefix='catboost_learn_set_')\n", (5385, 5415), False, 'import tempfile\n'), ((5433, 5472), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""catboost_cd_"""'}), "(prefix='catboost_cd_')\n", (5449, 5472), False, 'import tempfile\n'), ((5494, 5677), 'utils.object_list_to_tsv', 'utils.object_list_to_tsv', (['[(0.1, 0.2, 0.11, 0), (0.97, 0.82, 0.33, 1), (0.13, 0.22, 0.23, 1), (0.14, \n 0.18, 0.1, 0), (0.9, 0.67, 0.17, 0), (0.66, 0.1, 0.31, 0)]', 'learn_set_path'], {}), '([(0.1, 0.2, 0.11, 0), (0.97, 
0.82, 0.33, 1), (0.13,\n 0.22, 0.23, 1), (0.14, 0.18, 0.1, 0), (0.9, 0.67, 0.17, 0), (0.66, 0.1,\n 0.31, 0)], learn_set_path)\n', (5518, 5677), False, 'import utils\n'), ((5904, 6095), 'utils.run_dist_train', 'utils.run_dist_train', (["['--iterations', '20', '--class-weights', '1,2', '--loss-function',\n 'Logloss', '--learn-set', learn_set_path, '--cd', cd_path]"], {'model_class': 'cb.CatBoostClassifier'}), "(['--iterations', '20', '--class-weights', '1,2',\n '--loss-function', 'Logloss', '--learn-set', learn_set_path, '--cd',\n cd_path], model_class=cb.CatBoostClassifier)\n", (5924, 6095), False, 'import utils\n'), ((6209, 6260), 'catboost.Pool', 'cb.Pool', (['learn_set_path'], {'column_description': 'cd_path'}), '(learn_set_path, column_description=cd_path)\n', (6216, 6260), True, 'import catboost as cb\n'), ((6875, 6900), 'os.remove', 'os.remove', (['learn_set_path'], {}), '(learn_set_path)\n', (6884, 6900), False, 'import os\n'), ((6909, 6927), 'os.remove', 'os.remove', (['cd_path'], {}), '(cd_path)\n', (6918, 6927), False, 'import os\n'), ((6993, 7039), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""catboost_learn_set_"""'}), "(prefix='catboost_learn_set_')\n", (7009, 7039), False, 'import tempfile\n'), ((7057, 7096), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""catboost_cd_"""'}), "(prefix='catboost_cd_')\n", (7073, 7096), False, 'import tempfile\n'), ((7118, 7332), 'utils.object_list_to_tsv', 'utils.object_list_to_tsv', (['[(0.1, 0.2, 0.11, 0, 1.0), (0.97, 0.82, 0.33, 1, 2.0), (0.13, 0.22, 0.23, 1,\n 2.0), (0.14, 0.18, 0.1, 0, 1.0), (0.9, 0.67, 0.17, 0, 1.0), (0.66, 0.1,\n 0.31, 0, 1.0)]', 'learn_set_path'], {}), '([(0.1, 0.2, 0.11, 0, 1.0), (0.97, 0.82, 0.33, 1, \n 2.0), (0.13, 0.22, 0.23, 1, 2.0), (0.14, 0.18, 0.1, 0, 1.0), (0.9, 0.67,\n 0.17, 0, 1.0), (0.66, 0.1, 0.31, 0, 1.0)], learn_set_path)\n', (7142, 7332), False, 'import utils\n'), ((7622, 7788), 'utils.run_dist_train', 'utils.run_dist_train', (["['--iterations', '20', 
'--loss-function', 'Logloss', '--learn-set',\n learn_set_path, '--cd', cd_path]"], {'model_class': 'cb.CatBoostClassifier'}), "(['--iterations', '20', '--loss-function', 'Logloss',\n '--learn-set', learn_set_path, '--cd', cd_path], model_class=cb.\n CatBoostClassifier)\n", (7642, 7788), False, 'import utils\n'), ((7888, 7939), 'catboost.Pool', 'cb.Pool', (['learn_set_path'], {'column_description': 'cd_path'}), '(learn_set_path, column_description=cd_path)\n', (7895, 7939), True, 'import catboost as cb\n'), ((8544, 8569), 'os.remove', 'os.remove', (['learn_set_path'], {}), '(learn_set_path)\n', (8553, 8569), False, 'import os\n'), ((8578, 8596), 'os.remove', 'os.remove', (['cd_path'], {}), '(cd_path)\n', (8587, 8596), False, 'import os\n'), ((8655, 8701), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""catboost_learn_set_"""'}), "(prefix='catboost_learn_set_')\n", (8671, 8701), False, 'import tempfile\n'), ((8719, 8758), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""catboost_cd_"""'}), "(prefix='catboost_cd_')\n", (8735, 8758), False, 'import tempfile\n'), ((8780, 9172), 'utils.object_list_to_tsv', 'utils.object_list_to_tsv', (["[(0.13, 0.22, 0.23, '1', 'query1', 0.0, 'Site9', 1.0), (0.1, 0.2, 0.11, '2',\n 'query0', 1.0, 'site1', 0.12), (0.97, 0.82, 0.33, '0', 'query0', 1.0,\n 'site22', 0.18), (0.9, 0.67, 0.17, '0', 'Query 2', 0.5, 'site22', 1.0),\n (0.66, 0.1, 0.31, '2', 'Query 2', 0.5, 'Site45', 2.0), (0.14, 0.18, 0.1,\n '1', 'Query 2', 0.5, 'site12', 0.45)]", 'learn_set_path'], {}), "([(0.13, 0.22, 0.23, '1', 'query1', 0.0, 'Site9', \n 1.0), (0.1, 0.2, 0.11, '2', 'query0', 1.0, 'site1', 0.12), (0.97, 0.82,\n 0.33, '0', 'query0', 1.0, 'site22', 0.18), (0.9, 0.67, 0.17, '0',\n 'Query 2', 0.5, 'site22', 1.0), (0.66, 0.1, 0.31, '2', 'Query 2', 0.5,\n 'Site45', 2.0), (0.14, 0.18, 0.1, '1', 'Query 2', 0.5, 'site12', 0.45)],\n learn_set_path)\n", (8804, 9172), False, 'import utils\n'), ((9556, 9725), 'utils.run_dist_train', 
'utils.run_dist_train', (["['--iterations', '20', '--loss-function', 'MultiClass', '--learn-set',\n learn_set_path, '--cd', cd_path]"], {'model_class': 'cb.CatBoostClassifier'}), "(['--iterations', '20', '--loss-function', 'MultiClass',\n '--learn-set', learn_set_path, '--cd', cd_path], model_class=cb.\n CatBoostClassifier)\n", (9576, 9725), False, 'import utils\n'), ((9824, 9875), 'catboost.Pool', 'cb.Pool', (['learn_set_path'], {'column_description': 'cd_path'}), '(learn_set_path, column_description=cd_path)\n', (9831, 9875), True, 'import catboost as cb\n'), ((10349, 10374), 'os.remove', 'os.remove', (['learn_set_path'], {}), '(learn_set_path)\n', (10358, 10374), False, 'import os\n'), ((10383, 10401), 'os.remove', 'os.remove', (['cd_path'], {}), '(cd_path)\n', (10392, 10401), False, 'import os\n'), ((1497, 1587), 'os.path.join', 'os.path.join', (['OUTPUT_DIR', '"""binary_classification_simple_on_dataframe_predictions.json"""'], {}), "(OUTPUT_DIR,\n 'binary_classification_simple_on_dataframe_predictions.json')\n", (1509, 1587), False, 'import os\n'), ((3450, 3511), 'os.path.join', 'os.path.join', (['OUTPUT_DIR', '"""simple_binary_classification.json"""'], {}), "(OUTPUT_DIR, 'simple_binary_classification.json')\n", (3462, 3511), False, 'import os\n'), ((5079, 5152), 'os.path.join', 'os.path.join', (['OUTPUT_DIR', '"""binary_classification_with_target_border.json"""'], {}), "(OUTPUT_DIR, 'binary_classification_with_target_border.json')\n", (5091, 5152), False, 'import os\n'), ((6709, 6786), 'os.path.join', 'os.path.join', (['OUTPUT_DIR', '"""binary_classification_with_class_weights_map.json"""'], {}), "(OUTPUT_DIR, 'binary_classification_with_class_weights_map.json')\n", (6721, 6786), False, 'import os\n'), ((8388, 8455), 'os.path.join', 'os.path.join', (['OUTPUT_DIR', '"""binary_classification_with_weights.json"""'], {}), "(OUTPUT_DIR, 'binary_classification_with_weights.json')\n", (8400, 8455), False, 'import os\n'), ((10200, 10260), 'os.path.join', 
'os.path.join', (['OUTPUT_DIR', '"""simple_multi_classification.json"""'], {}), "(OUTPUT_DIR, 'simple_multi_classification.json')\n", (10212, 10260), False, 'import os\n'), ((1233, 1265), 'numpy.negative', 'np.negative', (['(raw_predictions / 2)'], {}), '(raw_predictions / 2)\n', (1244, 1265), True, 'import numpy as np\n'), ((3186, 3218), 'numpy.negative', 'np.negative', (['(raw_predictions / 2)'], {}), '(raw_predictions / 2)\n', (3197, 3218), True, 'import numpy as np\n'), ((4815, 4847), 'numpy.negative', 'np.negative', (['(raw_predictions / 2)'], {}), '(raw_predictions / 2)\n', (4826, 4847), True, 'import numpy as np\n'), ((6445, 6477), 'numpy.negative', 'np.negative', (['(raw_predictions / 2)'], {}), '(raw_predictions / 2)\n', (6456, 6477), True, 'import numpy as np\n'), ((8124, 8156), 'numpy.negative', 'np.negative', (['(raw_predictions / 2)'], {}), '(raw_predictions / 2)\n', (8135, 8156), True, 'import numpy as np\n')] |
# Example: generate fake continuous-wave GW data with pyfstat, then run an
# MCMC search over (F0, F1) using MCMCSearch's initialisation feature.
import pyfstat
import numpy as np
# Properties of the GW data
sqrtSX = 1e-23  # noise amplitude given to the Writer (presumably sqrt of PSD) -- TODO confirm units
tstart = 1000000000  # observation start time (GPS seconds)
duration = 100 * 86400  # observation span: 100 days, in seconds
tend = tstart + duration
# Properties of the signal
F0 = 30.0  # signal frequency [Hz]
F1 = -1e-10  # first frequency derivative (spin-down)
F2 = 0  # second frequency derivative held at zero
Alpha = np.radians(83.6292)  # right ascension, converted from degrees
Delta = np.radians(22.0144)  # declination, converted from degrees
tref = 0.5 * (tstart + tend)  # reference time: mid-point of the observation
depth = 10  # chosen sensitivity depth = sqrtSX / h0
h0 = sqrtSX / depth  # signal amplitude derived from the depth
label = "using_initialisation"
outdir = "data"
data = pyfstat.Writer(
    label=label,
    outdir=outdir,
    tref=tref,
    tstart=tstart,
    F0=F0,
    F1=F1,
    F2=F2,
    duration=duration,
    Alpha=Alpha,
    Delta=Delta,
    h0=h0,
    sqrtSX=sqrtSX,
)
data.make_data()
# The predicted twoF, given by lalapps_predictFstat can be accessed by
twoF = data.predict_fstat()
print("Predicted twoF value: {}\n".format(twoF))
# Half-widths of the search box around the injected (F0, F1)
DeltaF0 = 1e-7
DeltaF1 = 1e-13
# Approximate metric volumes of the search region in F0 and F1
VF0 = (np.pi * duration * DeltaF0) ** 2 / 3.0
VF1 = (np.pi * duration ** 2 * DeltaF1) ** 2 * 4 / 45.0
print("\nV={:1.2e}, VF0={:1.2e}, VF1={:1.2e}\n".format(VF0 * VF1, VF0, VF1))
# Uniform priors centred on the injection; F2, Alpha and Delta are fixed
theta_prior = {
    "F0": {"type": "unif", "lower": F0 - DeltaF0 / 2.0, "upper": F0 + DeltaF0 / 2.0},
    "F1": {"type": "unif", "lower": F1 - DeltaF1 / 2.0, "upper": F1 + DeltaF1 / 2.0},
    "F2": F2,
    "Alpha": Alpha,
    "Delta": Delta,
}
# Sampler settings
ntemps = 1
log10beta_min = -1
nwalkers = 100
nsteps = [100, 100]  # presumably [burn-in, production] -- confirm against pyfstat docs
mcmc = pyfstat.MCMCSearch(
    label=label,
    outdir=outdir,
    sftfilepattern="{}/*{}*sft".format(outdir, label),
    theta_prior=theta_prior,
    tref=tref,
    minStartTime=tstart,
    maxStartTime=tend,
    nsteps=nsteps,
    nwalkers=nwalkers,
    ntemps=ntemps,
    log10beta_min=log10beta_min,
)
# Initialise the walkers in a tight scatter before the main run
mcmc.setup_initialisation(100, scatter_val=1e-10)
mcmc.run()
mcmc.plot_corner(add_prior=True)
mcmc.print_summary()
| [
"numpy.radians",
"pyfstat.Writer"
] | [((211, 230), 'numpy.radians', 'np.radians', (['(83.6292)'], {}), '(83.6292)\n', (221, 230), True, 'import numpy as np\n'), ((239, 258), 'numpy.radians', 'np.radians', (['(22.0144)'], {}), '(22.0144)\n', (249, 258), True, 'import numpy as np\n'), ((375, 539), 'pyfstat.Writer', 'pyfstat.Writer', ([], {'label': 'label', 'outdir': 'outdir', 'tref': 'tref', 'tstart': 'tstart', 'F0': 'F0', 'F1': 'F1', 'F2': 'F2', 'duration': 'duration', 'Alpha': 'Alpha', 'Delta': 'Delta', 'h0': 'h0', 'sqrtSX': 'sqrtSX'}), '(label=label, outdir=outdir, tref=tref, tstart=tstart, F0=F0,\n F1=F1, F2=F2, duration=duration, Alpha=Alpha, Delta=Delta, h0=h0,\n sqrtSX=sqrtSX)\n', (389, 539), False, 'import pyfstat\n')] |
import keras
from keras import backend as K
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import math
# Hyper-parameters for the autoencoder experiment below
config = {
    'inputs': 28*28,  # flattened MNIST image size
    'layer_sizes': [600, 500, 400, 300, 200, 150, 100, 75, 50, 30, 20, 10, 4],  # encoder widths; decoder mirrors them
    'batch_size': 64,
    'bottleneck_cells': 20., # quantisation levels for the quantised bottleneck output; TODO 8.,
    'epochs': 100,
    'label_num': 10, # How many labels there are in the data
    'samples': 20000 # How many data points to predict on in the evaluation phase (unless the test set is used)
}
# Custom loss
# ----------------------
def bottleneck_loss(y_true, y_pred):
    """Binarisation pressure on the bottleneck activations.

    Bell-shaped penalty centred on 0.5: activations close to 0 or 1 cost
    almost nothing, activations in the middle are expensive, pushing the
    bottleneck towards discrete values. ``y_true`` is a dummy target and
    is ignored.
    """
    centred_sq = (y_pred - 0.5) ** 2.0
    per_unit = 2.0 * (1.0 - 1.0 / (1.0 + tf.math.exp(-25.0 * centred_sq)))
    return tf.reduce_mean(per_unit)
# Construct the model
# ----------------------
# Encoder: a stack of Dense/sigmoid/BatchNorm layers shrinking to 4 units
inputs = keras.layers.Input(shape=(config['inputs'],), dtype='float32')
x = inputs
for ix, size in enumerate(config['layer_sizes']):
    x = keras.layers.Dense(size, name="encode/dense{}".format(ix))(x)
    x = keras.layers.Activation('sigmoid')(x)
    x = keras.layers.BatchNormalization(name="encode/bn{}".format(ix))(x)
x = keras.layers.Activation('sigmoid')(x) # to ensure bottleneck signal is 0-1
bottleneck = x
# Quantised copy of the bottleneck: floor into `bottleneck_cells` levels
quantised_bottleneck = keras.layers.Lambda((lambda y: tf.math.floor(y*(config['bottleneck_cells']-.0001))/config['bottleneck_cells'] ))(x)
# Decoder: mirror of the encoder, defined as its own Model so it can be
# applied to both the raw and the quantised bottleneck (shared weights)
decoder_input = keras.layers.Input(shape=(config['layer_sizes'][-1],), dtype='float32')
dx = decoder_input
for ix, size in enumerate(reversed(config['layer_sizes'])):
    dx = keras.layers.Dense(size, name="decode/dense{}".format(ix))(dx)
    dx = keras.layers.Activation('sigmoid')(dx)
    dx = keras.layers.BatchNormalization(name="decode/bn{}".format(ix))(dx)
dx = keras.layers.Dense(config['inputs'], name="decode/denseFinal")(dx)
decoder_model = keras.models.Model(inputs=[decoder_input], outputs=[dx])
decoded = decoder_model(bottleneck)
quantised_decoded = decoder_model(quantised_bottleneck)
# Name the outputs
decoded = keras.layers.Lambda((lambda y: y), name='decoded')(decoded)
bottleneck = keras.layers.Lambda((lambda y: y), name='bottleneck')(bottleneck)
quantised_decoded = keras.layers.Lambda((lambda y: y), name='quantised_decoded')(quantised_decoded)
model = keras.models.Model(inputs=[inputs], outputs=[decoded, bottleneck, quantised_decoded])
# Three losses: reconstruction, binarisation pressure, and quantised reconstruction
model.compile(
    loss={'decoded': 'mse', 'bottleneck': bottleneck_loss, 'quantised_decoded': 'mse'},
    loss_weights={'decoded': 1., 'bottleneck': .01, 'quantised_decoded': 2.},
    optimizer=keras.optimizers.Adadelta()
)
# Load and prepare the data
# ----------------------
# We don't use the labels; we are interested in how the model classifies the images
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
x_train = np.reshape(x_train, (-1, 28*28))
# Train the model
# ----------------------
# The dummy target for the bottleneck loss is not used
model.fit(x_train, [x_train, np.zeros((x_train.shape[0], 1)), x_train], batch_size=config['batch_size'], epochs=config['epochs'])
# Run a sample of the data through the model
# ----------------------
bottleneck_dimensions = config['layer_sizes'][-1]
if True: # sample from training data
    samples = x_train[0:config['samples']]
    label_index = y_train[0:config['samples']]
else: # use the test set
    samples = y_train
    label_index = y_test
    # NOTE(review): 'samples = y_train' looks wrong -- probably meant x_test
    # (reshaped like x_train); branch is currently dead because of `if True`.
sample_size = samples.shape[0]
code = model.predict(samples)
code = code[1] # select the bottleneck output
# Plot where the sample data points are in the bottleneck "cube", colored by their labels
for i in range(bottleneck_dimensions):
    for j in range(i+1, bottleneck_dimensions):
        plt.scatter(code[:,i], code[:,j], s=.4, c=label_index)
        plt.title("Bottleneck scatter Dimensions i={} j={}".format(i, j))
        plt.savefig("out/bottleneck_scatter_4fq20_{}_{}.png".format(i, j), dpi=200)
# Collect stats on what kind of data points are in the clusters (corners)
cluster_mask = np.zeros((bottleneck_dimensions,)) # to convert bitmask to index
for i in range(bottleneck_dimensions):
    cluster_mask[i] = 2**i # should mirror the above {MASK}
# This holds the cluster (corner) index for each data point in the sample
cluster_index = np.sum(np.floor(code*1.9999) * cluster_mask, axis=1)
cluster_sizes = np.zeros((2**bottleneck_dimensions,))
cross_analysis = np.zeros((config['label_num'], 2**bottleneck_dimensions))
for i in range(sample_size):
    cluster_sizes[int(cluster_index[i])] += 1
    cross_analysis[int(label_index[i]), int(cluster_index[i])] += 1
print("Clusters vs labels")
for j in range(config['label_num']):
    print("L{:6.0f}".format(j), end='')
print(" <- Labels")
for i in range(2**bottleneck_dimensions):
    for j in range(config['label_num']):
        print("{:7.0f}".format(cross_analysis[j,i]), end='')
    print(" Cluster {:2.0f} size {:6.0f}".format(i, cluster_sizes[i]))
# Generate images from the extreme points
# ----------------------
# Decode every corner of the bottleneck hypercube back into an image.
# NOTE(review): `input` shadows the builtin; kept as-is here.
for cluster in range(2**bottleneck_dimensions):
    input = np.zeros((1, bottleneck_dimensions))
    for i in range(bottleneck_dimensions):
        if (cluster & (2**i)):
            input[0, i] = 1
    decoded = decoder_model.predict(input)[0]
    decoded = np.reshape(decoded, (28, 28))
    plt.imsave("out/decoded_4fq20_{}.png".format(cluster), decoded)
| [
"numpy.reshape",
"tensorflow.math.exp",
"tensorflow.keras.datasets.mnist.load_data",
"keras.layers.Lambda",
"tensorflow.math.floor",
"numpy.floor",
"keras.layers.Input",
"numpy.zeros",
"keras.models.Model",
"keras.layers.Activation",
"matplotlib.pyplot.scatter",
"keras.layers.Dense",
"tensor... | [((881, 943), 'keras.layers.Input', 'keras.layers.Input', ([], {'shape': "(config['inputs'],)", 'dtype': '"""float32"""'}), "(shape=(config['inputs'],), dtype='float32')\n", (899, 943), False, 'import keras\n'), ((1447, 1518), 'keras.layers.Input', 'keras.layers.Input', ([], {'shape': "(config['layer_sizes'][-1],)", 'dtype': '"""float32"""'}), "(shape=(config['layer_sizes'][-1],), dtype='float32')\n", (1465, 1518), False, 'import keras\n'), ((1882, 1938), 'keras.models.Model', 'keras.models.Model', ([], {'inputs': '[decoder_input]', 'outputs': '[dx]'}), '(inputs=[decoder_input], outputs=[dx])\n', (1900, 1938), False, 'import keras\n'), ((2310, 2399), 'keras.models.Model', 'keras.models.Model', ([], {'inputs': '[inputs]', 'outputs': '[decoded, bottleneck, quantised_decoded]'}), '(inputs=[inputs], outputs=[decoded, bottleneck,\n quantised_decoded])\n', (2328, 2399), False, 'import keras\n'), ((2799, 2834), 'tensorflow.keras.datasets.mnist.load_data', 'tf.keras.datasets.mnist.load_data', ([], {}), '()\n', (2832, 2834), True, 'import tensorflow as tf\n'), ((2963, 2997), 'numpy.reshape', 'np.reshape', (['x_train', '(-1, 28 * 28)'], {}), '(x_train, (-1, 28 * 28))\n', (2973, 2997), True, 'import numpy as np\n'), ((4147, 4181), 'numpy.zeros', 'np.zeros', (['(bottleneck_dimensions,)'], {}), '((bottleneck_dimensions,))\n', (4155, 4181), True, 'import numpy as np\n'), ((4472, 4511), 'numpy.zeros', 'np.zeros', (['(2 ** bottleneck_dimensions,)'], {}), '((2 ** bottleneck_dimensions,))\n', (4480, 4511), True, 'import numpy as np\n'), ((4527, 4586), 'numpy.zeros', 'np.zeros', (["(config['label_num'], 2 ** bottleneck_dimensions)"], {}), "((config['label_num'], 2 ** bottleneck_dimensions))\n", (4535, 4586), True, 'import numpy as np\n'), ((801, 821), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (815, 821), True, 'import tensorflow as tf\n'), ((1200, 1234), 'keras.layers.Activation', 'keras.layers.Activation', (['"""sigmoid"""'], {}), 
"('sigmoid')\n", (1223, 1234), False, 'import keras\n'), ((1799, 1861), 'keras.layers.Dense', 'keras.layers.Dense', (["config['inputs']"], {'name': '"""decode/denseFinal"""'}), "(config['inputs'], name='decode/denseFinal')\n", (1817, 1861), False, 'import keras\n'), ((2062, 2110), 'keras.layers.Lambda', 'keras.layers.Lambda', (['(lambda y: y)'], {'name': '"""decoded"""'}), "(lambda y: y, name='decoded')\n", (2081, 2110), False, 'import keras\n'), ((2135, 2186), 'keras.layers.Lambda', 'keras.layers.Lambda', (['(lambda y: y)'], {'name': '"""bottleneck"""'}), "(lambda y: y, name='bottleneck')\n", (2154, 2186), False, 'import keras\n'), ((2221, 2279), 'keras.layers.Lambda', 'keras.layers.Lambda', (['(lambda y: y)'], {'name': '"""quantised_decoded"""'}), "(lambda y: y, name='quantised_decoded')\n", (2240, 2279), False, 'import keras\n'), ((5201, 5237), 'numpy.zeros', 'np.zeros', (['(1, bottleneck_dimensions)'], {}), '((1, bottleneck_dimensions))\n', (5209, 5237), True, 'import numpy as np\n'), ((5400, 5429), 'numpy.reshape', 'np.reshape', (['decoded', '(28, 28)'], {}), '(decoded, (28, 28))\n', (5410, 5429), True, 'import numpy as np\n'), ((1084, 1118), 'keras.layers.Activation', 'keras.layers.Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (1107, 1118), False, 'import keras\n'), ((1679, 1713), 'keras.layers.Activation', 'keras.layers.Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (1702, 1713), False, 'import keras\n'), ((2591, 2618), 'keras.optimizers.Adadelta', 'keras.optimizers.Adadelta', ([], {}), '()\n', (2616, 2618), False, 'import keras\n'), ((3125, 3156), 'numpy.zeros', 'np.zeros', (['(x_train.shape[0], 1)'], {}), '((x_train.shape[0], 1))\n', (3133, 3156), True, 'import numpy as np\n'), ((3843, 3900), 'matplotlib.pyplot.scatter', 'plt.scatter', (['code[:, i]', 'code[:, j]'], {'s': '(0.4)', 'c': 'label_index'}), '(code[:, i], code[:, j], s=0.4, c=label_index)\n', (3854, 3900), True, 'import matplotlib.pyplot as plt\n'), ((4409, 4432), 'numpy.floor', 
'np.floor', (['(code * 1.9999)'], {}), '(code * 1.9999)\n', (4417, 4432), True, 'import numpy as np\n'), ((653, 695), 'tensorflow.math.exp', 'tf.math.exp', (['(-25.0 * (y_pred - 0.5) ** 2.0)'], {}), '(-25.0 * (y_pred - 0.5) ** 2.0)\n', (664, 695), True, 'import tensorflow as tf\n'), ((1345, 1401), 'tensorflow.math.floor', 'tf.math.floor', (["(y * (config['bottleneck_cells'] - 0.0001))"], {}), "(y * (config['bottleneck_cells'] - 0.0001))\n", (1358, 1401), True, 'import tensorflow as tf\n')] |
import cv2
import numpy as np
from pathlib import Path
class ObjectDetector:
    def __init__(self, pipeline):
        """Hold detection state for the given pipeline.

        Parameters
        ----------
        pipeline : object
            Pipeline used later to create the XLinkOut node
            (see createLinkOut).
        """
        self.status = 0
        self.pipeline = pipeline
        self.device = None  # set by getDeviceQueue
        self.q_nn = None  # detections output queue, set by getDeviceQueue
        self.detection_nn = None  # NOTE(review): never assigned elsewhere in this class
        self.detections = []  # latest raw detections received from the device
        self.blobPath = None  # resolved by createNeuralNetwork
        self.labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
                    "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
        self.personsDetected = []  # subset of detections whose label is "person"
def frameNorm(self, frame, bbox):
normVals = np.full(len(bbox), frame.shape[0])
normVals[::2] = frame.shape[1]
return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)
    def getDeviceQueue(self, device):
        """Store the device handle and grab its 'detections' output queue."""
        self.device = device
        self.q_nn = device.getOutputQueue(name="detections", maxSize=4, blocking=False)
    def createNeuralNetwork(self, cam_rgb):
        # NOTE(review): despite the name, this only resolves the path to the
        # MobileNet-SSD blob; no network node is created and `cam_rgb` is
        # unused -- verify against callers.
        self.blobPath = str((Path(__file__).parent.parent.parent / Path('models/mobilenet-ssd.blob')).resolve().absolute())
    def returnBlobPath(self):
        """Return the blob path resolved by createNeuralNetwork()."""
        return self.blobPath
    def createLinkOut(self):
        """Create and return the pipeline's XLinkOut node streaming 'detections'."""
        self.xout_nn = self.pipeline.createXLinkOut()
        self.xout_nn.setStreamName("detections")
        return self.xout_nn
    def detectPersons(self, frame):
        """Filter the current detections down to persons and draw each
        person's class label onto ``frame`` in place."""
        self.personsDetected = []
        for detection in self.detections:
            if self.labelMap[detection.label] == "person":
                self.personsDetected.append(detection)
                # bbox comes back as [x1, y1, x2, y2] in pixels
                bbox = self.frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
                cv2.putText(frame, self.labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5,
                            255)
def update(self, frame):
in_nn = self.q_nn.tryGet()
if in_nn is not None:
self.detections = in_nn.detections
self.detectPersons(frame)
return frame
else:
return None
    def getDetections(self):
        """Return the most recently received raw detection list."""
        return self.detections
def getPersonDetections(self):
return self.personsDetected | [
"numpy.array",
"pathlib.Path",
"cv2.putText"
] | [((1696, 1816), 'cv2.putText', 'cv2.putText', (['frame', 'self.labelMap[detection.label]', '(bbox[0] + 10, bbox[1] + 20)', 'cv2.FONT_HERSHEY_TRIPLEX', '(0.5)', '(255)'], {}), '(frame, self.labelMap[detection.label], (bbox[0] + 10, bbox[1] +\n 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)\n', (1707, 1816), False, 'import cv2\n'), ((749, 763), 'numpy.array', 'np.array', (['bbox'], {}), '(bbox)\n', (757, 763), True, 'import numpy as np\n'), ((1063, 1096), 'pathlib.Path', 'Path', (['"""models/mobilenet-ssd.blob"""'], {}), "('models/mobilenet-ssd.blob')\n", (1067, 1096), False, 'from pathlib import Path\n'), ((1025, 1039), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1029, 1039), False, 'from pathlib import Path\n')] |
import numpy as np
from .base import Prior, PriorException
from .interpolated import Interped
from .analytical import DeltaFunction, PowerLaw, Uniform, LogUniform, \
SymmetricLogUniform, Cosine, Sine, Gaussian, TruncatedGaussian, HalfGaussian, \
LogNormal, Exponential, StudentT, Beta, Logistic, Cauchy, Gamma, ChiSquared, FermiDirac
from ..utils import infer_args_from_method, infer_parameters_from_function
def conditional_prior_factory(prior_class):
    """Create and return a conditional variant of `prior_class`.

    The returned class subclasses `prior_class` and lets a user-supplied
    `condition_func` rewrite the prior's reference parameters (e.g.
    `minimum`, `maximum`, `mu`, `sigma`) at sample/evaluation time based
    on the values of other variables.
    """
    class ConditionalPrior(prior_class):
        def __init__(self, condition_func, name=None, latex_label=None, unit=None,
                     boundary=None, **reference_params):
            """
            Parameters
            ==========
            condition_func: func
                Functional form of the condition for this prior. The first function argument
                has to be a dictionary for the `reference_params` (see below). The following
                arguments are the required variables that are required before we can draw this
                prior.
                It needs to return a dictionary with the modified values for the
                `reference_params` that are being used in the next draw.
                For example if we have a Uniform prior for `x` depending on a different variable `y`
                `p(x|y)` with the boundaries linearly depending on y, then this
                could have the following form:
                .. code-block:: python
                    def condition_func(reference_params, y):
                        return dict(
                            minimum=reference_params['minimum'] + y,
                            maximum=reference_params['maximum'] + y
                            )
            name: str, optional
                See superclass
            latex_label: str, optional
                See superclass
            unit: str, optional
                See superclass
            boundary: str, optional
                See superclass
            reference_params:
                Initial values for attributes such as `minimum`, `maximum`.
                This differs on the `prior_class`, for example for the Gaussian
                prior this is `mu` and `sigma`.
            """
            # Not every parent prior accepts `boundary`; only forward it when
            # the parent __init__ signature declares it.
            if 'boundary' in infer_args_from_method(super(ConditionalPrior, self).__init__):
                super(ConditionalPrior, self).__init__(name=name, latex_label=latex_label,
                                                       unit=unit, boundary=boundary, **reference_params)
            else:
                super(ConditionalPrior, self).__init__(name=name, latex_label=latex_label,
                                                       unit=unit, **reference_params)
            self._required_variables = None
            self.condition_func = condition_func
            self._reference_params = reference_params
            # Rewrite the class name so repr/str show e.g. 'ConditionalUniform'.
            self.__class__.__name__ = 'Conditional{}'.format(prior_class.__name__)
            self.__class__.__qualname__ = 'Conditional{}'.format(prior_class.__qualname__)
        def sample(self, size=None, **required_variables):
            """Draw a sample from the prior

            Parameters
            ==========
            size: int or tuple of ints, optional
                See superclass
            required_variables:
                Any required variables that this prior depends on

            Returns
            =======
            float: See superclass
            """
            self.least_recently_sampled = self.rescale(np.random.uniform(0, 1, size), **required_variables)
            return self.least_recently_sampled
        def rescale(self, val, **required_variables):
            """
            'Rescale' a sample from the unit line element to the prior.

            Parameters
            ==========
            val: Union[float, int, array_like]
                See superclass
            required_variables:
                Any required variables that this prior depends on
            """
            self.update_conditions(**required_variables)
            return super(ConditionalPrior, self).rescale(val)
        def prob(self, val, **required_variables):
            """Return the prior probability of val.

            Parameters
            ==========
            val: Union[float, int, array_like]
                See superclass
            required_variables:
                Any required variables that this prior depends on

            Returns
            =======
            float: Prior probability of val
            """
            self.update_conditions(**required_variables)
            return super(ConditionalPrior, self).prob(val)
        def ln_prob(self, val, **required_variables):
            """Return the natural log prior probability of val.

            Parameters
            ==========
            val: Union[float, int, array_like]
                See superclass
            required_variables:
                Any required variables that this prior depends on

            Returns
            =======
            float: Natural log prior probability of val
            """
            self.update_conditions(**required_variables)
            return super(ConditionalPrior, self).ln_prob(val)
        def cdf(self, val, **required_variables):
            """Return the cdf of val.

            Parameters
            ==========
            val: Union[float, int, array_like]
                See superclass
            required_variables:
                Any required variables that this prior depends on

            Returns
            =======
            float: CDF of val
            """
            self.update_conditions(**required_variables)
            return super(ConditionalPrior, self).cdf(val)
        def update_conditions(self, **required_variables):
            """
            This method updates the conditional parameters (depending on the parent class
            this could be e.g. `minimum`, `maximum`, `mu`, `sigma`, etc.) of this prior
            class depending on the required variables it depends on.

            If no variables are given, the most recently used conditional parameters are kept

            Parameters
            ==========
            required_variables:
                Any required variables that this prior depends on. If none are given,
                self.reference_params will be used.
            """
            # Exactly the declared required variables -> recompute parameters;
            # no variables -> keep last values; anything else is an error.
            if sorted(list(required_variables)) == sorted(self.required_variables):
                parameters = self.condition_func(self.reference_params.copy(), **required_variables)
                for key, value in parameters.items():
                    setattr(self, key, value)
            elif len(required_variables) == 0:
                return
            else:
                raise IllegalRequiredVariablesException("Expected kwargs for {}. Got kwargs for {} instead."
                                                       .format(self.required_variables,
                                                               list(required_variables.keys())))
        @property
        def reference_params(self):
            """
            Initial values for attributes such as `minimum`, `maximum`.
            This depends on the `prior_class`, for example for the Gaussian
            prior this is `mu` and `sigma`. This is read-only.
            """
            return self._reference_params
        @property
        def condition_func(self):
            return self._condition_func
        @condition_func.setter
        def condition_func(self, condition_func):
            # A None condition falls back to the identity, i.e. an
            # unconditional prior with no required variables.
            if condition_func is None:
                self._condition_func = lambda reference_params: reference_params
            else:
                self._condition_func = condition_func
            self._required_variables = infer_parameters_from_function(self.condition_func)
        @property
        def required_variables(self):
            """ The required variables to pass into the condition function. """
            return self._required_variables
        def get_instantiation_dict(self):
            # Extend the superclass dict with the *reference* (initial)
            # parameter values rather than the possibly-updated attributes.
            instantiation_dict = super(ConditionalPrior, self).get_instantiation_dict()
            for key, value in self.reference_params.items():
                instantiation_dict[key] = value
            return instantiation_dict
        def reset_to_reference_parameters(self):
            """
            Reset the object attributes to match the original reference parameters
            """
            for key, value in self.reference_params.items():
                setattr(self, key, value)
        def __repr__(self):
            """Overrides the special method __repr__.

            Returns a representation of this instance that resembles how it is instantiated.
            Works correctly for all child classes

            Returns
            =======
            str: A string representation of this instance
            """
            prior_name = self.__class__.__name__
            instantiation_dict = self.get_instantiation_dict()
            # Represent the condition function by its dotted import path.
            instantiation_dict["condition_func"] = ".".join([
                instantiation_dict["condition_func"].__module__,
                instantiation_dict["condition_func"].__name__
            ])
            args = ', '.join(['{}={}'.format(key, repr(instantiation_dict[key]))
                              for key in instantiation_dict])
            return "{}({})".format(prior_name, args)
    return ConditionalPrior
class ConditionalBasePrior(conditional_prior_factory(Prior)):
    """Conditional variant of :class:`Prior`."""
    pass
class ConditionalUniform(conditional_prior_factory(Uniform)):
    """Conditional variant of :class:`Uniform`."""
    pass
class ConditionalDeltaFunction(conditional_prior_factory(DeltaFunction)):
    """Conditional variant of :class:`DeltaFunction`."""
    pass
class ConditionalPowerLaw(conditional_prior_factory(PowerLaw)):
    """Conditional variant of :class:`PowerLaw`."""
    pass
class ConditionalGaussian(conditional_prior_factory(Gaussian)):
    """Conditional variant of :class:`Gaussian`."""
    pass
class ConditionalLogUniform(conditional_prior_factory(LogUniform)):
    """Conditional variant of :class:`LogUniform`."""
    pass
class ConditionalSymmetricLogUniform(conditional_prior_factory(SymmetricLogUniform)):
    """Conditional variant of :class:`SymmetricLogUniform`."""
    pass
class ConditionalCosine(conditional_prior_factory(Cosine)):
    """Conditional variant of :class:`Cosine`."""
    pass
class ConditionalSine(conditional_prior_factory(Sine)):
    """Conditional variant of :class:`Sine`."""
    pass
class ConditionalTruncatedGaussian(conditional_prior_factory(TruncatedGaussian)):
    """Conditional variant of :class:`TruncatedGaussian`."""
    pass
class ConditionalHalfGaussian(conditional_prior_factory(HalfGaussian)):
    """Conditional variant of :class:`HalfGaussian`."""
    pass
class ConditionalLogNormal(conditional_prior_factory(LogNormal)):
    """Conditional variant of :class:`LogNormal`."""
    pass
class ConditionalExponential(conditional_prior_factory(Exponential)):
    """Conditional variant of :class:`Exponential`."""
    pass
class ConditionalStudentT(conditional_prior_factory(StudentT)):
    """Conditional variant of :class:`StudentT`."""
    pass
class ConditionalBeta(conditional_prior_factory(Beta)):
    """Conditional variant of :class:`Beta`."""
    pass
class ConditionalLogistic(conditional_prior_factory(Logistic)):
    """Conditional variant of :class:`Logistic`."""
    pass
class ConditionalCauchy(conditional_prior_factory(Cauchy)):
    """Conditional variant of :class:`Cauchy`."""
    pass
class ConditionalGamma(conditional_prior_factory(Gamma)):
    """Conditional variant of :class:`Gamma`."""
    pass
class ConditionalChiSquared(conditional_prior_factory(ChiSquared)):
    """Conditional variant of :class:`ChiSquared`."""
    pass
class ConditionalFermiDirac(conditional_prior_factory(FermiDirac)):
    """Conditional variant of :class:`FermiDirac`."""
    pass
class ConditionalInterped(conditional_prior_factory(Interped)):
    """Conditional variant of :class:`Interped`."""
    pass
class DirichletElement(ConditionalBeta):
    r"""
    Single element in a dirichlet distribution
    The probability scales as
    .. math::
        p(x_n) \propto (x_\max - x_n)^{(N - n - 2)}
    for :math:`x_n < x_\max`, where :math:`x_\max` is the sum of :math:`x_i`
    for :math:`i < n`
    Examples
    ========
    n_dimensions = 1:
    .. math::
        p(x_0) \propto 1 ; 0 < x_0 < 1
    n_dimensions = 2:
    .. math::
        p(x_0) &\propto (1 - x_0) ; 0 < x_0 < 1
        p(x_1) &\propto 1 ; 0 < x_1 < 1
    Parameters
    ==========
    order: int
        Order of this element of the dirichlet distribution.
    n_dimensions: int
        Total number of elements of the dirichlet distribution
    label: str
        Label for the dirichlet distribution.
        This should be the same for all elements.
    """
    def __init__(self, order, n_dimensions, label):
        """Set up the conditional Beta prior for element `order` of an
        `n_dimensions`-element dirichlet distribution named `label`."""
        super(DirichletElement, self).__init__(
            minimum=0, maximum=1, alpha=1, beta=n_dimensions - order - 1,
            name=label + str(order),
            condition_func=self.dirichlet_condition
        )
        self.label = label
        self.n_dimensions = n_dimensions
        self.order = order
        # This element depends on all lower-order elements of the same
        # distribution (label0, label1, ..., label{order-1}).
        self._required_variables = [
            label + str(ii) for ii in range(order)
        ]
        self.__class__.__name__ = 'Dirichlet'
    def dirichlet_condition(self, reference_parms, **kwargs):
        """Shrink the upper bound to the probability mass left over after
        all lower-order elements have been drawn."""
        remaining = 1 - sum(
            [kwargs[self.label + str(ii)] for ii in range(self.order)]
        )
        return dict(minimum=reference_parms["minimum"], maximum=remaining)
    def __repr__(self):
        # Bypass ConditionalPrior.__repr__: this class fixes its own
        # condition_func, so the plain Prior representation is used.
        return Prior.__repr__(self)
    def get_instantiation_dict(self):
        # Same reasoning as __repr__: use the plain Prior implementation.
        return Prior.get_instantiation_dict(self)
class ConditionalPriorException(PriorException):
    """ General base class for all conditional prior exceptions """
class IllegalRequiredVariablesException(ConditionalPriorException):
    """ Exception class for exceptions relating to handling the required variables. """
| [
"numpy.random.uniform"
] | [((3524, 3553), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'size'], {}), '(0, 1, size)\n', (3541, 3553), True, 'import numpy as np\n')] |
import unittest
import json
import haversine as hv
import math
import numpy as np
import emission.simulation.generate_trips as esgt
import emission.simulation.transition_prob as estp
class TestGenerateTrips(unittest.TestCase):
    def setUp(self):
        # Load the sample tour config and attach a randomly generated
        # transition-probability matrix sized to its locations.
        with open("conf/tour.conf.sample") as tcs:
            self.sampleTourConfig = json.load(tcs)
        n_labels = len(self.sampleTourConfig["locations"])
        self.sampleTourConfig["transition_probs"] = estp.generate_random_transition_prob(n_labels)
    def testInitDataFrame(self):
        # _init_dataframe should build a square label x label frame of Nones.
        labels = ["foo", "bar", "baz"]
        df = esgt._init_dataframe(labels, dtype=dict)
        self.assertEqual(len(df["foo"]), 3, "test init row length")
        self.assertIsNone(df.loc["foo", "bar"], "test init cell values")
        return df
    def testModifyDataFrame(self):
        # Cells hold dicts of mode -> probability; check single writes stick.
        df = self.testInitDataFrame()
        foobar = {"BICYCLE": 0.5}
        df.at["foo", "bar"] = foobar
        self.assertEqual(df.loc["foo", "bar"]["BICYCLE"], 0.5, "test modify cell")
        foobaz = {"TRANSIT": 0.5, "CAR": 0.5}
        df.at["foo", "baz"] = foobaz
        self.assertEqual(df.loc["foo", "baz"]["CAR"], 0.5, "test modify cell with two entries")
        self.assertEqual(df.loc["foo"].to_list(), [None, foobar, foobaz], "test access modified row")
    def testHaversineLibrary(self):
        # Sanity-check the third-party haversine package against distances
        # verified by using https://www.doogal.co.uk/MeasureDistances.php
        self.assertAlmostEqual(hv.haversine(
            [37.77264255,-122.399714854263],
            [37.42870635,-122.140926605802]),
            44.52, places=2, msg="Palo Alto to SF is 44 km as the crow files")
        self.assertAlmostEqual(hv.haversine(
            [37.77264255,-122.399714854263],
            [37.87119, -122.27388]),
            15.56, places=2, msg="SF to Berkeley is 15 km as the crow files")
    def testCreateDistMatrix(self):
        # Pairwise great-circle distances between configured locations;
        # stores dist_df on self for the chained tests below.
        dist_df = esgt.create_dist_matrix(self.sampleTourConfig)
        self.assertEqual(dist_df.loc["home", "home"], 0, msg="same location = 0 km")
        self.assertAlmostEqual(dist_df.loc["home", "work"], 44.52, places=2,
            msg="home to work = SF to Palo Alto = 44 km")
        self.assertAlmostEqual(dist_df.loc["work", "home"], 44.52, places=2,
            msg="home to work = Palo Alto to SF = 44 km")
        self.assertAlmostEqual(dist_df.loc["home", "family"], 15.56, places=2,
            msg="home to family = SF to Berkeley = 15 km")
        self.dist_df = dist_df
    def testCalculatePossibleModes(self):
        # Modes are filtered by distance; long hops only allow motorized modes.
        self.testCreateDistMatrix()
        possible_mode_df = esgt.calculate_possible_modes(self.sampleTourConfig, self.dist_df)
        self.assertIsNone(possible_mode_df.loc["home", "home"], "No mode for staying in place")
        self.assertEqual(list(possible_mode_df.loc["home", "work"].keys()), ["TRANSIT", "CAR"], "Too far, can only take motorized")
        self.assertEqual(list(possible_mode_df.loc["work", "home"].values()), [2, 1], "Too far, can only take motorized")
        self.possible_mode_matrix = possible_mode_df
    def testCalculateModeProb(self):
        # Mode weights should be converted into a CDF ending at 1.
        self.testCalculatePossibleModes()
        mode_prob_matrix = esgt.calculate_mode_prob(self.possible_mode_matrix)
        self.assertAlmostEqual(list(mode_prob_matrix.loc["work", "home"].values())[-1], 1,
            msg="Final entry in CDF should be one")
    def testFreezeDistributions(self):
        # Frozen scipy-style generators should reflect the configured
        # mean/std for dwell hours and per-mode speeds.
        dwell_time_frozen = esgt.freeze_dwell_hours_random_generators(self.sampleTourConfig)
        self.assertEqual(dwell_time_frozen["work"].mean(), 8)
        self.assertEqual(dwell_time_frozen["home"].std(), 2)
        self.assertEqual(dwell_time_frozen["family"].support(), (-math.inf, math.inf))
        mode_speed_frozen = esgt.freeze_mode_kmph_random_generators(self.sampleTourConfig)
        self.assertEqual(mode_speed_frozen["WALK"].mean(), 5)
        self.assertEqual(mode_speed_frozen["BICYCLE"].std(), 5)
        self.assertEqual(mode_speed_frozen["TRANSIT"].support(), (-math.inf, math.inf))
def testFirstDwell(self):
sample_user = esgt.FakeUser(5, self.sampleTourConfig)
sample_user.first_dwell()
# This is a randomly generated value, so we can't really verify what is
# it supposed to be. The support is (-inf, inf) so it could really be
# anything. We could check for 2 standard deviations, and comment out
# if it fails too much. Or just run to ensure that the elapsed time is updated
self.assertGreater(sample_user._elapsed_mins, 0,
msg="Elapsed time has not been updated")
self.assertGreater(sample_user._elapsed_mins, 60,
msg="Elapsed time is an order of magnitude lower, check scale")
self.assertLess(sample_user._elapsed_mins, 60 * 60,
msg="Elapsed time is an order of magnitude lower, check scale")
def testFirstDwellRV(self):
sample_user = esgt.FakeUser(5, self.sampleTourConfig)
first_dwell_rv = sample_user._freeze_first_dwell()
# print(first_dwell_rv.mean())
dwell_times = first_dwell_rv.rvs(size=100000)
# print(dwell_times)
self.assertAlmostEqual(dwell_times.mean(), 8, places=1,
msg="check mean of final probability distribution for initial home state")
self.assertAlmostEqual(dwell_times.std(), 1, places=1,
msg="check std of final probability distribution for initial home state")
def testLastDwell(self):
sample_user = esgt.FakeUser(5, self.sampleTourConfig)
act = sample_user.last_dwell()
self.assertIn("@type", act)
self.assertNotIn("@elapsed_ts", act)
self.assertNotIn("leg", act)
def testTakeTrip(self):
sample_user = esgt.FakeUser(5, self.sampleTourConfig)
sample_user.first_dwell()
act = sample_user.take_trip()
nRetries = 0
MAX_RETRIES = 10
while act is None and nRetries < MAX_RETRIES:
act = sample_user.take_trip()
# print(act)
# Again, because this is randomly generated, we cannot check the
# values, only that they exist and are formatted correctly
self.assertEqual(act["@type"], "home")
self.assertIn("leg", act)
self.assertIn("@mode", act["leg"])
self.assertIn(act["leg"]["@mode"], ["TRANSIT", "CAR"])
def testTakeTrips(self):
sample_user = esgt.FakeUser(5, self.sampleTourConfig)
act_list = sample_user.take_trips()
# print(act_list)
act_pairs = list(zip(act_list[:-1], act_list[1:-1]))
elapsed_time_diffs = np.asarray([ea["@elapsed_ts"] - sa["@elapsed_ts"] for (sa, ea) in act_pairs])
# print(elapsed_time_diffs)
self.assertTrue((elapsed_time_diffs > 0).all(),
"time is monotonically increasing")
adjacent_labels = [(ea["@type"], sa["@type"]) for (sa, ea) in act_pairs]
self.assertEqual(len(adjacent_labels), 4, "Checking number of label pairs")
duplicate_adjacent_labels = [sl == el for (sl, el) in adjacent_labels]
# print(duplicate_adjacent_labels)
self.assertFalse(np.array(duplicate_adjacent_labels).any(), "Checking no duplicate labels")
| [
"emission.simulation.generate_trips.create_dist_matrix",
"emission.simulation.transition_prob.generate_random_transition_prob",
"haversine.haversine",
"numpy.asarray",
"emission.simulation.generate_trips._init_dataframe",
"json.load",
"numpy.array",
"emission.simulation.generate_trips.FakeUser",
"em... | [((463, 509), 'emission.simulation.transition_prob.generate_random_transition_prob', 'estp.generate_random_transition_prob', (['n_labels'], {}), '(n_labels)\n', (499, 509), True, 'import emission.simulation.transition_prob as estp\n'), ((596, 636), 'emission.simulation.generate_trips._init_dataframe', 'esgt._init_dataframe', (['labels'], {'dtype': 'dict'}), '(labels, dtype=dict)\n', (616, 636), True, 'import emission.simulation.generate_trips as esgt\n'), ((1895, 1941), 'emission.simulation.generate_trips.create_dist_matrix', 'esgt.create_dist_matrix', (['self.sampleTourConfig'], {}), '(self.sampleTourConfig)\n', (1918, 1941), True, 'import emission.simulation.generate_trips as esgt\n'), ((2572, 2638), 'emission.simulation.generate_trips.calculate_possible_modes', 'esgt.calculate_possible_modes', (['self.sampleTourConfig', 'self.dist_df'], {}), '(self.sampleTourConfig, self.dist_df)\n', (2601, 2638), True, 'import emission.simulation.generate_trips as esgt\n'), ((3149, 3200), 'emission.simulation.generate_trips.calculate_mode_prob', 'esgt.calculate_mode_prob', (['self.possible_mode_matrix'], {}), '(self.possible_mode_matrix)\n', (3173, 3200), True, 'import emission.simulation.generate_trips as esgt\n'), ((3412, 3476), 'emission.simulation.generate_trips.freeze_dwell_hours_random_generators', 'esgt.freeze_dwell_hours_random_generators', (['self.sampleTourConfig'], {}), '(self.sampleTourConfig)\n', (3453, 3476), True, 'import emission.simulation.generate_trips as esgt\n'), ((3716, 3778), 'emission.simulation.generate_trips.freeze_mode_kmph_random_generators', 'esgt.freeze_mode_kmph_random_generators', (['self.sampleTourConfig'], {}), '(self.sampleTourConfig)\n', (3755, 3778), True, 'import emission.simulation.generate_trips as esgt\n'), ((4046, 4085), 'emission.simulation.generate_trips.FakeUser', 'esgt.FakeUser', (['(5)', 'self.sampleTourConfig'], {}), '(5, self.sampleTourConfig)\n', (4059, 4085), True, 'import emission.simulation.generate_trips as 
esgt\n'), ((4880, 4919), 'emission.simulation.generate_trips.FakeUser', 'esgt.FakeUser', (['(5)', 'self.sampleTourConfig'], {}), '(5, self.sampleTourConfig)\n', (4893, 4919), True, 'import emission.simulation.generate_trips as esgt\n'), ((5454, 5493), 'emission.simulation.generate_trips.FakeUser', 'esgt.FakeUser', (['(5)', 'self.sampleTourConfig'], {}), '(5, self.sampleTourConfig)\n', (5467, 5493), True, 'import emission.simulation.generate_trips as esgt\n'), ((5702, 5741), 'emission.simulation.generate_trips.FakeUser', 'esgt.FakeUser', (['(5)', 'self.sampleTourConfig'], {}), '(5, self.sampleTourConfig)\n', (5715, 5741), True, 'import emission.simulation.generate_trips as esgt\n'), ((6356, 6395), 'emission.simulation.generate_trips.FakeUser', 'esgt.FakeUser', (['(5)', 'self.sampleTourConfig'], {}), '(5, self.sampleTourConfig)\n', (6369, 6395), True, 'import emission.simulation.generate_trips as esgt\n'), ((6556, 6633), 'numpy.asarray', 'np.asarray', (["[(ea['@elapsed_ts'] - sa['@elapsed_ts']) for sa, ea in act_pairs]"], {}), "([(ea['@elapsed_ts'] - sa['@elapsed_ts']) for sa, ea in act_pairs])\n", (6566, 6633), True, 'import numpy as np\n'), ((337, 351), 'json.load', 'json.load', (['tcs'], {}), '(tcs)\n', (346, 351), False, 'import json\n'), ((1450, 1535), 'haversine.haversine', 'hv.haversine', (['[37.77264255, -122.399714854263]', '[37.42870635, -122.140926605802]'], {}), '([37.77264255, -122.399714854263], [37.42870635, -122.140926605802]\n )\n', (1462, 1535), True, 'import haversine as hv\n'), ((1666, 1736), 'haversine.haversine', 'hv.haversine', (['[37.77264255, -122.399714854263]', '[37.87119, -122.27388]'], {}), '([37.77264255, -122.399714854263], [37.87119, -122.27388])\n', (1678, 1736), True, 'import haversine as hv\n'), ((7086, 7121), 'numpy.array', 'np.array', (['duplicate_adjacent_labels'], {}), '(duplicate_adjacent_labels)\n', (7094, 7121), True, 'import numpy as np\n')] |
import numpy as np
import torch as th
from torchvision import transforms
from .data_utils import is_tuple_or_list
class BaseDataset:
    """An abstract class representing a Dataset.

    All other datasets should subclass it. All subclasses should override
    ``__len__``, that provides the size of the dataset, and ``__getitem__``,
    supporting integer indexing in range from 0 to len(self) exclusive.

    Subclasses are expected to provide (at least) these attributes:
    ``inputs``/``targets``, the integer counts ``num_inputs``/``num_targets``
    and ``min_inputs_or_targets``, the indexable transform collections
    ``input_transform``/``target_transform``/``co_transform``, and the
    boolean ``has_target``.
    """

    def __len__(self):
        # A tuple/list of inputs means "multiple input arrays"; the dataset
        # length is then the length of the first array.
        return len(self.inputs) if not isinstance(self.inputs, (tuple,list)) else len(self.inputs[0])

    def add_input_transform(self, transform, add_to_front=True, idx=None):
        """Compose ``transform`` with the existing input transform(s).

        :param transform: callable to compose in.
        :param add_to_front: if True, run ``transform`` before the existing
            transform; otherwise after it.
        :param idx: input index (or list of indices) to modify; defaults to
            all inputs.
        """
        if idx is None:
            # BUG FIX: num_inputs is a count (int); len() of an int raises
            # TypeError. Was: np.arange(len(self.num_inputs)).
            idx = np.arange(self.num_inputs)
        elif not is_tuple_or_list(idx):
            idx = [idx]
        if add_to_front:
            for i in idx:
                self.input_transform[i] = transforms.Compose([transform, self.input_transform[i]])
        else:
            for i in idx:
                self.input_transform[i] = transforms.Compose([self.input_transform[i], transform])

    def add_target_transform(self, transform, add_to_front=True, idx=None):
        """Compose ``transform`` with the existing target transform(s).

        :param transform: callable to compose in.
        :param add_to_front: if True, run ``transform`` before the existing
            transform; otherwise after it.
        :param idx: target index (or list of indices) to modify; defaults to
            all targets.
        """
        if idx is None:
            # BUG FIX: num_targets is a count (int); len() of an int raises
            # TypeError. Was: np.arange(len(self.num_targets)).
            idx = np.arange(self.num_targets)
        elif not is_tuple_or_list(idx):
            idx = [idx]
        if add_to_front:
            for i in idx:
                self.target_transform[i] = transforms.Compose([transform, self.target_transform[i]])
        else:
            for i in idx:
                self.target_transform[i] = transforms.Compose([self.target_transform[i], transform])

    def add_co_transform(self, transform, add_to_front=True, idx=None):
        """Compose ``transform`` with the existing joint (input+target)
        transform(s).

        :param transform: callable to compose in.
        :param add_to_front: if True, run ``transform`` before the existing
            transform; otherwise after it.
        :param idx: index (or list of indices) to modify; defaults to all
            input/target pairs.
        """
        if idx is None:
            # BUG FIX: min_inputs_or_targets is a count (int); len() of an
            # int raises TypeError. Was: np.arange(len(self.min_inputs_or_targets)).
            idx = np.arange(self.min_inputs_or_targets)
        elif not is_tuple_or_list(idx):
            idx = [idx]
        if add_to_front:
            for i in idx:
                self.co_transform[i] = transforms.Compose([transform, self.co_transform[i]])
        else:
            for i in idx:
                self.co_transform[i] = transforms.Compose([self.co_transform[i], transform])

    def load(self, num_samples=None, load_range=None):
        """
        Load all data or a subset of the data into actual memory.
        For instance, if the inputs are paths to image files, then this
        function will actually load those images.

        :param num_samples: (int (optional)):
            number of samples to load. if None, will load all
        :param load_range: (numpy array of integers (optional)):
            the index range of images to load
            e.g. np.arange(4) loads the first 4 inputs+targets
        :return: ``inputs`` (or ``(inputs, targets)`` when ``has_target``),
            each either a single pre-allocated array or a list of arrays
            when there are multiple inputs/targets.
        """
        def _parse_shape(x):
            # Infer the per-sample shape used to pre-allocate the output.
            if isinstance(x, (list,tuple)):
                return (len(x),)
            elif isinstance(x, th.Tensor):
                return x.size()
            else:
                return (1,)

        def _alloc(sample, num):
            # Pre-allocate storage for `num` groups, each sized to hold
            # len(load_range) samples shaped like `sample`.
            if num == 1:
                return np.empty([len(load_range)] + list(_parse_shape(sample)))
            return [np.empty([len(load_range)] + list(_parse_shape(sample[i])))
                    for i in range(num)]

        def _store(storage, sample, num, pos):
            # Copy one sample (or group of samples) into `storage` at `pos`.
            if num == 1:
                storage[pos] = sample
            else:
                for i in range(num):
                    storage[i][pos] = sample[i]

        if num_samples is None and load_range is None:
            num_samples = len(self)
            load_range = np.arange(num_samples)
        elif num_samples is None and load_range is not None:
            num_samples = len(load_range)
        elif num_samples is not None and load_range is None:
            load_range = np.arange(num_samples)

        if self.has_target:
            for enum_idx, sample_idx in enumerate(load_range):
                input_sample, target_sample = self.__getitem__(sample_idx)
                if enum_idx == 0:
                    # Allocate once, based on the shape of the first sample.
                    inputs = _alloc(input_sample, self.num_inputs)
                    targets = _alloc(target_sample, self.num_targets)
                _store(inputs, input_sample, self.num_inputs, enum_idx)
                _store(targets, target_sample, self.num_targets, enum_idx)
            return inputs, targets
        else:
            for enum_idx, sample_idx in enumerate(load_range):
                input_sample = self.__getitem__(sample_idx)
                if enum_idx == 0:
                    inputs = _alloc(input_sample, self.num_inputs)
                _store(inputs, input_sample, self.num_inputs, enum_idx)
            return inputs

    def fit_transforms(self):
        """
        Make a single pass through the entire dataset in order to fit
        any parameters of the transforms which require the entire dataset.
        e.g. StandardScaler() requires mean and std for the entire dataset.

        If you dont call this fit function, then transforms which require properties
        of the entire dataset will just work at the batch level.
        e.g. StandardScaler() will normalize each batch by the specific batch mean/std
        """
        it_fit = hasattr(self.input_transform, 'update_fit')
        tt_fit = hasattr(self.target_transform, 'update_fit')
        ct_fit = hasattr(self.co_transform, 'update_fit')
        if it_fit or tt_fit or ct_fit:
            for sample_idx in range(len(self)):
                # Use the loader if one is defined (e.g. for file paths),
                # otherwise take the in-memory value directly.
                if hasattr(self, 'input_loader'):
                    x = self.input_loader(self.inputs[sample_idx])
                else:
                    x = self.inputs[sample_idx]
                if it_fit:
                    self.input_transform.update_fit(x)
                if self.has_target:
                    if hasattr(self, 'target_loader'):
                        y = self.target_loader(self.targets[sample_idx])
                    else:
                        y = self.targets[sample_idx]
                if tt_fit:
                    self.target_transform.update_fit(y)
                if ct_fit:
                    self.co_transform.update_fit(x,y)
"numpy.empty",
"torchvision.transforms.Compose",
"numpy.arange"
] | [((2951, 2973), 'numpy.arange', 'np.arange', (['num_samples'], {}), '(num_samples)\n', (2960, 2973), True, 'import numpy as np\n'), ((850, 906), 'torchvision.transforms.Compose', 'transforms.Compose', (['[transform, self.input_transform[i]]'], {}), '([transform, self.input_transform[i]])\n', (868, 906), False, 'from torchvision import transforms\n'), ((989, 1045), 'torchvision.transforms.Compose', 'transforms.Compose', (['[self.input_transform[i], transform]'], {}), '([self.input_transform[i], transform])\n', (1007, 1045), False, 'from torchvision import transforms\n'), ((1357, 1414), 'torchvision.transforms.Compose', 'transforms.Compose', (['[transform, self.target_transform[i]]'], {}), '([transform, self.target_transform[i]])\n', (1375, 1414), False, 'from torchvision import transforms\n'), ((1498, 1555), 'torchvision.transforms.Compose', 'transforms.Compose', (['[self.target_transform[i], transform]'], {}), '([self.target_transform[i], transform])\n', (1516, 1555), False, 'from torchvision import transforms\n'), ((1869, 1922), 'torchvision.transforms.Compose', 'transforms.Compose', (['[transform, self.co_transform[i]]'], {}), '([transform, self.co_transform[i]])\n', (1887, 1922), False, 'from torchvision import transforms\n'), ((2002, 2055), 'torchvision.transforms.Compose', 'transforms.Compose', (['[self.co_transform[i], transform]'], {}), '([self.co_transform[i], transform])\n', (2020, 2055), False, 'from torchvision import transforms\n'), ((3163, 3185), 'numpy.arange', 'np.arange', (['num_samples'], {}), '(num_samples)\n', (3172, 3185), True, 'import numpy as np\n'), ((3553, 3569), 'numpy.empty', 'np.empty', (['_shape'], {}), '(_shape)\n', (3561, 3569), True, 'import numpy as np\n'), ((4138, 4154), 'numpy.empty', 'np.empty', (['_shape'], {}), '(_shape)\n', (4146, 4154), True, 'import numpy as np\n'), ((5495, 5511), 'numpy.empty', 'np.empty', (['_shape'], {}), '(_shape)\n', (5503, 5511), True, 'import numpy as np\n'), ((3824, 3840), 'numpy.empty', 
'np.empty', (['_shape'], {}), '(_shape)\n', (3832, 3840), True, 'import numpy as np\n'), ((4506, 4522), 'numpy.empty', 'np.empty', (['_shape'], {}), '(_shape)\n', (4514, 4522), True, 'import numpy as np\n'), ((5857, 5873), 'numpy.empty', 'np.empty', (['_shape'], {}), '(_shape)\n', (5865, 5873), True, 'import numpy as np\n')] |
"""
Logic for model creation, training launching and actions needed to be
accomplished during training (metrics monitor, model saving etc.)
"""
import os
import time
import json
import numpy as np
import tensorflow as tf
from datetime import datetime
from tensorflow.keras import Sequential
from src.datasets import load
from src.utils.callbacks import create_callbacks
from tensorflow.keras.layers import Dense, Dropout
from sklearn.model_selection import StratifiedKFold
def train(config):
    """Run 10-fold cross-validated training of the heart-disease classifier.

    For every fold a fresh model is compiled, trained with early-stopping
    callbacks, evaluated on the held-out split, and its predictions are
    cross-checked against the reloaded checkpoint. A summary line with the
    accuracy std/mean across folds is appended to results/summary.csv.

    :param config: dict-like configuration with 'data.*', 'train.*',
        'model.save_path', 'output.*' and 'summary.save_path' entries.
    """
    np.random.seed(2020)
    tf.random.set_seed(2020)

    # Useful data
    now = datetime.now()
    now_as_str = now.strftime('%y_%m_%d-%H:%M:%S')

    # Output files
    checkpoint_path = config['model.save_path']
    config_path = config['output.config_path'].format(date=now_as_str)
    csv_output_path = config['output.train_path'].format(date=now_as_str)
    tensorboard_summary_dir = config['summary.save_path']
    summary_path = "results/summary.csv"

    # Output dirs
    data_dir = "data/"
    config_dir = config_path[:config_path.rfind('/')]
    output_dir = csv_output_path[:csv_output_path.rfind('/')]

    # Create folders for the config snapshot and training output
    os.makedirs(config_dir, exist_ok=True)
    os.makedirs(output_dir, exist_ok=True)

    # Persist the configuration used for this run
    with open(config_path, 'w') as config_file:
        config_file.write(json.dumps(config, indent=2))

    # Truncate/create the per-run training output file
    with open(csv_output_path, 'w') as output_file:
        output_file.write("")

    # Create summary file with a CSV header if it does not exist yet
    if not os.path.exists(summary_path):
        with open(summary_path, 'w') as summary_file:
            summary_file.write("datetime, model, config, acc_std, acc_mean\n")

    # Data loader
    os.makedirs(data_dir, exist_ok=True)
    _, X, y = load(data_dir, config, numeric=True)

    # Determine device
    if config['data.cuda']:
        cuda_num = config['data.gpu']
        device_name = f'GPU:{cuda_num}'
    else:
        device_name = 'CPU:0'

    time_start = time.time()

    # define 10-fold cross validation test harness
    skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
    cvscores = []
    print ("Running model performance validation... please wait!")
    for split, (train_index, test_index) in enumerate(skf.split(X, y)):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        # Compiles a model, prints the model summary, and saves the model diagram into a png file.
        input_shape = (X_train.shape[1],)
        model = create_model(input_shape=input_shape, learning_rate=config['train.lr'])
        model.summary()

        split_checkpoint_path = checkpoint_path.format(split=split)
        split_results_path = csv_output_path.format(split=split)
        split_checkpoint_dir = split_checkpoint_path[:split_checkpoint_path.rfind('/')]
        split_results_dir = split_results_path[:split_results_path.rfind('/')]

        # Create folders for the per-split model checkpoints and outputs
        os.makedirs(split_checkpoint_dir, exist_ok=True)
        os.makedirs(split_results_dir, exist_ok=True)

        tf.keras.utils.plot_model(model, os.path.join(split_results_dir, "keras_model.png"), show_shapes=True, show_layer_names=False)

        callbacks = create_callbacks(
            tensorboard_summary_dir.format(split=split),
            split_results_path,
            split_checkpoint_path,
            patience=config['train.patience']
        )

        # Fit the model
        with tf.device(device_name):
            history = model.fit(
                X_train,
                y_train,
                validation_split=0.1,
                epochs=config['train.epochs'],
                batch_size=config['data.batch_size'],
                use_multiprocessing=True,
                callbacks=callbacks
            )

        # evaluate the model on the held-out fold
        scores = model.evaluate(X_test, y_test, verbose=0)
        print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
        cvscores.append(scores[1] * 100)

        # Runs prediction on test data.
        predictions = tf.round(model.predict(X_test)).numpy().flatten()
        print("Predictions on test data:")
        print(predictions)

        model_path = tf.train.latest_checkpoint(split_checkpoint_dir, latest_filename=split_checkpoint_path)
        if not model_path:
            print("Skipping evaluation. No checkpoint found in: {}".format(split_checkpoint_dir))
        else:
            model_from_saved = tf.keras.models.load_model(model_path)
            model_from_saved.summary()
            # Runs test data through the reloaded model to make sure the results are same.
            predictions_from_saved = tf.round(model_from_saved.predict(X_test)).numpy().flatten()
            np.testing.assert_array_equal(predictions_from_saved, predictions)

    print ("Done.")
    print ("Summary report on mean and std.")
    # The average and standard deviation of the model performance
    print("%.2f%% (+/- %.2f%%)" % (np.mean(cvscores), np.std(cvscores)))

    time_end = time.time()

    summary = "{}, {}, df, {}, {}, {}\n".format(now_as_str, config['data.dataset'], config_path, np.std(cvscores), np.mean(cvscores))
    print(summary)
    print(cvscores)
    with open(summary_path, 'a+') as summary_file:
        summary_file.write(summary)

    elapsed = time_end - time_start
    # BUG FIX: seconds were previously computed as `elapsed - min*60`,
    # ignoring the hours component (and `min` shadowed the builtin).
    # divmod keeps hours/minutes/seconds consistent.
    hours, remainder = divmod(elapsed, 3600)
    minutes, seconds = divmod(remainder, 60)
    print(f"Training took: {hours:.2f}h {minutes:.2f}m {seconds:.2f}s!")
def create_model(input_shape, learning_rate=0.01, dropout_rate=0.0325):
    """
    Construct and compile the binary classifier used for heart-disease
    prediction.

    Architecture: four fully-connected ReLU hidden layers (16, 32, 32, 32
    units), each followed by dropout, and a single sigmoid output unit,
    optimized with Adam on binary cross-entropy.

    :param input_shape: tuple holding the number of input features, e.g. (13,).
    :param learning_rate: Adam optimizer learning rate.
    :param dropout_rate: dropout probability applied after every hidden
        layer (default preserves the previously hard-coded 0.0325).
    :return: compiled tf.keras Sequential model.
    """
    model = Sequential(name="Probabilistic_Classifier_for_Heart_Disease")
    model.add(Dense(16, kernel_initializer="normal", activation="relu", name="hidden_layer_1", input_dim=input_shape[0]))
    model.add(Dropout(dropout_rate))
    model.add(Dense(32, kernel_initializer="normal", activation="relu", name="hidden_layer_2"))
    model.add(Dropout(dropout_rate))
    model.add(Dense(32, kernel_initializer="normal", activation="relu", name="hidden_layer_3"))
    model.add(Dropout(dropout_rate))
    model.add(Dense(32, kernel_initializer="normal", activation="relu", name="hidden_layer_4"))
    model.add(Dropout(dropout_rate))
    model.add(Dense(1, activation="sigmoid", name="target"))
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
                  loss="binary_crossentropy",
                  metrics=["accuracy"])
    return model
| [
"src.datasets.load",
"sklearn.model_selection.StratifiedKFold",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.load_model",
"os.path.exists",
"numpy.mean",
"tensorflow.keras.Sequential",
"json.dumps",
"numpy.random.seed",
"numpy.testing.assert_array_equal",
"tensorflow.device",
"ten... | [((498, 518), 'numpy.random.seed', 'np.random.seed', (['(2020)'], {}), '(2020)\n', (512, 518), True, 'import numpy as np\n'), ((523, 547), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(2020)'], {}), '(2020)\n', (541, 547), True, 'import tensorflow as tf\n'), ((577, 591), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (589, 591), False, 'from datetime import datetime\n'), ((1802, 1838), 'src.datasets.load', 'load', (['data_dir', 'config'], {'numeric': '(True)'}), '(data_dir, config, numeric=True)\n', (1806, 1838), False, 'from src.datasets import load\n'), ((2113, 2124), 'time.time', 'time.time', ([], {}), '()\n', (2122, 2124), False, 'import time\n'), ((2187, 2246), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(10)', 'shuffle': '(True)', 'random_state': '(42)'}), '(n_splits=10, shuffle=True, random_state=42)\n', (2202, 2246), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((5316, 5327), 'time.time', 'time.time', ([], {}), '()\n', (5325, 5327), False, 'import time\n'), ((5946, 6007), 'tensorflow.keras.Sequential', 'Sequential', ([], {'name': '"""Probabilistic_Classifier_for_Heart_Disease"""'}), "(name='Probabilistic_Classifier_for_Heart_Disease')\n", (5956, 6007), False, 'from tensorflow.keras import Sequential\n'), ((1156, 1182), 'os.path.exists', 'os.path.exists', (['config_dir'], {}), '(config_dir)\n', (1170, 1182), False, 'import os\n'), ((1192, 1215), 'os.makedirs', 'os.makedirs', (['config_dir'], {}), '(config_dir)\n', (1203, 1215), False, 'import os\n'), ((1228, 1254), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (1242, 1254), False, 'import os\n'), ((1264, 1287), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (1275, 1287), False, 'import os\n'), ((1365, 1393), 'json.dumps', 'json.dumps', (['config'], {'indent': '(2)'}), '(config, indent=2)\n', (1375, 1393), False, 'import json\n'), ((1544, 1572), 'os.path.exists', 
'os.path.exists', (['summary_path'], {}), '(summary_path)\n', (1558, 1572), False, 'import os\n'), ((1731, 1755), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (1745, 1755), False, 'import os\n'), ((1765, 1786), 'os.makedirs', 'os.makedirs', (['data_dir'], {}), '(data_dir)\n', (1776, 1786), False, 'import os\n'), ((4487, 4579), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['split_checkpoint_dir'], {'latest_filename': 'split_checkpoint_path'}), '(split_checkpoint_dir, latest_filename=\n split_checkpoint_path)\n', (4513, 4579), True, 'import tensorflow as tf\n'), ((5426, 5442), 'numpy.std', 'np.std', (['cvscores'], {}), '(cvscores)\n', (5432, 5442), True, 'import numpy as np\n'), ((5444, 5461), 'numpy.mean', 'np.mean', (['cvscores'], {}), '(cvscores)\n', (5451, 5461), True, 'import numpy as np\n'), ((6023, 6134), 'tensorflow.keras.layers.Dense', 'Dense', (['(16)'], {'kernel_initializer': '"""normal"""', 'activation': '"""relu"""', 'name': '"""hidden_layer_1"""', 'input_dim': 'input_shape[0]'}), "(16, kernel_initializer='normal', activation='relu', name=\n 'hidden_layer_1', input_dim=input_shape[0])\n", (6028, 6134), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((6145, 6160), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.0325)'], {}), '(0.0325)\n', (6152, 6160), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((6176, 6261), 'tensorflow.keras.layers.Dense', 'Dense', (['(32)'], {'kernel_initializer': '"""normal"""', 'activation': '"""relu"""', 'name': '"""hidden_layer_2"""'}), "(32, kernel_initializer='normal', activation='relu', name='hidden_layer_2'\n )\n", (6181, 6261), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((6272, 6287), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.0325)'], {}), '(0.0325)\n', (6279, 6287), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((6303, 6388), 'tensorflow.keras.layers.Dense', 'Dense', (['(32)'], 
{'kernel_initializer': '"""normal"""', 'activation': '"""relu"""', 'name': '"""hidden_layer_3"""'}), "(32, kernel_initializer='normal', activation='relu', name='hidden_layer_3'\n )\n", (6308, 6388), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((6399, 6414), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.0325)'], {}), '(0.0325)\n', (6406, 6414), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((6430, 6515), 'tensorflow.keras.layers.Dense', 'Dense', (['(32)'], {'kernel_initializer': '"""normal"""', 'activation': '"""relu"""', 'name': '"""hidden_layer_4"""'}), "(32, kernel_initializer='normal', activation='relu', name='hidden_layer_4'\n )\n", (6435, 6515), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((6526, 6541), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.0325)'], {}), '(0.0325)\n', (6533, 6541), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((6557, 6602), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""', 'name': '"""target"""'}), "(1, activation='sigmoid', name='target')\n", (6562, 6602), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((3123, 3159), 'os.path.exists', 'os.path.exists', (['split_checkpoint_dir'], {}), '(split_checkpoint_dir)\n', (3137, 3159), False, 'import os\n'), ((3173, 3206), 'os.makedirs', 'os.makedirs', (['split_checkpoint_dir'], {}), '(split_checkpoint_dir)\n', (3184, 3206), False, 'import os\n'), ((3265, 3298), 'os.path.exists', 'os.path.exists', (['split_results_dir'], {}), '(split_results_dir)\n', (3279, 3298), False, 'import os\n'), ((3312, 3342), 'os.makedirs', 'os.makedirs', (['split_results_dir'], {}), '(split_results_dir)\n', (3323, 3342), False, 'import os\n'), ((3393, 3443), 'os.path.join', 'os.path.join', (['split_results_dir', '"""keras_model.png"""'], {}), "(split_results_dir, 'keras_model.png')\n", (3405, 3443), False, 'import os\n'), ((3744, 3766), 'tensorflow.device', 'tf.device', 
(['device_name'], {}), '(device_name)\n', (3753, 3766), True, 'import tensorflow as tf\n'), ((4746, 4784), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_path'], {}), '(model_path)\n', (4772, 4784), True, 'import tensorflow as tf\n'), ((5026, 5092), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['predictions_from_saved', 'predictions'], {}), '(predictions_from_saved, predictions)\n', (5055, 5092), True, 'import numpy as np\n'), ((6633, 6686), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (6657, 6686), True, 'import tensorflow as tf\n'), ((5262, 5279), 'numpy.mean', 'np.mean', (['cvscores'], {}), '(cvscores)\n', (5269, 5279), True, 'import numpy as np\n'), ((5281, 5297), 'numpy.std', 'np.std', (['cvscores'], {}), '(cvscores)\n', (5287, 5297), True, 'import numpy as np\n')] |
import bayesnewton
import numpy as np
from bayesnewton.utils import solve
from jax.config import config
config.update("jax_enable_x64", True)
import pytest
def wiggly_time_series(x_):
    """Noisy synthetic signal: cos(0.04x + 0.33*pi) * sin(0.2x) plus
    Gaussian observation noise with variance ``noise_var``.

    :param x_: array of input locations.
    :return: array of noisy observations, same shape as ``x_``.
    """
    noise_var = 0.15  # true observation noise
    # BUG FIX: np.math was a deprecated alias of the stdlib math module and
    # was removed in NumPy 2.0; use np.sqrt instead.
    return (np.cos(0.04*x_+0.33*np.pi) * np.sin(0.2*x_) +
            np.sqrt(noise_var) * np.random.normal(0, 1, x_.shape))
def build_data(N):
    """Generate ``N`` unevenly spaced, sorted inputs with noisy observations.

    The points are sorted because MarkovGP sorts its inputs internally, so
    the plain GP must see them in the same order for a fair comparison.

    :param N: number of data points to generate.
    :return: tuple ``(x, y)`` where ``x`` has shape (N, 1) and ``y`` is 1-D.
    """
    # Jittered grid, randomly permuted -> unevenly spaced locations.
    jitter = 0.5 * np.random.randn(N)
    locations = np.random.permutation(np.linspace(-25.0, 150.0, num=N) + jitter)
    x = np.sort(locations)  # since MarkovGP sorts the inputs, they must also be sorted for GP
    y = wiggly_time_series(x)
    return x[:, None], y
def initialise_gp_model(var_f, len_f, var_y, x, y):
    """Build a variational GP with a Matern-5/2 kernel and Gaussian likelihood.

    :param var_f: kernel (signal) variance.
    :param len_f: kernel lengthscale.
    :param var_y: Gaussian observation-noise variance.
    :param x: training inputs.
    :param y: training observations.
    :return: a bayesnewton VariationalGP model.
    """
    kern = bayesnewton.kernels.Matern52(variance=var_f, lengthscale=len_f)
    lik = bayesnewton.likelihoods.Gaussian(variance=var_y)
    return bayesnewton.models.VariationalGP(kernel=kern, likelihood=lik, X=x, Y=y)
@pytest.mark.parametrize('var_f', [0.5, 1.5])
@pytest.mark.parametrize('len_f', [0.75, 2.5])
@pytest.mark.parametrize('var_y', [0.1, 0.5])
@pytest.mark.parametrize('N', [30, 60])
def test_marg_lik(var_f, len_f, var_y, N):
    """
    test whether VI with newt's GP and Gaussian likelihood gives the exact marginal likelihood
    """
    x, y = build_data(N)
    gp_model = initialise_gp_model(var_f, len_f, var_y, x, y)
    gp_model.inference(lr=1.)  # update variational params
    # energy() is the VI loss; per the assertion below it should equal the
    # negative exact log marginal likelihood for a conjugate (Gaussian) model.
    loss_gp = gp_model.energy()
    print(loss_gp)
    # Exact log marginal likelihood of a GP with Gaussian noise:
    # log N(y | 0, K_Y) with K_Y = K(X, X) + var_y * I.
    K_X = gp_model.kernel(x, x)
    K_Y = K_X + var_y * np.eye(K_X.shape[0])
    L_Y = np.linalg.cholesky(K_Y)
    # -1/2 y^T K_Y^{-1} y - 1/2 log|K_Y| - N/2 log(2*pi),
    # where 1/2 log|K_Y| = sum(log(diag(L_Y))) via the Cholesky factor.
    exact_marg_lik = (
        -0.5 * y.T @ solve(K_Y, y)
        - np.sum(np.log(np.diag(L_Y)))
        - 0.5 * y.shape[0] * np.log(2 * np.pi)
    )
    print(exact_marg_lik)
    # Sign flip: energy() is a loss (to be minimized), the marginal
    # likelihood is maximized.
    np.testing.assert_almost_equal(loss_gp, -exact_marg_lik, decimal=4)
| [
"numpy.random.normal",
"numpy.eye",
"numpy.math.sqrt",
"numpy.sin",
"numpy.sort",
"numpy.log",
"bayesnewton.utils.solve",
"bayesnewton.models.VariationalGP",
"numpy.diag",
"bayesnewton.kernels.Matern52",
"pytest.mark.parametrize",
"numpy.testing.assert_almost_equal",
"numpy.linspace",
"bay... | [((104, 141), 'jax.config.config.update', 'config.update', (['"""jax_enable_x64"""', '(True)'], {}), "('jax_enable_x64', True)\n", (117, 141), False, 'from jax.config import config\n'), ((1209, 1253), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""var_f"""', '[0.5, 1.5]'], {}), "('var_f', [0.5, 1.5])\n", (1232, 1253), False, 'import pytest\n'), ((1255, 1300), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""len_f"""', '[0.75, 2.5]'], {}), "('len_f', [0.75, 2.5])\n", (1278, 1300), False, 'import pytest\n'), ((1302, 1346), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""var_y"""', '[0.1, 0.5]'], {}), "('var_y', [0.1, 0.5])\n", (1325, 1346), False, 'import pytest\n'), ((1348, 1386), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""N"""', '[30, 60]'], {}), "('N', [30, 60])\n", (1371, 1386), False, 'import pytest\n'), ((528, 538), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (535, 538), True, 'import numpy as np\n'), ((966, 1029), 'bayesnewton.kernels.Matern52', 'bayesnewton.kernels.Matern52', ([], {'variance': 'var_f', 'lengthscale': 'len_f'}), '(variance=var_f, lengthscale=len_f)\n', (994, 1029), False, 'import bayesnewton\n'), ((1047, 1095), 'bayesnewton.likelihoods.Gaussian', 'bayesnewton.likelihoods.Gaussian', ([], {'variance': 'var_y'}), '(variance=var_y)\n', (1079, 1095), False, 'import bayesnewton\n'), ((1108, 1193), 'bayesnewton.models.VariationalGP', 'bayesnewton.models.VariationalGP', ([], {'kernel': 'kernel', 'likelihood': 'likelihood', 'X': 'x', 'Y': 'y'}), '(kernel=kernel, likelihood=likelihood, X=x, Y=y\n )\n', (1140, 1193), False, 'import bayesnewton\n'), ((1829, 1852), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['K_Y'], {}), '(K_Y)\n', (1847, 1852), True, 'import numpy as np\n'), ((2047, 2114), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['loss_gp', '(-exact_marg_lik)'], {'decimal': '(4)'}), '(loss_gp, -exact_marg_lik, decimal=4)\n', (2077, 2114), True, 'import 
numpy as np\n'), ((245, 277), 'numpy.cos', 'np.cos', (['(0.04 * x_ + 0.33 * np.pi)'], {}), '(0.04 * x_ + 0.33 * np.pi)\n', (251, 277), True, 'import numpy as np\n'), ((274, 290), 'numpy.sin', 'np.sin', (['(0.2 * x_)'], {}), '(0.2 * x_)\n', (280, 290), True, 'import numpy as np\n'), ((303, 326), 'numpy.math.sqrt', 'np.math.sqrt', (['noise_var'], {}), '(noise_var)\n', (315, 326), True, 'import numpy as np\n'), ((329, 361), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'x_.shape'], {}), '(0, 1, x_.shape)\n', (345, 361), True, 'import numpy as np\n'), ((442, 474), 'numpy.linspace', 'np.linspace', (['(-25.0)', '(150.0)'], {'num': 'N'}), '(-25.0, 150.0, num=N)\n', (453, 474), True, 'import numpy as np\n'), ((1798, 1818), 'numpy.eye', 'np.eye', (['K_X.shape[0]'], {}), '(K_X.shape[0])\n', (1804, 1818), True, 'import numpy as np\n'), ((1991, 2008), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1997, 2008), True, 'import numpy as np\n'), ((481, 499), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (496, 499), True, 'import numpy as np\n'), ((1901, 1914), 'bayesnewton.utils.solve', 'solve', (['K_Y', 'y'], {}), '(K_Y, y)\n', (1906, 1914), False, 'from bayesnewton.utils import solve\n'), ((1943, 1955), 'numpy.diag', 'np.diag', (['L_Y'], {}), '(L_Y)\n', (1950, 1955), True, 'import numpy as np\n')] |
# written for "run1.bag"
# this script replaces nan values with inf to comply with REP specifications: https://www.ros.org/reps/rep-0117.html
# run "rosbag compress run1_fixed.bag" after processing
import rosbag
import numpy as np

with rosbag.Bag('run1_fixed.bag', 'w') as outbag:
    for topic, msg, t in rosbag.Bag('run1.bag').read_messages():
        if topic == "/scan":
            # REP 117: out-of-range readings must be +/-inf, never NaN.
            msg.ranges = [np.inf if np.isnan(r) else r for r in msg.ranges]
        outbag.write(topic, msg, t)
"numpy.isnan",
"rosbag.Bag"
] | [((239, 272), 'rosbag.Bag', 'rosbag.Bag', (['"""run1_fixed.bag"""', '"""w"""'], {}), "('run1_fixed.bag', 'w')\n", (249, 272), False, 'import rosbag\n'), ((309, 331), 'rosbag.Bag', 'rosbag.Bag', (['"""run1.bag"""'], {}), "('run1.bag')\n", (319, 331), False, 'import rosbag\n'), ((470, 493), 'numpy.isnan', 'np.isnan', (['msg.ranges[i]'], {}), '(msg.ranges[i])\n', (478, 493), True, 'import numpy as np\n')] |
from typing import List, Tuple, Union
import numpy as np
from z3 import And, Not
from quavl.lib.expressions.complex import ComplexVal
from quavl.lib.expressions.qbit import QbitVal
from quavl.lib.expressions.rqbit import RQbitVal
def qbit_equals_value(qbit: Union[QbitVal, RQbitVal], value: Tuple[Union[int, float], Union[int, float]]):
    """
    SMT constraint stating that a qbit equals a concrete two-component value.
    :param qbit: qbit (plain or reduced representation).
    :param value: pair of amplitudes for |0> and |1>.
    :return: SMT equals expression.
    """
    first, second = value
    if type(first) != int:
        # Complex amplitudes: constrain real and imaginary parts component-wise.
        return And(qbit.alpha.r == first.real, qbit.alpha.i == first.imag,
                   qbit.beta.r == second.real, qbit.beta.i == second.imag)
    if isinstance(qbit, RQbitVal):
        return And(qbit.z0 == bool(first), qbit.z1 == bool(second))
    return And(qbit.alpha.r == first, qbit.alpha.i == 0,
               qbit.beta.r == second, qbit.beta.i == 0)
def rqbit_equals_rqbit(rqbit_a: RQbitVal, rqbit_b: RQbitVal):
    """
    SMT RQbit equals other RQbit.

    Fixed: the previous version chained the component equalities with
    Python's `and`, which cannot combine symbolic z3 expressions (calling
    bool() on a non-literal BoolRef raises). The equalities are now joined
    with z3's And(), consistent with the other helpers in this module.
    :param rqbit_a: first.
    :param rqbit_b: second.
    :return: SMT equals expression (z3 And over all components).
    """
    return And(rqbit_a.z0 == rqbit_b.z0, rqbit_a.z1 == rqbit_b.z1,
               rqbit_a.h0 == rqbit_b.h0, rqbit_a.h1 == rqbit_b.h1,
               rqbit_a.zm0 == rqbit_b.zm0, rqbit_a.zm1 == rqbit_b.zm1,
               rqbit_a.hm0 == rqbit_b.hm0, rqbit_a.hm1 == rqbit_b.hm1,
               rqbit_a.v0 == rqbit_b.v0, rqbit_a.v1 == rqbit_b.v1)
def qbit_isclose_to_value(qbit: QbitVal, value: Tuple[Union[int, float], Union[int, float]],
                          delta: float = 0.0001):
    """
    SMT qbit is close to value, within an absolute tolerance.
    :param qbit: qbit.
    :param value: pair of target amplitudes (int or complex).
    :param delta: error tolerance (absolute).
    :return: SMT expression bounding each amplitude component in [target - delta, target + delta].
    """
    # Resolve the four target components; integers have zero imaginary parts.
    if type(value[0]) == int:
        alpha_r, alpha_i = value[0], 0
        beta_r, beta_i = value[1], 0
    else:
        alpha_r, alpha_i = value[0].real, value[0].imag
        beta_r, beta_i = value[1].real, value[1].imag
    return And(alpha_r - delta <= qbit.alpha.r, qbit.alpha.r <= alpha_r + delta,
               alpha_i - delta <= qbit.alpha.i, qbit.alpha.i <= alpha_i + delta,
               beta_r - delta <= qbit.beta.r, qbit.beta.r <= beta_r + delta,
               beta_i - delta <= qbit.beta.i, qbit.beta.i <= beta_i + delta)
def qbits_equal(qbit_a: QbitVal, qbit_b: QbitVal):
    """
    SMT constraint stating that two qbits have identical amplitudes.
    :param qbit_a: first.
    :param qbit_b: second.
    :return: SMT equals expression.
    """
    component_pairs = [
        (qbit_a.alpha.r, qbit_b.alpha.r),
        (qbit_a.alpha.i, qbit_b.alpha.i),
        (qbit_a.beta.r, qbit_b.beta.r),
        (qbit_a.beta.i, qbit_b.beta.i),
    ]
    return And(*[lhs == rhs for lhs, rhs in component_pairs])
def state_equals(psi: List, psi_prime: List):
    """SMT constraint: the two state vectors are equal component-wise."""
    if len(psi) != len(psi_prime):
        raise Exception(
            f'States are not the same dimension, first is dimension {len(psi)}, second is dimension {len(psi_prime)}.')
    return And([amp_prime == amp for amp, amp_prime in zip(psi, psi_prime)])
def state_equals_phase_oracle(psi: List, psi_prime: List, oracle_value: int):
    """SMT constraint: psi_prime equals psi with the phase of one basis state flipped."""
    if len(psi) != len(psi_prime):
        raise Exception(
            f'States are not the same dimension, first is dimension {len(psi)}, second is dimension {len(psi_prime)}.')
    if oracle_value > len(psi) - 1:
        raise Exception(f'Oracle value {oracle_value} is not in the value range 0 to {len(psi)}.')
    constraints = []
    for idx, (amp, amp_prime) in enumerate(zip(psi, psi_prime)):
        if idx == oracle_value:
            # The marked basis state has its sign flipped by the oracle.
            constraints.append(amp_prime == amp * (-1))
        else:
            constraints.append(amp_prime == amp)
    return And(constraints)
def state_not_equals(psi: List, psi_prime: List):
    """SMT constraint: the two state vectors differ in at least one component."""
    if len(psi) != len(psi_prime):
        raise Exception("States are not the same dimension.")
    equalities = [amp == amp_prime for amp, amp_prime in zip(psi, psi_prime)]
    # "Not all equal" == "differ somewhere".
    return Not(And(equalities))
def qbit_kron(qbit_a: QbitVal, qbit_b: QbitVal) -> List:
    """
    Kronecker product of two qbits.
    :param qbit_a: first qbit.
    :param qbit_b: second qbit.
    :return: Kronecker product as a complex vector.
    """
    left = qbit_a.to_complex_list()
    right = qbit_b.to_complex_list()
    return complex_kron(left, right)
def qbit_kron_n_ary(qbits: List[QbitVal]) -> List:
    """
    Kronecker product of n qbits, folded left to right.
    :param qbits: list of qbits.
    :return: N-ary Kronecker product (None for an empty list).
    """
    product = None
    for qbit in qbits:
        vec = qbit.to_complex_list()
        product = vec if product is None else complex_kron(product, vec)
    return product
def complex_kron(vector_a: 'List[ComplexVal]', vector_b: 'List[ComplexVal]') -> 'List':
    """
    Kronecker product of two complex vectors.

    Entry (len(vector_b) * i + j) of the result is vector_a[i] * vector_b[j],
    i.e. the standard row-major Kronecker ordering.
    :param vector_a: first vector.
    :param vector_b: second vector.
    :return: Kronecker product.
    """
    return [a * b for a in vector_a for b in vector_b]
def complex_kron_n_ary(vectors: List[List[ComplexVal]]) -> List:
    """
    N-ary Kronecker product of >= 2 complex vectors.
    :param vectors: list of complex vectors.
    :return: N-ary Kronecker product.
    """
    if len(vectors) == 0 or not isinstance(vectors[0][0], ComplexVal):
        raise Exception('Illegal argument: needs to be a list of at least 2 complex vectors.')
    product = vectors[0]
    for vector in vectors[1:]:
        product = complex_kron(product, vector)
    return product
def kron(vectors: List[np.ndarray]) -> np.ndarray:
    """
    Numpy-based n-ary Kronecker product.
    :param vectors: list of vectors or matrices.
    :return: N-ary Kronecker product (None for an empty list).
    """
    result = None
    for item in vectors:
        result = item if result is None else np.kron(result, item)
    return result
def matmul(matrices: List[np.ndarray]) -> np.ndarray:
    """
    Numpy n-ary matrix product.

    NOTE: each matrix is left-multiplied onto the running product, so
    matmul([A, B, C]) evaluates C @ B @ A (reversed application order).
    :param matrices: list of matrices.
    :return: N-ary matrix product (None for an empty list).
    """
    result = None
    for matrix in matrices:
        result = matrix if result is None else np.matmul(matrix, result)
    return result
def matrix_vector_multiplication(matrix: List[List], vector: List) -> List:
    """
    Multiply a matrix (list of rows) by a complex vector.

    Fixes a row/column mix-up in the previous version, which iterated rows
    with the column count and columns with the row count — correct only for
    square matrices (and crashing or mis-indexing otherwise). For an
    n x m matrix, the result has n entries with
    result[i] = sum_k matrix[i][k] * vector[k] over the m columns.
    :param matrix: list of n rows, each of length m == len(vector).
    :param vector: list of m coefficients.
    :return: list of n entries.
    """
    n = len(matrix)      # number of rows
    m = len(matrix[0])   # number of columns
    if m != len(vector):
        raise Exception(f'Matrix column count ({m}) has to be equal to vector row count ({len(vector)}).')
    # One fresh ComplexVal(0) per entry: a shared instance (the old
    # [ComplexVal(0)] * m idiom) would alias all accumulators if += ever
    # mutates in place.
    output_vector = [ComplexVal(0) for _ in range(n)]
    for i in range(n):
        for k in range(m):
            entry = matrix[i][k]
            # Skip exact symbolic zeros to keep the resulting expressions small.
            if isinstance(entry, ComplexVal) and entry.r == 0 and entry.i == 0:
                continue
            output_vector[i] += entry * vector[k]
    return output_vector
| [
"numpy.kron",
"z3.And",
"numpy.matmul",
"quavl.lib.expressions.complex.ComplexVal"
] | [((2741, 2881), 'z3.And', 'And', (['(qbit_a.alpha.r == qbit_b.alpha.r)', '(qbit_a.alpha.i == qbit_b.alpha.i)', '(qbit_a.beta.r == qbit_b.beta.r)', '(qbit_a.beta.i == qbit_b.beta.i)'], {}), '(qbit_a.alpha.r == qbit_b.alpha.r, qbit_a.alpha.i == qbit_b.alpha.i, \n qbit_a.beta.r == qbit_b.beta.r, qbit_a.beta.i == qbit_b.beta.i)\n', (2744, 2881), False, 'from z3 import And, Not\n'), ((3230, 3243), 'z3.And', 'And', (['elements'], {}), '(elements)\n', (3233, 3243), False, 'from z3 import And, Not\n'), ((3857, 3870), 'z3.And', 'And', (['elements'], {}), '(elements)\n', (3860, 3870), False, 'from z3 import And, Not\n'), ((647, 743), 'z3.And', 'And', (['(qbit.alpha.r == value[0])', '(qbit.alpha.i == 0)', '(qbit.beta.r == value[1])', '(qbit.beta.i == 0)'], {}), '(qbit.alpha.r == value[0], qbit.alpha.i == 0, qbit.beta.r == value[1], \n qbit.beta.i == 0)\n', (650, 743), False, 'from z3 import And, Not\n'), ((783, 913), 'z3.And', 'And', (['(qbit.alpha.r == value[0].real)', '(qbit.alpha.i == value[0].imag)', '(qbit.beta.r == value[1].real)', '(qbit.beta.i == value[1].imag)'], {}), '(qbit.alpha.r == value[0].real, qbit.alpha.i == value[0].imag, qbit.beta\n .r == value[1].real, qbit.beta.i == value[1].imag)\n', (786, 913), False, 'from z3 import And, Not\n'), ((1865, 2109), 'z3.And', 'And', (['(value[0] - delta <= qbit.alpha.r)', '(qbit.alpha.r <= value[0] + delta)', '(-delta <= qbit.alpha.i)', '(qbit.alpha.i <= delta)', '(value[1] - delta <= qbit.beta.r)', '(qbit.beta.r <= value[1] + delta)', '(-delta <= qbit.beta.i)', '(qbit.beta.i <= delta)'], {}), '(value[0] - delta <= qbit.alpha.r, qbit.alpha.r <= value[0] + delta, -\n delta <= qbit.alpha.i, qbit.alpha.i <= delta, value[1] - delta <= qbit.\n beta.r, qbit.beta.r <= value[1] + delta, -delta <= qbit.beta.i, qbit.\n beta.i <= delta)\n', (1868, 2109), False, 'from z3 import And, Not\n'), ((2177, 2507), 'z3.And', 'And', (['(value[0].real - delta <= qbit.alpha.r)', '(qbit.alpha.r <= value[0].real + delta)', '(value[0].imag - 
delta <= qbit.alpha.i)', '(qbit.alpha.i <= value[0].imag + delta)', '(value[1].real - delta <= qbit.beta.r)', '(qbit.beta.r <= value[1].real + delta)', '(value[1].imag - delta <= qbit.beta.i)', '(qbit.beta.i <= value[1].imag + delta)'], {}), '(value[0].real - delta <= qbit.alpha.r, qbit.alpha.r <= value[0].real +\n delta, value[0].imag - delta <= qbit.alpha.i, qbit.alpha.i <= value[0].\n imag + delta, value[1].real - delta <= qbit.beta.r, qbit.beta.r <= \n value[1].real + delta, value[1].imag - delta <= qbit.beta.i, qbit.beta.\n i <= value[1].imag + delta)\n', (2180, 2507), False, 'from z3 import And, Not\n'), ((4134, 4147), 'z3.And', 'And', (['elements'], {}), '(elements)\n', (4137, 4147), False, 'from z3 import And, Not\n'), ((6386, 6420), 'numpy.kron', 'np.kron', (['kronecker_product', 'vector'], {}), '(kronecker_product, vector)\n', (6393, 6420), True, 'import numpy as np\n'), ((6770, 6796), 'numpy.matmul', 'np.matmul', (['matrix', 'product'], {}), '(matrix, product)\n', (6779, 6796), True, 'import numpy as np\n'), ((7138, 7151), 'quavl.lib.expressions.complex.ComplexVal', 'ComplexVal', (['(0)'], {}), '(0)\n', (7148, 7151), False, 'from quavl.lib.expressions.complex import ComplexVal\n')] |
from sigpy.polys.polynomials import Polynomial
from sigpy.sage import sage_primal, sage_dual, sage_feasibility, hierarchy_e_k, relative_c_sage, relative_c_sage_star, \
relative_coeff_vector
import cvxpy
import numpy as np
from itertools import combinations_with_replacement
def sage_poly_dual(p, level=0):
    """Dual SAGE relaxation for polynomial ``p``, via its signomial representative."""
    sig_rep, aux_cons = p.sig_rep
    # aux_cons is empty whenever p.c contains no cvxpy Expressions.
    return sage_dual(sig_rep, level, additional_cons=aux_cons)
def sage_poly_primal(p, level=0):
    """Primal SAGE relaxation for polynomial ``p``, via its signomial representative."""
    sig_rep, aux_cons = p.sig_rep
    # aux_cons is empty whenever p.c contains no cvxpy Expressions.
    return sage_primal(sig_rep, level, additional_cons=aux_cons)
def sage_poly_feasibility(p):
    """SAGE feasibility problem for polynomial ``p``, via its signomial representative."""
    sig_rep, aux_cons = p.sig_rep
    # aux_cons is empty whenever p.c contains no cvxpy Expressions.
    return sage_feasibility(sig_rep, additional_cons=aux_cons)
def sage_poly_multiplier_search(p, level=1):
    """
    Search for a SAGE certificate of nonnegativity for p via a SAGE multiplier.

    Build a multiplier polynomial mult = Polynomial(alpha_hat, c_tilde), where
    the rows of alpha_hat are "level"-wise sums of rows of p's exponent set and
    c_tilde is a CVXPY Variable constrained to define a nonzero SAGE polynomial.
    The returned problem is feasible iff p * mult is SAGE for some such mult —
    an alternative to climbing the usual SAGE hierarchy.
    :param p: a Polynomial object
    :param level: a nonnegative integer
    :return: a CVXPY Problem that is feasible iff p * mult is SAGE for some SAGE multiplier Polynomial "mult".
    """
    p.remove_terms_with_zero_as_coefficient()
    # Multiplier polynomial with variable coefficients; must itself be SAGE.
    alpha_hat = hierarchy_e_k([p], k=level)
    c_tilde = cvxpy.Variable(alpha_hat.shape[0], name='c_tilde')
    mult = Polynomial(alpha_hat, c_tilde)
    mult_sig, mult_sig_cons = mult.sig_rep
    constraints = list(mult_sig_cons)
    constraints += relative_c_sage(mult_sig)
    # Rule out the trivial (zero) multiplier.
    constraints.append(cvxpy.sum(c_tilde) >= 1)
    # Require that the product p * mult be SAGE.
    product = mult * p
    product_sig, product_sig_cons = product.sig_rep
    constraints += product_sig_cons
    constraints += relative_c_sage(product_sig)
    # noinspection PyTypeChecker
    return cvxpy.Problem(cvxpy.Maximize(0), constraints)
def constrained_sage_poly_primal(f, gs, p=0, q=1):
    r"""
    Compute the primal f_{SAGE}^{(p, q)} bound for
        inf f(x) : g(x) >= 0 for g \in gs.
    :param f: a Polynomial.
    :param gs: a list of Polynomials.
    :param p: a nonnegative integer.
    :param q: a positive integer.
    :return: a CVXPY Problem that defines the primal formulation for f_{SAGE}^{(p, q)}.
    """
    lagrangian, dualized_polynomials = make_poly_lagrangian(f, gs, p=p, q=q, add_constant_poly=(q != 1))
    # The Lagrangian must be a SAGE polynomial.
    lag_sig, lag_sig_cons = lagrangian.sig_rep
    constraints = list(lag_sig_cons)
    constraints += relative_c_sage(lag_sig)
    # Every generalized Lagrange multiplier must be a SAGE polynomial too.
    for s_h, _ in dualized_polynomials:
        sig, sig_cons = s_h.sig_rep
        constraints += sig_cons
        constraints += relative_c_sage(sig)
    # Maximize the "gamma" variable buried in the Lagrangian's constant term.
    objective = None
    for var in lagrangian.constant_term().variables():
        if var.name() == 'gamma':
            objective = cvxpy.Maximize(var)
            break
    return cvxpy.Problem(objective, constraints)
def constrained_sage_poly_dual(f, gs, p=0, q=1):
    r"""
    Compute the dual f_{SAGE}^{(p, q)} bound for
        inf f(x) : g(x) >= 0 for g \in gs.
    :param f: a Polynomial.
    :param gs: a list of Polynomials.
    :param p: a nonnegative integer,
    :param q: a positive integer.
    :return: a CVXPY Problem that defines the dual formulation for f_{SAGE}^{(p, q)}.
    """
    lagrangian, dualized_polynomials = make_poly_lagrangian(f, gs, p=p, q=q, add_constant_poly=(q != 1))
    # Dual variable "v" for the primal constraint "lagrangian is SAGE".
    v = cvxpy.Variable(shape=(lagrangian.m, 1))
    constraints = relative_c_poly_sage_star(lagrangian, v)
    # Normalization: the coefficient paired with the constant monomial is 1.
    const_vec = relative_coeff_vector(Polynomial({(0,) * f.n: 1}), lagrangian.alpha)
    const_vec = const_vec.reshape(const_vec.size, 1)
    constraints.append(const_vec.T * v == 1)
    # Each SAGE multiplier s_h gets its own dual variable v_h, linked to v.
    for s_h, h in dualized_polynomials:
        v_h = cvxpy.Variable(name='v_h_' + str(s_h), shape=(s_h.m, 1))
        constraints += relative_c_poly_sage_star(s_h, v_h)
        c_h = hierarchy_c_h_array(s_h, h, lagrangian)
        constraints.append(c_h * v == v_h)
    # Dual objective: minimize <f, v> over the Lagrangian's support.
    obj_vec = relative_coeff_vector(f, lagrangian.alpha)
    return cvxpy.Problem(cvxpy.Minimize(obj_vec * v), constraints)
def make_poly_lagrangian(f, gs, p, q, add_constant_poly=True):
    r"""
    Given a problem \inf{ f(x) : g(x) >= 0 for g in gs}, construct the q-fold constraints H,
    and the lagrangian
        L = f - \gamma - \sum_{h \in H} s_h * h
    where \gamma and the coefficients on Polynomials s_h are CVXPY Variables.

    Fixed: the previous version appended the constant polynomial directly to
    the caller's list, so calling this twice with the same "gs" (as the primal
    and dual builders do) accumulated constant polynomials in the caller's
    list. A shallow copy is taken first.
    :param f: a Polynomial (or a constant numeric type).
    :param gs: a nonempty list of Polynomials. Not mutated.
    :param p: a nonnegative integer. Defines the exponent set of the Polynomials s_h.
    :param q: a positive integer. Defines "H" as all products of q elements from gs.
    :param add_constant_poly: a boolean. If True, makes sure that "gs" contains a
    Polynomial that is identically equal to 1.
    :return: a Polynomial object with coefficients as affine expressions of CVXPY Variables,
    and a list of pairs (s_h, h) where s_h is a generalized Lagrange multiplier and h a
    product-form constraint Polynomial.
    """
    if not all([isinstance(g, Polynomial) for g in gs]):
        raise RuntimeError('Constraints must be Polynomial objects.')
    gs = list(gs)  # work on a copy so the caller's list is not mutated
    if add_constant_poly:
        gs.append(Polynomial({(0,) * gs[0].n: 1}))
    if not isinstance(f, Polynomial):
        f = Polynomial({(0,) * gs[0].n: f})
    gs = set(gs)  # remove duplicates
    hs = set([np.prod(comb) for comb in combinations_with_replacement(gs, q)])
    gamma = cvxpy.Variable(name='gamma')
    lagrangian = f - gamma
    alpha_E_p = hierarchy_e_k([f] + list(gs), k=p)
    dualized_polynomials = []
    for h in hs:
        temp_shc = cvxpy.Variable(name='shc_' + str(h), shape=(alpha_E_p.shape[0],))
        temp_sh = Polynomial(alpha_E_p, temp_shc)
        lagrangian -= temp_sh * h
        dualized_polynomials.append((temp_sh, h))
    return lagrangian, dualized_polynomials
def relative_c_poly_sage_star(p, y):
    """
    Dualize the constraint "p is a SAGE polynomial".
    :param p: a Polynomial
    :param y: a cvxpy Variable with y.shape == (p.m, 1).
    :return: CVXPY Constraints over y (and additional auxiliary variables, as
    necessary) so that y defines a dual variable to the constraint that
    "p is a SAGE polynomial."
    """
    # NOTE(review): sr_cons is unpacked but never added to the returned
    # constraints — verify whether the sig-rep constraints are enforced elsewhere.
    sr, sr_cons = p.sig_rep
    constrs = []
    # Indices of exponent rows with all-even entries (their sign is unrestricted
    # in the signomial representative).
    evens = [i for i, row in enumerate(sr.alpha) if np.all(row % 2 == 0)]
    if len(evens) < sr.m:
        # Mixed case: some exponent rows have an odd entry.
        # NOTE(review): is_even is computed but never used below.
        is_even = np.zeros(shape=(sr.m,), dtype=bool)
        is_even[evens] = True
        lambda_1_expr = []
        lambda_2_expr = []
        mu_expr = []
        for i in range(sr.m):
            if i in evens:
                # Even rows: free component mu only.
                lambda_1_expr.append(0)
                lambda_2_expr.append(0)
                mu_expr.append(cvxpy.Variable(shape=()))
            else:
                # Odd rows: split into two nonnegative components.
                lambda_1_expr.append(cvxpy.Variable(shape=(), nonneg=True))
                lambda_2_expr.append(cvxpy.Variable(shape=(), nonneg=True))
                mu_expr.append(0)
        lambda_1_expr = cvxpy.vstack(lambda_1_expr)
        lambda_2_expr = cvxpy.vstack(lambda_2_expr)
        mu_expr = cvxpy.vstack(mu_expr)
        # v aggregates the components; y is the signed combination.
        v = cvxpy.Variable(shape=(sr.m, 1))
        constrs = [v == lambda_1_expr + lambda_2_expr + mu_expr,
                   y == lambda_2_expr - lambda_1_expr + mu_expr]
        constrs += relative_c_sage_star(sr, v)
    else:
        # All exponent rows are even: y dualizes the signomial SAGE cone directly.
        constrs += relative_c_sage_star(sr, y)
    return constrs
def hierarchy_c_h_array(s_h, h, lagrangian):
    r"""
    Assume (s_h * h).alpha is a subset of lagrangian.alpha.
    :param s_h: a SAGE multiplier Polynomial for the constrained hierarchy
    :param h: the constraint Polynomial
    :param lagrangian: the Polynomial f - \gamma - \sum_{h \in H} s_h * h.
    :return: a matrix c_h so that if "v" is a dual variable to the constraint
    "lagrangian is a SAGE polynomial", then the constraint "s_h is SAGE"
    dualizes to "c_h * v \in C_{SAGE}^{POLY}(s_h)^{\star}".
    """
    num_rows = s_h.alpha.shape[0]
    c_h = np.zeros((num_rows, lagrangian.alpha.shape[0]))
    for i in range(num_rows):
        # Row i of c_h is h shifted by the i-th monomial of s_h,
        # expressed over the Lagrangian's support.
        shifted = Polynomial({tuple(s_h.alpha[i]): 1}) * h
        c_h[i, :] = relative_coeff_vector(shifted, lagrangian.alpha)
    return c_h
| [
"cvxpy.Minimize",
"cvxpy.Variable",
"sigpy.polys.polynomials.Polynomial",
"cvxpy.Problem",
"sigpy.sage.relative_coeff_vector",
"sigpy.sage.sage_dual",
"numpy.prod",
"sigpy.sage.relative_c_sage",
"cvxpy.vstack",
"cvxpy.sum",
"sigpy.sage.relative_c_sage_star",
"numpy.zeros",
"cvxpy.Maximize",
... | [((441, 483), 'sigpy.sage.sage_dual', 'sage_dual', (['sr', 'level'], {'additional_cons': 'cons'}), '(sr, level, additional_cons=cons)\n', (450, 483), False, 'from sigpy.sage import sage_primal, sage_dual, sage_feasibility, hierarchy_e_k, relative_c_sage, relative_c_sage_star, relative_coeff_vector\n'), ((649, 693), 'sigpy.sage.sage_primal', 'sage_primal', (['sr', 'level'], {'additional_cons': 'cons'}), '(sr, level, additional_cons=cons)\n', (660, 693), False, 'from sigpy.sage import sage_primal, sage_dual, sage_feasibility, hierarchy_e_k, relative_c_sage, relative_c_sage_star, relative_coeff_vector\n'), ((855, 897), 'sigpy.sage.sage_feasibility', 'sage_feasibility', (['sr'], {'additional_cons': 'cons'}), '(sr, additional_cons=cons)\n', (871, 897), False, 'from sigpy.sage import sage_primal, sage_dual, sage_feasibility, hierarchy_e_k, relative_c_sage, relative_c_sage_star, relative_coeff_vector\n'), ((1791, 1818), 'sigpy.sage.hierarchy_e_k', 'hierarchy_e_k', (['[p]'], {'k': 'level'}), '([p], k=level)\n', (1804, 1818), False, 'from sigpy.sage import sage_primal, sage_dual, sage_feasibility, hierarchy_e_k, relative_c_sage, relative_c_sage_star, relative_coeff_vector\n'), ((1833, 1884), 'cvxpy.Variable', 'cvxpy.Variable', (['mult_alpha.shape[0]'], {'name': '"""c_tilde"""'}), "(mult_alpha.shape[0], name='c_tilde')\n", (1847, 1884), False, 'import cvxpy\n'), ((1896, 1927), 'sigpy.polys.polynomials.Polynomial', 'Polynomial', (['mult_alpha', 'c_tilde'], {}), '(mult_alpha, c_tilde)\n', (1906, 1927), False, 'from sigpy.polys.polynomials import Polynomial\n'), ((2014, 2038), 'sigpy.sage.relative_c_sage', 'relative_c_sage', (['mult_sr'], {}), '(mult_sr)\n', (2029, 2038), False, 'from sigpy.sage import sage_primal, sage_dual, sage_feasibility, hierarchy_e_k, relative_c_sage, relative_c_sage_star, relative_coeff_vector\n'), ((2276, 2300), 'sigpy.sage.relative_c_sage', 'relative_c_sage', (['poly_sr'], {}), '(poly_sr)\n', (2291, 2300), False, 'from sigpy.sage import 
sage_primal, sage_dual, sage_feasibility, hierarchy_e_k, relative_c_sage, relative_c_sage_star, relative_coeff_vector\n'), ((2344, 2361), 'cvxpy.Maximize', 'cvxpy.Maximize', (['(0)'], {}), '(0)\n', (2358, 2361), False, 'import cvxpy\n'), ((2373, 2404), 'cvxpy.Problem', 'cvxpy.Problem', (['obj', 'constraints'], {}), '(obj, constraints)\n', (2386, 2404), False, 'import cvxpy\n'), ((3072, 3102), 'sigpy.sage.relative_c_sage', 'relative_c_sage', (['lagrangian_sr'], {}), '(lagrangian_sr)\n', (3087, 3102), False, 'from sigpy.sage import sage_primal, sage_dual, sage_feasibility, hierarchy_e_k, relative_c_sage, relative_c_sage_star, relative_coeff_vector\n'), ((3680, 3707), 'cvxpy.Problem', 'cvxpy.Problem', (['obj', 'constrs'], {}), '(obj, constrs)\n', (3693, 3707), False, 'import cvxpy\n'), ((4356, 4395), 'cvxpy.Variable', 'cvxpy.Variable', ([], {'shape': '(lagrangian.m, 1)'}), '(shape=(lagrangian.m, 1))\n', (4370, 4395), False, 'import cvxpy\n'), ((5139, 5181), 'sigpy.sage.relative_coeff_vector', 'relative_coeff_vector', (['f', 'lagrangian.alpha'], {}), '(f, lagrangian.alpha)\n', (5160, 5181), False, 'from sigpy.sage import sage_primal, sage_dual, sage_feasibility, hierarchy_e_k, relative_c_sage, relative_c_sage_star, relative_coeff_vector\n'), ((5192, 5219), 'cvxpy.Minimize', 'cvxpy.Minimize', (['(obj_vec * v)'], {}), '(obj_vec * v)\n', (5206, 5219), False, 'import cvxpy\n'), ((5263, 5294), 'cvxpy.Problem', 'cvxpy.Problem', (['obj', 'constraints'], {}), '(obj, constraints)\n', (5276, 5294), False, 'import cvxpy\n'), ((7052, 7080), 'cvxpy.Variable', 'cvxpy.Variable', ([], {'name': '"""gamma"""'}), "(name='gamma')\n", (7066, 7080), False, 'import cvxpy\n'), ((9469, 9526), 'numpy.zeros', 'np.zeros', (['(s_h.alpha.shape[0], lagrangian.alpha.shape[0])'], {}), '((s_h.alpha.shape[0], lagrangian.alpha.shape[0]))\n', (9477, 9526), True, 'import numpy as np\n'), ((3298, 3321), 'sigpy.sage.relative_c_sage', 'relative_c_sage', (['s_h_sr'], {}), '(s_h_sr)\n', (3313, 3321), False, 
'from sigpy.sage import sage_primal, sage_dual, sage_feasibility, hierarchy_e_k, relative_c_sage, relative_c_sage_star, relative_coeff_vector\n'), ((4485, 4514), 'sigpy.polys.polynomials.Polynomial', 'Polynomial', (['{((0,) * f.n): 1}'], {}), '({((0,) * f.n): 1})\n', (4495, 4514), False, 'from sigpy.polys.polynomials import Polynomial\n'), ((6891, 6924), 'sigpy.polys.polynomials.Polynomial', 'Polynomial', (['{((0,) * gs[0].n): f}'], {}), '({((0,) * gs[0].n): f})\n', (6901, 6924), False, 'from sigpy.polys.polynomials import Polynomial\n'), ((7309, 7340), 'sigpy.polys.polynomials.Polynomial', 'Polynomial', (['alpha_E_p', 'temp_shc'], {}), '(alpha_E_p, temp_shc)\n', (7319, 7340), False, 'from sigpy.polys.polynomials import Polynomial\n'), ((7953, 7988), 'numpy.zeros', 'np.zeros', ([], {'shape': '(sr.m,)', 'dtype': 'bool'}), '(shape=(sr.m,), dtype=bool)\n', (7961, 7988), True, 'import numpy as np\n'), ((8516, 8543), 'cvxpy.vstack', 'cvxpy.vstack', (['lambda_1_expr'], {}), '(lambda_1_expr)\n', (8528, 8543), False, 'import cvxpy\n'), ((8568, 8595), 'cvxpy.vstack', 'cvxpy.vstack', (['lambda_2_expr'], {}), '(lambda_2_expr)\n', (8580, 8595), False, 'import cvxpy\n'), ((8614, 8635), 'cvxpy.vstack', 'cvxpy.vstack', (['mu_expr'], {}), '(mu_expr)\n', (8626, 8635), False, 'import cvxpy\n'), ((8648, 8679), 'cvxpy.Variable', 'cvxpy.Variable', ([], {'shape': '(sr.m, 1)'}), '(shape=(sr.m, 1))\n', (8662, 8679), False, 'import cvxpy\n'), ((8829, 8856), 'sigpy.sage.relative_c_sage_star', 'relative_c_sage_star', (['sr', 'v'], {}), '(sr, v)\n', (8849, 8856), False, 'from sigpy.sage import sage_primal, sage_dual, sage_feasibility, hierarchy_e_k, relative_c_sage, relative_c_sage_star, relative_coeff_vector\n'), ((8886, 8913), 'sigpy.sage.relative_c_sage_star', 'relative_c_sage_star', (['sr', 'y'], {}), '(sr, y)\n', (8906, 8913), False, 'from sigpy.sage import sage_primal, sage_dual, sage_feasibility, hierarchy_e_k, relative_c_sage, relative_c_sage_star, relative_coeff_vector\n'), ((9639, 
9689), 'sigpy.sage.relative_coeff_vector', 'relative_coeff_vector', (['temp_poly', 'lagrangian.alpha'], {}), '(temp_poly, lagrangian.alpha)\n', (9660, 9689), False, 'from sigpy.sage import sage_primal, sage_dual, sage_feasibility, hierarchy_e_k, relative_c_sage, relative_c_sage_star, relative_coeff_vector\n'), ((2062, 2080), 'cvxpy.sum', 'cvxpy.sum', (['c_tilde'], {}), '(c_tilde)\n', (2071, 2080), False, 'import cvxpy\n'), ((3598, 3615), 'cvxpy.Maximize', 'cvxpy.Maximize', (['v'], {}), '(v)\n', (3612, 3615), False, 'import cvxpy\n'), ((6808, 6841), 'sigpy.polys.polynomials.Polynomial', 'Polynomial', (['{((0,) * gs[0].n): 1}'], {}), '({((0,) * gs[0].n): 1})\n', (6818, 6841), False, 'from sigpy.polys.polynomials import Polynomial\n'), ((6975, 6988), 'numpy.prod', 'np.prod', (['comb'], {}), '(comb)\n', (6982, 6988), True, 'import numpy as np\n'), ((7887, 7907), 'numpy.all', 'np.all', (['(row % 2 == 0)'], {}), '(row % 2 == 0)\n', (7893, 7907), True, 'import numpy as np\n'), ((7001, 7037), 'itertools.combinations_with_replacement', 'combinations_with_replacement', (['gs', 'q'], {}), '(gs, q)\n', (7030, 7037), False, 'from itertools import combinations_with_replacement\n'), ((8262, 8286), 'cvxpy.Variable', 'cvxpy.Variable', ([], {'shape': '()'}), '(shape=())\n', (8276, 8286), False, 'import cvxpy\n'), ((8343, 8380), 'cvxpy.Variable', 'cvxpy.Variable', ([], {'shape': '()', 'nonneg': '(True)'}), '(shape=(), nonneg=True)\n', (8357, 8380), False, 'import cvxpy\n'), ((8419, 8456), 'cvxpy.Variable', 'cvxpy.Variable', ([], {'shape': '()', 'nonneg': '(True)'}), '(shape=(), nonneg=True)\n', (8433, 8456), False, 'import cvxpy\n')] |
#!/usr/bin/env python
"""Module for GCMSE --- Gradient Conduction Mean Square Error."""
import numpy as np
from scipy import ndimage
def GCMSE(ref_image, work_image, kappa=0.5, option=1):
    """GCMSE --- Gradient Conduction Mean Square Error.

    Edge-preservation-oriented image quality metric: differences between
    the two images are weighted by the (inverse) Perona-Malik conduction
    of the reference image, so disagreements near edges contribute more.
    Smaller GCMSE means more similar images. Works only for images of the
    same scale, size and geometry; intended for filtering assessment, not
    compression or contrast stretching.

    Fixed: any option other than 1 or 2 previously fell through to a
    NameError on the conduction arrays; it now raises a ValueError up front.

    Parameters
    ----------
    ref_image[]: Array of pixels. Pixel values 0 to 255.
        Reference image. The border regions are obtained from it.
    work_image[]: Array of pixels. Pixel values 0 to 255.
        Image that is compared to the reference one.
    kappa: decimal number. Values 0 to 1
        Conductance parameter: higher values increase how permissively
        pixels count as border regions (and how much they contribute).
    option: integer. Values: 1 or 2
        Selects which of the Perona-Malik equations is used.

    Returns
    -------
    gcmse: float
        GCMSE value between the two images (smaller = more similar).
    weight: float
        Amount of the image that has been taken into account.

    Raises
    ------
    ValueError
        If option is not 1 or 2.
    """
    if option not in (1, 2):
        raise ValueError("unsupported option: must be 1 or 2")
    # Normalization of the images to [0, 1] values.
    normed_ref_image = ref_image.astype('float32') / 255
    normed_work_image = work_image.astype('float32') / 255
    # South and east gradients of the reference image.
    gradient_S = np.zeros_like(normed_ref_image)
    gradient_E = gradient_S.copy()
    gradient_S[:-1, :] = np.diff(normed_ref_image, axis=0)
    gradient_E[:, :-1] = np.diff(normed_ref_image, axis=1)
    # Image conduction via the chosen Perona-Malik equation.
    if option == 1:
        cond_S = np.exp(-(gradient_S / kappa) ** 2)
        cond_E = np.exp(-(gradient_E / kappa) ** 2)
    else:
        cond_S = 1.0 / (1 + (gradient_S / kappa) ** 2)
        cond_E = 1.0 / (1 + (gradient_E / kappa) ** 2)
    # North/West components start at 1 so image corners behave as
    # homogeneous regions.
    cond_N = np.ones_like(normed_ref_image)
    cond_W = cond_N.copy()
    # Shift South/East values by one position to obtain North/West values.
    cond_N[1:, :] = cond_S[:-1, :]
    cond_W[:, 1:] = cond_E[:, :-1]
    # Conduction module is the mean of the 4 directional values.
    conduction = np.clip((cond_N + cond_S + cond_W + cond_E) / 4, 0., 1.)
    G = 1 - conduction
    # GCMSE: conduction-weighted squared error, renormalized by the weights.
    num = ((G * (normed_ref_image - normed_work_image)) ** 2).sum()
    gcmse = num * normed_ref_image.size / G.sum()
    weight = G.sum() / G.size
    return [gcmse, weight]
| [
"numpy.clip",
"numpy.ones_like",
"numpy.diff",
"numpy.exp",
"numpy.zeros_like"
] | [((2328, 2359), 'numpy.zeros_like', 'np.zeros_like', (['normed_ref_image'], {}), '(normed_ref_image)\n', (2341, 2359), True, 'import numpy as np\n'), ((2420, 2453), 'numpy.diff', 'np.diff', (['normed_ref_image'], {'axis': '(0)'}), '(normed_ref_image, axis=0)\n', (2427, 2453), True, 'import numpy as np\n'), ((2479, 2512), 'numpy.diff', 'np.diff', (['normed_ref_image'], {'axis': '(1)'}), '(normed_ref_image, axis=1)\n', (2486, 2512), True, 'import numpy as np\n'), ((2969, 2999), 'numpy.ones_like', 'np.ones_like', (['normed_ref_image'], {}), '(normed_ref_image)\n', (2981, 2999), True, 'import numpy as np\n'), ((3362, 3391), 'numpy.clip', 'np.clip', (['conduction', '(0.0)', '(1.0)'], {}), '(conduction, 0.0, 1.0)\n', (3369, 3391), True, 'import numpy as np\n'), ((2626, 2660), 'numpy.exp', 'np.exp', (['(-(gradient_S / kappa) ** 2)'], {}), '(-(gradient_S / kappa) ** 2)\n', (2632, 2660), True, 'import numpy as np\n'), ((2676, 2710), 'numpy.exp', 'np.exp', (['(-(gradient_E / kappa) ** 2)'], {}), '(-(gradient_E / kappa) ** 2)\n', (2682, 2710), True, 'import numpy as np\n')] |
from sklearn.neighbors import LocalOutlierFactor
from pyod.models.iforest import IForest
from pyod.models.hbos import HBOS
from pyod.models.loda import LODA
from pyod.models.copod import COPOD
from tqdm import tqdm
import numpy as np
import pandas as pd
import os
import ast
import eval.evaluation_utils as utils
from sklearn import metrics
from config import eva_root
def evaluation_od_train(x, y, data_name, model_name="iforest", chosen_subspace=None):
    """Score each anomaly in every candidate feature subspace and persist results.

    Runs the chosen outlier detector once per candidate subspace, records the
    (min-max normalised) anomaly score of each true anomaly, and writes two
    CSV files under ``eva_root + "data_od_evaluation/"``:
      * ``<data_name>_score_<model_name>.csv`` -- per-anomaly score matrix
      * ``<data_name>_gt_<model_name>.csv``    -- per-anomaly "ground-truth"
        subspace, i.e. the subspace with the highest anomaly score
    :param x: data matrix, shape (n_samples, n_features)
    :param y: class information, 1 marks anomalies
    :param data_name: the data set name, used for naming the output files
    :param model_name: anomaly detector name, one of {"iforest", "copod", "hbos"}
    :param chosen_subspace: use this to only evaluate a subset of the power set
        of the full feature space; None evaluates all candidates
    :return: tuple (anomaly_score_df, g_truth_df)
    :raises ValueError: if ``model_name`` is not supported
    """
    dim = x.shape[1]
    ano_idx = np.where(y == 1)[0]
    n_ano = len(ano_idx)
    # get all the possible feature subsets or just use the given subset list
    f_subsets = utils.get_subset_candidate(dim, chosen_subspace)
    n_subsets = len(f_subsets)
    # Validate the detector choice once, up front, instead of inside each
    # loop iteration (the original duplicated the fit/score code per branch).
    detector_classes = {"iforest": IForest, "copod": COPOD, "hbos": HBOS}
    if model_name not in detector_classes:
        raise ValueError("unsupported od model")
    # score anomalies in each subspace, generate the score matrix
    score_matrix = np.zeros([n_ano, n_subsets])
    for i in tqdm(range(n_subsets)):
        subset = f_subsets[i]
        x_subset = x[:, subset]
        clf = detector_classes[model_name]()
        clf.fit(x_subset)
        od_score = utils.min_max_norm(clf.decision_scores_)
        score_matrix[:, i] = od_score[ano_idx]
    if not os.path.exists(eva_root + "data_od_evaluation/"):
        os.makedirs(eva_root + "data_od_evaluation/")
    # score matrix to df, with the anomaly index prepended as the first column
    anomaly_score_df = pd.DataFrame(data=score_matrix, columns=[str(s) for s in f_subsets])
    col_name = anomaly_score_df.columns.tolist()
    col_name.insert(0, 'ano_idx')
    anomaly_score_df["ano_idx"] = ano_idx
    anomaly_score_df = anomaly_score_df.reindex(columns=col_name)
    path1 = eva_root + "data_od_evaluation/" + data_name + "_score_" + model_name + ".csv"
    anomaly_score_df.to_csv(path1, index=False)
    # get the ground truth (one subspace for each anomaly that the anomaly can
    # obtain the highest anomaly score)
    g_truth_df = pd.DataFrame(columns=["ano_idx", "exp_subspace"])
    exp_subspaces = []
    for ii, ano_score in enumerate(score_matrix):
        max_score_idx = int(np.argmax(ano_score))
        exp_subspaces.append(str(f_subsets[max_score_idx]))
    g_truth_df["ano_idx"] = ano_idx
    g_truth_df["exp_subspace"] = exp_subspaces
    # Bug fix: DataFrame.astype returns a new frame; the original discarded
    # the result, so the cast never took effect.
    g_truth_df = g_truth_df.astype({"exp_subspace": "object"})
    path2 = eva_root + "data_od_evaluation/" + data_name + "_gt_" + model_name + ".csv"
    g_truth_df.to_csv(path2, index=False)
    return anomaly_score_df, g_truth_df
def evaluation_od(exp_subspace_list, x, y, data_name, model_name):
    """Compare explanation subspaces against the detector-derived ground truth.

    For each anomaly, the explanation subspace in ``exp_subspace_list`` is
    matched against the annotated ground-truth subspace (loaded from the
    annotation CSV, or generated on demand by ``evaluation_od_train``), and
    set-overlap metrics are accumulated.

    :param exp_subspace_list: explanation feature subspace per anomaly,
        aligned with the anomalies in ``np.where(y == 1)[0]`` order
    :param x: data set
    :param y: label vector, 1 marks anomalies
    :param data_name: name of the data set (used to locate the annotation file)
    :param model_name: anomaly detector used to produce the ground truth
    :return: (mean precision, mean recall, mean Jaccard index)
    """
    gt_path = eva_root + "data_od_evaluation/" + data_name + "_gt_" + model_name + ".csv"
    if os.path.exists(gt_path):
        g_truth_df = pd.read_csv(gt_path)
    else:
        print("annotation file not found, labeling now...")
        _, g_truth_df = evaluation_od_train(x, y, data_name, model_name)
    ano_idx = np.where(y == 1)[0]
    n_ano = len(ano_idx)
    precision_list = np.zeros(n_ano)
    recall_list = np.zeros(n_ano)
    jaccard_list = np.zeros(n_ano)
    for k, ano in enumerate(ano_idx):
        exp_subset = list(exp_subspace_list[k])
        gt_str = g_truth_df.loc[g_truth_df["ano_idx"] == ano]["exp_subspace"].values[0]
        gt_subset = ast.literal_eval(gt_str)
        exp_set = set(exp_subset)
        gt_set = set(gt_subset)
        n_overlap = len(gt_set & exp_set)
        precision_list[k] = n_overlap / len(exp_subset)
        recall_list[k] = n_overlap / len(gt_subset)
        jaccard_list[k] = n_overlap / len(gt_set | exp_set)
    return precision_list.mean(), recall_list.mean(), jaccard_list.mean()
def evaluation_od_auc(feature_weight, x, y, data_name, model_name="iforest"):
    """Score per-anomaly feature weights against the annotated ground truth.

    Each anomaly's ground-truth subspace (from the annotation CSV, or
    generated on demand by ``evaluation_od_train``) is expanded into a binary
    relevance vector over all features; the supplied feature weights are then
    evaluated against it with AUC-PR and AUC-ROC.

    :param feature_weight: per-anomaly feature weight vectors, aligned with
        the anomalies in ``np.where(y == 1)[0]`` order
    :param x: data set
    :param y: label vector, 1 marks anomalies
    :param data_name: name of the data set (used to locate the annotation file)
    :param model_name: anomaly detector used to produce the ground truth
    :return: (mean AUC-PR, mean AUC-ROC)
    """
    gt_path = eva_root + "data_od_evaluation/" + data_name + "_gt_" + model_name + ".csv"
    if os.path.exists(gt_path):
        g_truth_df = pd.read_csv(gt_path)
    else:
        print("annotation file not found, labeling now...")
        _, g_truth_df = evaluation_od_train(x, y, data_name, model_name)
    ano_idx = np.where(y == 1)[0]
    n_features = x.shape[1]
    aupr_list = np.zeros(len(ano_idx))
    auroc_list = np.zeros(len(ano_idx))
    for k, ano in enumerate(ano_idx):
        score = feature_weight[k]
        gt_str = g_truth_df.loc[g_truth_df["ano_idx"] == ano]["exp_subspace"].values[0]
        gt_subset = ast.literal_eval(gt_str)
        gt = np.zeros(n_features, dtype=int)
        gt[gt_subset] = 1
        if len(gt_subset) == n_features:
            # Single-class ground truth: ROC/PR curves are undefined, so
            # credit a perfect score (matches the original behaviour).
            aupr_list[k] = 1
            auroc_list[k] = 1
        else:
            precision, recall, _ = metrics.precision_recall_curve(gt, score)
            aupr_list[k] = metrics.auc(recall, precision)
            auroc_list[k] = metrics.roc_auc_score(gt, score)
    return aupr_list.mean(), auroc_list.mean()
| [
"os.path.exists",
"eval.evaluation_utils.min_max_norm",
"os.makedirs",
"pandas.read_csv",
"numpy.where",
"pyod.models.iforest.IForest",
"sklearn.metrics.auc",
"pyod.models.hbos.HBOS",
"sklearn.metrics.precision_recall_curve",
"eval.evaluation_utils.get_subset_candidate",
"numpy.argmax",
"sklea... | [((1239, 1287), 'eval.evaluation_utils.get_subset_candidate', 'utils.get_subset_candidate', (['dim', 'chosen_subspace'], {}), '(dim, chosen_subspace)\n', (1265, 1287), True, 'import eval.evaluation_utils as utils\n'), ((1405, 1433), 'numpy.zeros', 'np.zeros', (['[n_ano, n_subsets]'], {}), '([n_ano, n_subsets])\n', (1413, 1433), True, 'import numpy as np\n'), ((2801, 2850), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['ano_idx', 'exp_subspace']"}), "(columns=['ano_idx', 'exp_subspace'])\n", (2813, 2850), True, 'import pandas as pd\n'), ((1105, 1121), 'numpy.where', 'np.where', (['(y == 1)'], {}), '(y == 1)\n', (1113, 1121), True, 'import numpy as np\n'), ((2030, 2058), 'eval.evaluation_utils.min_max_norm', 'utils.min_max_norm', (['od_score'], {}), '(od_score)\n', (2048, 2058), True, 'import eval.evaluation_utils as utils\n'), ((2118, 2166), 'os.path.exists', 'os.path.exists', (["(eva_root + 'data_od_evaluation/')"], {}), "(eva_root + 'data_od_evaluation/')\n", (2132, 2166), False, 'import os\n'), ((2176, 2221), 'os.makedirs', 'os.makedirs', (["(eva_root + 'data_od_evaluation/')"], {}), "(eva_root + 'data_od_evaluation/')\n", (2187, 2221), False, 'import os\n'), ((4137, 4158), 'os.path.exists', 'os.path.exists', (['path1'], {}), '(path1)\n', (4151, 4158), False, 'import os\n'), ((4324, 4342), 'pandas.read_csv', 'pd.read_csv', (['path1'], {}), '(path1)\n', (4335, 4342), True, 'import pandas as pd\n'), ((4358, 4374), 'numpy.where', 'np.where', (['(y == 1)'], {}), '(y == 1)\n', (4366, 4374), True, 'import numpy as np\n'), ((4716, 4749), 'ast.literal_eval', 'ast.literal_eval', (['gt_subspace_str'], {}), '(gt_subspace_str)\n', (4732, 4749), False, 'import ast\n'), ((5817, 5838), 'os.path.exists', 'os.path.exists', (['path1'], {}), '(path1)\n', (5831, 5838), False, 'import os\n'), ((6004, 6022), 'pandas.read_csv', 'pd.read_csv', (['path1'], {}), '(path1)\n', (6015, 6022), True, 'import pandas as pd\n'), ((6038, 6054), 'numpy.where', 'np.where', (['(y 
== 1)'], {}), '(y == 1)\n', (6046, 6054), True, 'import numpy as np\n'), ((6384, 6417), 'ast.literal_eval', 'ast.literal_eval', (['gt_subspace_str'], {}), '(gt_subspace_str)\n', (6400, 6417), False, 'import ast\n'), ((6431, 6455), 'numpy.zeros', 'np.zeros', (['dim'], {'dtype': 'int'}), '(dim, dtype=int)\n', (6439, 6455), True, 'import numpy as np\n'), ((1589, 1598), 'pyod.models.iforest.IForest', 'IForest', ([], {}), '()\n', (1596, 1598), False, 'from pyod.models.iforest import IForest\n'), ((2953, 2973), 'numpy.argmax', 'np.argmax', (['ano_score'], {}), '(ano_score)\n', (2962, 2973), True, 'import numpy as np\n'), ((6631, 6672), 'sklearn.metrics.precision_recall_curve', 'metrics.precision_recall_curve', (['gt', 'score'], {}), '(gt, score)\n', (6661, 6672), False, 'from sklearn import metrics\n'), ((6701, 6731), 'sklearn.metrics.auc', 'metrics.auc', (['recall', 'precision'], {}), '(recall, precision)\n', (6712, 6731), False, 'from sklearn import metrics\n'), ((6761, 6793), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['gt', 'score'], {}), '(gt, score)\n', (6782, 6793), False, 'from sklearn import metrics\n'), ((1727, 1734), 'pyod.models.copod.COPOD', 'COPOD', ([], {}), '()\n', (1732, 1734), False, 'from pyod.models.copod import COPOD\n'), ((1862, 1868), 'pyod.models.hbos.HBOS', 'HBOS', ([], {}), '()\n', (1866, 1868), False, 'from pyod.models.hbos import HBOS\n')] |
import sys
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def normalize_land_water(data, threshold=0.1):
    """Binarize a heightmap: cells >= ``threshold`` become 1 (land), others 0.

    Bug fix: the original pre-allocated a square ``len(data) x len(data)``
    result, which broke (or silently mis-sized rows) for non-square grids.
    This version builds each row from the actual input row.

    :param data: 2-D iterable of numeric height values (rows may vary in width)
    :param threshold: minimum height counted as land
    :return: list of lists of 0/1 with the same shape as ``data``
    """
    return [[1 if cell >= threshold else 0 for cell in row] for row in data]
def normalize_0_1(data):
    """Min-max normalize each row of a 2-D grid to [0, 1] independently.

    Constant rows (max == min) map to all zeros, matching the original
    ZeroDivisionError fallback. Bug fix: the original pre-allocated a square
    ``len(data) x len(data)`` result, which broke for non-square grids; this
    version sizes each output row from the input row.

    :param data: 2-D iterable of numeric values (rows may vary in width)
    :return: list of lists with each row scaled into [0, 1]
    """
    res = []
    for row in data:
        lo = min(row)
        hi = max(row)
        span = hi - lo
        if span == 0:
            # All cells equal: same result as the original's fallback
            # (cell - min == 0 for every cell).
            res.append([cell - lo for cell in row])
        else:
            res.append([(cell - lo) / span for cell in row])
    return res
def read_data_from_hmap_file(fname):
    """Parse a whitespace-separated heightmap file into a 2-D list of floats.

    Each line of the file becomes one row; tokens are converted with float().
    """
    with open(fname) as src:
        return [[float(token) for token in row.split()] for row in src]
def main():
    """Render the heightmap file named on the command line as a 3-D surface."""
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    hmap = read_data_from_hmap_file(sys.argv[1])
    axis_range = list(range(len(hmap)))
    grid_x, grid_y = np.meshgrid(axis_range, axis_range)
    heights = np.array(hmap)
    ax.plot_surface(grid_x, grid_y, heights, cmap='terrain')
    ax.set_zlim([0, 1.5])
    plt.show()


if __name__ == '__main__':
    main()
"numpy.array",
"numpy.meshgrid",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] | [((879, 891), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (889, 891), True, 'import matplotlib.pyplot as plt\n'), ((1049, 1066), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1060, 1066), True, 'import numpy as np\n'), ((1072, 1086), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1080, 1086), True, 'import numpy as np\n'), ((1157, 1167), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1165, 1167), True, 'import matplotlib.pyplot as plt\n')] |
"""File IO for Flickr 30K images and text captions.
Author: <NAME>
Contact: <EMAIL>
Date: September 2019
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import logging
import numpy as np
def load_flickr30k_splits(splits_dir="data/splits/flickr30k",
                          flickr8k_splits=None):
  """Load train-dev-test splits from Flicker 30k text caption corpus."""
  set_dict = {}
  for subset in ("train", "dev", "test"):
    subset_path = os.path.join(
        splits_dir, "{}.txt".format(subset))
    assert os.path.exists(subset_path)
    logging.log(logging.INFO, "Loading Flickr 30k {} split: {}".format(
        subset, subset_path))
    # One image UID per line; drop the file extension if present.
    with open(subset_path) as split_file:
      set_dict[subset] = [
          os.path.splitext(row.strip())[0] for row in split_file]
  if flickr8k_splits is not None:  # remove flickr 8k images from 30k splits
    set_dict = remove_flickr8k_splits(set_dict, flickr8k_splits)
  return set_dict
def remove_flickr8k_splits(flickr30k_splits, flickr8k_splits):
  """Remove Flickr 8k images from Flickr 30k train-dev-test splits.

  Args:
    flickr30k_splits: dict mapping subset name to a list of Flickr 30k UIDs.
    flickr8k_splits: dict mapping subset name to Flickr 8k UIDs of the form
      "<image_uid>_<suffix>"; only the part before the first "_" is compared.

  Returns:
    A new dict with the same keys where every UID also present in the
    Flickr 8k corpus has been removed.
  """
  # Performance fix: collect the Flickr 8k UIDs into a set. The original
  # scanned a flat list per UID, making the filter quadratic in corpus size.
  flickr8k_all = set()
  for uids in flickr8k_splits.values():
    flickr8k_all.update(uid.split("_")[0] for uid in uids)
  flickr30_removed = {}
  for subset, uids in flickr30k_splits.items():
    flickr30_removed[subset] = [uid for uid in uids if uid not in flickr8k_all]
  return flickr30_removed
def _load_flickr30k_unrelated_captions(splits_dir="data/splits/flickr30k"):
"""Load unrelated image captions from the Flickr 30k text caption corpus."""
path = os.path.join(splits_dir, "UNRELATED_CAPTIONS")
assert os.path.exists(path)
image_uids, caption_numbers = [], []
with open(path, "rb") as f:
next(f) # skip header line
for line in f:
image_uid, caption_number = line.decode("utf8").strip().split(" ")
image_uids.append(image_uid)
caption_numbers.append(str(int(caption_number) - 1))
image_uids = np.asarray(image_uids)
caption_numbers = np.asarray(caption_numbers)
return image_uids, caption_numbers
def load_flickr30k_captions(captions_dir, splits_dir="data/splits/flickr30k",
                            flickr8k_splits=None):
  """Load Flickr 30k text caption corpus.

  Parses ``results_20130124.token`` (lines of the form
  ``<image>.jpg#<caption_number>\\t<caption text>``), drops captions listed in
  the UNRELATED_CAPTIONS file, and partitions the remainder into the
  train/dev/test splits.

  Returns:
    (train, val, test), each a tuple of parallel numpy arrays
    (image_uids, captions, caption_numbers).
  """
  train, val, test = None, None, None
  split_dict = load_flickr30k_splits(splits_dir, flickr8k_splits)
  captions_path = os.path.join(
      captions_dir, "results_20130124.token")
  assert os.path.exists(captions_path)
  logging.log(logging.INFO, "Loading Flickr 30k text caption corpus: {}".format(
      captions_path))
  # Three parallel lists built in file order; all later filtering operates
  # on indices into these lists, so their alignment must be preserved.
  image_uids, captions, caption_numbers = [], [], []
  with open(captions_path, "rb") as f:
    for line in f:
      caption_image, caption = line.decode("utf8").split("\t")
      image_uid, caption_number = caption_image.split("#")
      image_uid = image_uid.split(".jpg")[0]
      image_uids.append(image_uid)
      captions.append(str(caption).strip().lower())
      caption_numbers.append(caption_number)
  # remove unrelated captions
  flickr30k_unrelated = _load_flickr30k_unrelated_captions(splits_dir)
  def filter_remove_unrelated(index):
    # Keep index unless (image_uid, caption_number) appears in the
    # unrelated-captions annotation (flickr30k_unrelated is a pair of
    # parallel arrays: image uids and 0-based caption-number strings).
    unrelated_idx = np.where(flickr30k_unrelated[0] == image_uids[index])[0]
    return caption_numbers[index] not in flickr30k_unrelated[1][unrelated_idx]
  filter_idx = list(filter(filter_remove_unrelated, range(len(image_uids))))
  image_uids = np.asarray(image_uids)[filter_idx]
  captions = np.asarray(captions)[filter_idx]
  caption_numbers = np.asarray(caption_numbers)[filter_idx]
  # split into train-dev-test
  train_idx = np.isin(image_uids, split_dict["train"])
  val_idx = np.isin(image_uids, split_dict["dev"])
  test_idx = np.isin(image_uids, split_dict["test"])
  train = (image_uids[train_idx], captions[train_idx], caption_numbers[train_idx])
  val = (image_uids[val_idx], captions[val_idx], caption_numbers[val_idx])
  test = (image_uids[test_idx], captions[test_idx], caption_numbers[test_idx])
  return train, val, test
def fetch_flickr30k_image_paths(images_dir, splits_dir="data/splits/flickr30k",
                                flickr8k_splits=None):
  """Fetch Flickr 30k image paths corresponding to the caption corpus splits."""
  split_dict = load_flickr30k_splits(splits_dir, flickr8k_splits)
  paths = []
  uids = []
  for name in os.listdir(images_dir):
    full_path = os.path.join(images_dir, name)
    paths.append(full_path)
    # UID = basename without its extension.
    uids.append(os.path.splitext(os.path.split(full_path)[-1])[0])
  image_paths = np.asarray(paths)
  image_uids = np.asarray(uids)
  per_subset = []
  for subset in ("train", "dev", "test"):
    mask = np.isin(image_uids, split_dict[subset])
    per_subset.append((image_uids[mask], image_paths[mask]))
  train, val, test = per_subset
  return train, val, test
| [
"os.path.exists",
"os.listdir",
"numpy.where",
"numpy.asarray",
"os.path.join",
"numpy.isin",
"os.path.split"
] | [((1887, 1933), 'os.path.join', 'os.path.join', (['splits_dir', '"""UNRELATED_CAPTIONS"""'], {}), "(splits_dir, 'UNRELATED_CAPTIONS')\n", (1899, 1933), False, 'import os\n'), ((1945, 1965), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1959, 1965), False, 'import os\n'), ((2303, 2325), 'numpy.asarray', 'np.asarray', (['image_uids'], {}), '(image_uids)\n', (2313, 2325), True, 'import numpy as np\n'), ((2348, 2375), 'numpy.asarray', 'np.asarray', (['caption_numbers'], {}), '(caption_numbers)\n', (2358, 2375), True, 'import numpy as np\n'), ((2724, 2776), 'os.path.join', 'os.path.join', (['captions_dir', '"""results_20130124.token"""'], {}), "(captions_dir, 'results_20130124.token')\n", (2736, 2776), False, 'import os\n'), ((2797, 2826), 'os.path.exists', 'os.path.exists', (['captions_path'], {}), '(captions_path)\n', (2811, 2826), False, 'import os\n'), ((3993, 4033), 'numpy.isin', 'np.isin', (['image_uids', "split_dict['train']"], {}), "(image_uids, split_dict['train'])\n", (4000, 4033), True, 'import numpy as np\n'), ((4048, 4086), 'numpy.isin', 'np.isin', (['image_uids', "split_dict['dev']"], {}), "(image_uids, split_dict['dev'])\n", (4055, 4086), True, 'import numpy as np\n'), ((4102, 4141), 'numpy.isin', 'np.isin', (['image_uids', "split_dict['test']"], {}), "(image_uids, split_dict['test'])\n", (4109, 4141), True, 'import numpy as np\n'), ((4978, 5018), 'numpy.isin', 'np.isin', (['image_uids', "split_dict['train']"], {}), "(image_uids, split_dict['train'])\n", (4985, 5018), True, 'import numpy as np\n'), ((5033, 5071), 'numpy.isin', 'np.isin', (['image_uids', "split_dict['dev']"], {}), "(image_uids, split_dict['dev'])\n", (5040, 5071), True, 'import numpy as np\n'), ((5087, 5126), 'numpy.isin', 'np.isin', (['image_uids', "split_dict['test']"], {}), "(image_uids, split_dict['test'])\n", (5094, 5126), True, 'import numpy as np\n'), ((702, 729), 'os.path.exists', 'os.path.exists', (['subset_path'], {}), '(subset_path)\n', (716, 729), False, 
'import os\n'), ((3799, 3821), 'numpy.asarray', 'np.asarray', (['image_uids'], {}), '(image_uids)\n', (3809, 3821), True, 'import numpy as np\n'), ((3849, 3869), 'numpy.asarray', 'np.asarray', (['captions'], {}), '(captions)\n', (3859, 3869), True, 'import numpy as np\n'), ((3904, 3931), 'numpy.asarray', 'np.asarray', (['caption_numbers'], {}), '(caption_numbers)\n', (3914, 3931), True, 'import numpy as np\n'), ((3561, 3614), 'numpy.where', 'np.where', (['(flickr30k_unrelated[0] == image_uids[index])'], {}), '(flickr30k_unrelated[0] == image_uids[index])\n', (3569, 3614), True, 'import numpy as np\n'), ((4784, 4814), 'os.path.join', 'os.path.join', (['images_dir', 'name'], {}), '(images_dir, name)\n', (4796, 4814), False, 'import os\n'), ((4827, 4849), 'os.listdir', 'os.listdir', (['images_dir'], {}), '(images_dir)\n', (4837, 4849), False, 'import os\n'), ((4907, 4926), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (4920, 4926), False, 'import os\n')] |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import os
import json
import scipy
import scipy.stats
# CSV stems (without ".csv") read by load_sweep below.
jian_file = 'result2'
grid_file = 'result2'
# Data sets plotted, one column each. NOTE(review): 'HGBn-ACM' appears both
# first and last -- looks like a copy/paste slip; confirm intent.
datasets = ['HGBn-ACM', 'HGBn-DBLP', 'HGBn-IMDB', 'HNE-PubMed', 'HGBn-Freebase', 'HGBn-ACM']
# Per-row (2) x per-dataset (6) x-axis limits for the subplots.
xL = [[[0, 1], [0, 1], [0, 1],[0, 1] ,[0, 1],[0, 1],],
      [[0.8, 0.95], [0.7, 0.95], [0.5, 0.65], [0.1, 0.6], [0.2, 0.5], [0.2, 0.5]],]
# Matching y-axis limits.
yL = [[[0, 1], [0, 1], [0, 1],[0, 1], [0, 1],[0, 1],],
      [[0.6, 1], [0.55, 1], [0.6, 1],[0.6, 1], [0.6, 1],[0.6, 1]]]
# Alternative configuration for the link-prediction (HGBl) data sets.
# jian_file = 'result2'
# grid_file = 'result2'
# datasets = ['HGBl-ACM', 'HGBl-DBLP', 'HGBl-IMDB', 'HGBl-PubMed', 'HGBl-amazon', 'HGBl-LastFM']
# xL = [[[0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1]],
#       [[0.8, 1], [0.6, 1], [0.5, 1], [0.7, 1], [0.8, 1],[0.8, 1]]]
# yL = [[[0,1], [0,1], [0,1],[0,1], [0,1],[0,1],],
#       [[0.6, 1], [0.6, 1], [0.6, 1],[0.6, 1], [0.6, 1],[0.6, 1],]]
# Column names in the result CSVs: the metric value and the grouping field.
score = 'score'
dim = 'subgraph'
num_data = len(datasets)
# Detectron colors
_COLORS = np.array([
    0.000, 0.447, 0.741,
    0.850, 0.325, 0.098,
    0.929, 0.694, 0.125,
    0.494, 0.184, 0.556,
    0.466, 0.674, 0.188
]).astype(np.float32).reshape((-1, 3))
# Random number generator seed
_RNG_SEED = 1
# Fix RNG seeds
random.seed(_RNG_SEED)
np.random.seed(_RNG_SEED)
# Directory where sweep summaries are stored
_DATA_DIR = '.'
def load_sweep(sweep_name):
    """Read the CSV summary for ``sweep_name`` from _DATA_DIR into a DataFrame."""
    summary_path = os.path.join(_DATA_DIR, '%s.csv' % sweep_name)
    with open(summary_path, 'r') as csv_file:
        return pd.read_csv(csv_file, sep=',')
# Load the two result sweeps compared in the figure (top row: results1,
# bottom row: results2). NOTE(review): the original comment said
# "Load ResNet sweep", which does not match these heterogeneous-graph results.
results1 = load_sweep('{}'.format(jian_file))
results2 = load_sweep('{}'.format(grid_file))
def draw( i, j, ax, has_y=True, has_x=True):
    """Plot empirical CDFs of scores per subgraph type into one subplot.

    Args:
        i: figure row (0 -> results1, 1 -> results2); also indexes xL/yL.
        j: dataset index into ``datasets``; also indexes xL/yL.
        ax: target matplotlib Axes.
        has_y: draw the y-axis label when True.
        has_x: draw the x-axis (dataset name) label when True.
    """
    if i == 0:
        results = results1
    else:
        results = results2
    dataset = datasets[j]
    # Collect the distinct score values for each subgraph type on this dataset.
    homo = results[(results[dim] == 'homo') & (results['dataset'] == dataset)]
    homo = set(homo[score].values.tolist())
    relation = results[(results[dim] == 'relation') & (results['dataset'] == dataset)]
    relation = set(relation[score].values.tolist())
    mp = results[(results[dim] == 'metapath') & (results['dataset'] == dataset)]
    mp = set(mp[score].values.tolist())
    mix = results[(results[dim] == 'mixed') & (results['dataset'] == dataset)]
    mix = set(mix[score].values.tolist())
    # Text experiment, point estimates
    random.seed(_RNG_SEED)
    num_trials = 5000  # NOTE(review): unused leftover from a sampling variant
    N_mp = len(mp)
    N_relation = len(relation)
    N_homo = len(homo)
    N_mix = len(mix)
    random.seed(_RNG_SEED)
    # Sorted scores vs. evenly spaced fractions give the empirical CDF.
    err_homo = sorted([j for j in homo])
    err_mp = sorted([j for j in mp])
    err_relation = sorted([j for j in relation])
    err_mix = sorted([j for j in mix])
    edf_homo = np.arange(N_homo) / float(N_homo - 1)
    edf_relation = np.arange(N_relation) / float(N_relation - 1)
    edf_mp = np.arange(N_mp) / float(N_mp - 1)
    edf_mix = np.arange(N_mix) / float(N_mix)
    ax.plot(
        err_homo, edf_homo, color=_COLORS[1], linewidth=2, alpha=0.8,
        zorder=1, label='{}=homo'.format(dim)
    )
    ax.plot(
        err_relation, edf_relation, color=_COLORS[0], linewidth=2, alpha=0.8,
        zorder=0, label='{}=relation'.format(dim)
    )
    ax.plot(
        err_mp, edf_mp, color=_COLORS[2], linewidth=2, alpha=0.8,
        zorder=1, label='{}=metapath'.format(dim)
    )
    # The 'mixed' curve is computed above but intentionally not drawn.
    # ax.plot(
    #     err_mix, edf_mix, color=_COLORS[3], linewidth=2, alpha=0.8,
    #     zorder=0, label='{}=mixed'.format(dim)
    # )
    #ax.set_xlim([4.5, 13.5])
    ax.set_xlim(xL[i][j])
    ax.set_ylim(yL[i][j])
    #ax.set_xticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
    if not has_x:
        ax.set_xlabel('', fontsize=20)
    else:
        ax.set_xlabel('{}'.format(dataset), fontsize=20)
    if not has_y:
        ax.set_ylabel('', fontsize=20)
    else:
        ax.set_ylabel('cumulative prob.', fontsize=20)
    ax.grid(alpha=0.4)
    #ax.legend(loc='upper left', prop={'size': 14})
# Assemble the 2 x num_data grid of subplots and render/save the figure.
r, c = 2, num_data
l_w, l_h = 4, 3  # NOTE(review): l_w/l_h and r_w/r_h appear unused below
r_w, r_h = 4, 3
fig, axes = plt.subplots(
    nrows=r, ncols=c,
    figsize=(22, 6),
    gridspec_kw = {'width_ratios': [2] * num_data}
)
# Only the bottom row gets x labels; only the first column gets y labels.
for i in range(2):
    for j in range(len(datasets)):
        draw(i, j, axes[i, j], has_x = i==1, has_y= j == 0)
plt.tight_layout()
#plt.subplots_adjust(left=0.1, bottom=0.2, right=0.85, top=0.9, hspace=0.4, wspace=0.5)
plt.subplots_adjust(left=0.05, bottom=0.2, right=0.97, top=0.9, hspace=0.3, wspace=0.25)
# One shared legend, taken from the last axes (all subplots share labels).
lines, labels = fig.axes[-1].get_legend_handles_labels()
fig.legend(lines, labels, loc='center right', title_fontsize= 'large', )
path = 'figs/1112'
if not os.path.exists(path):
    os.makedirs(path)
plt.savefig('{}/all_{}_node_1112.png'.format(path, dim), dpi=300)
plt.show()
"os.path.exists",
"os.makedirs",
"pandas.read_csv",
"numpy.arange",
"random.seed",
"numpy.array",
"numpy.random.seed",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show"
] | [((1318, 1340), 'random.seed', 'random.seed', (['_RNG_SEED'], {}), '(_RNG_SEED)\n', (1329, 1340), False, 'import random\n'), ((1342, 1367), 'numpy.random.seed', 'np.random.seed', (['_RNG_SEED'], {}), '(_RNG_SEED)\n', (1356, 1367), True, 'import numpy as np\n'), ((4242, 4339), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'r', 'ncols': 'c', 'figsize': '(22, 6)', 'gridspec_kw': "{'width_ratios': [2] * num_data}"}), "(nrows=r, ncols=c, figsize=(22, 6), gridspec_kw={'width_ratios':\n [2] * num_data})\n", (4254, 4339), True, 'import matplotlib.pyplot as plt\n'), ((4478, 4496), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4494, 4496), True, 'import matplotlib.pyplot as plt\n'), ((4587, 4679), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.05)', 'bottom': '(0.2)', 'right': '(0.97)', 'top': '(0.9)', 'hspace': '(0.3)', 'wspace': '(0.25)'}), '(left=0.05, bottom=0.2, right=0.97, top=0.9, hspace=0.3,\n wspace=0.25)\n', (4606, 4679), True, 'import matplotlib.pyplot as plt\n'), ((4955, 4965), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4963, 4965), True, 'import matplotlib.pyplot as plt\n'), ((2524, 2546), 'random.seed', 'random.seed', (['_RNG_SEED'], {}), '(_RNG_SEED)\n', (2535, 2546), False, 'import random\n'), ((2677, 2699), 'random.seed', 'random.seed', (['_RNG_SEED'], {}), '(_RNG_SEED)\n', (2688, 2699), False, 'import random\n'), ((4840, 4860), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4854, 4860), False, 'import os\n'), ((4867, 4884), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (4878, 4884), False, 'import os\n'), ((1636, 1659), 'pandas.read_csv', 'pd.read_csv', (['f'], {'sep': '""","""'}), "(f, sep=',')\n", (1647, 1659), True, 'import pandas as pd\n'), ((2894, 2911), 'numpy.arange', 'np.arange', (['N_homo'], {}), '(N_homo)\n', (2903, 2911), True, 'import numpy as np\n'), ((2952, 2973), 'numpy.arange', 'np.arange', (['N_relation'], {}), 
'(N_relation)\n', (2961, 2973), True, 'import numpy as np\n'), ((3012, 3027), 'numpy.arange', 'np.arange', (['N_mp'], {}), '(N_mp)\n', (3021, 3027), True, 'import numpy as np\n'), ((3061, 3077), 'numpy.arange', 'np.arange', (['N_mix'], {}), '(N_mix)\n', (3070, 3077), True, 'import numpy as np\n'), ((1069, 1185), 'numpy.array', 'np.array', (['[0.0, 0.447, 0.741, 0.85, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494, 0.184, \n 0.556, 0.466, 0.674, 0.188]'], {}), '([0.0, 0.447, 0.741, 0.85, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494,\n 0.184, 0.556, 0.466, 0.674, 0.188])\n', (1077, 1185), True, 'import numpy as np\n')] |
from skbio.alignment import TabularMSA
from skbio import DNA
from io import StringIO
import argparse
import numpy as np
from collections import Counter
# Trim a FASTA multiple-sequence alignment to selected columns and print it.
p = argparse.ArgumentParser()
p.add_argument("--msa")  # path to the input FASTA alignment
p.add_argument("-gap_frac", default=0.5, type=float)  # min fraction of non-gap rows to keep a column
p.add_argument("-only_plot_mutyh", action="store_true")  # restrict to the 6 amino acids below
args = p.parse_args()
align = TabularMSA.read(args.msa, constructor=DNA, format="fasta" )
# we'll only plot the 6 amino acids of interest
aa_positions_to_keep = [5, 24, 69, 153, 312, 313]
# Expand each 1-based amino-acid position into its three 0-based codon
# nucleotide positions: (aa*3 - 3) .. (aa*3 - 1).
nuc_positions_to_keep = []
for aa in aa_positions_to_keep:
    for nuc_i in range(3):
        nuc_position = (aa * 3) - 3 + nuc_i
        nuc_positions_to_keep.append(nuc_position)
# Reference row used to map ungapped positions to alignment columns.
# NOTE(review): presumably the C57BL/6 mouse sequence is row 1 -- confirm.
bl6_seq_idx = 1
bl6_seq = str(align[bl6_seq_idx])
if not args.only_plot_mutyh:
    # Without the flag, keep every ungapped position of the reference.
    nuc_positions_to_keep = range(len(bl6_seq))
# Translate the ungapped reference positions into alignment-column indices,
# skipping columns where the reference has a gap.
good_nucs = []
good_nuc_counter = 0
for nuc_i, nuc in enumerate(bl6_seq):
    if nuc == "-": continue
    else:
        if good_nuc_counter in nuc_positions_to_keep:
            good_nucs.append(nuc_i)
        good_nuc_counter += 1
n_nucs = align.shape.position
n_seqs = align.shape.sequence
# Mark a column for output only if it is selected above AND at least
# gap_frac of the sequences have a residue (not '-') there.
seq_to_keep = np.zeros(n_nucs, dtype=np.int8)
for nuc_i in np.arange(n_nucs):
    nucs_at_site = str(align[:,nuc_i])
    n_gaps = Counter(nucs_at_site)['-']
    if nuc_i not in good_nucs: continue
    if (n_seqs - n_gaps) / n_seqs >= args.gap_frac: seq_to_keep[nuc_i] = 1
with StringIO() as fh:
    print(align[:,seq_to_keep.astype(bool)].write(fh).getvalue())
| [
"argparse.ArgumentParser",
"collections.Counter",
"numpy.zeros",
"skbio.alignment.TabularMSA.read",
"io.StringIO",
"numpy.arange"
] | [((157, 182), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (180, 182), False, 'import argparse\n'), ((347, 405), 'skbio.alignment.TabularMSA.read', 'TabularMSA.read', (['args.msa'], {'constructor': 'DNA', 'format': '"""fasta"""'}), "(args.msa, constructor=DNA, format='fasta')\n", (362, 405), False, 'from skbio.alignment import TabularMSA\n'), ((1127, 1158), 'numpy.zeros', 'np.zeros', (['n_nucs'], {'dtype': 'np.int8'}), '(n_nucs, dtype=np.int8)\n', (1135, 1158), True, 'import numpy as np\n'), ((1173, 1190), 'numpy.arange', 'np.arange', (['n_nucs'], {}), '(n_nucs)\n', (1182, 1190), True, 'import numpy as np\n'), ((1394, 1404), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1402, 1404), False, 'from io import StringIO\n'), ((1245, 1266), 'collections.Counter', 'Counter', (['nucs_at_site'], {}), '(nucs_at_site)\n', (1252, 1266), False, 'from collections import Counter\n')] |
# Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import importlib
import json
import os
import random
import numpy as np
import pytest
import torch
# Resolve the CLI entry points under test via importlib: their dotted paths
# end in "__main__", which a plain import statement cannot name directly.
eval_model = importlib.import_module("compressai.utils.video.eval_model.__main__")
update_model = importlib.import_module("compressai.utils.update_model.__main__")
# Example: GENERATE_EXPECTED=1 pytest -sx tests/test_eval_model.py
# When set, missing expected-output JSON files are written instead of erroring.
GENERATE_EXPECTED = os.getenv("GENERATE_EXPECTED")
def set_rng_seed(seed):
    """Seed the random, NumPy and torch RNGs so test runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
def test_eval_model_video():
    """Invalid or incomplete CLI invocations must exit with SystemExit."""
    bad_argvs = (
        ["--help"],
        [],
        ["pretrained"],
        ["pretrained", ".", "-a", "ssf2020", "-m", "mse", "-q", "1"],
    )
    for argv in bad_argvs:
        with pytest.raises(SystemExit):
            eval_model.main(argv)
# mse and entropy_estimation tested for now
@pytest.mark.parametrize("model", ("ssf2020",))
@pytest.mark.parametrize("quality", ("1", "4", "8"))
@pytest.mark.parametrize("metric", ("mse",))
@pytest.mark.parametrize("entropy_estimation", (True,))
def test_eval_model_pretrained(capsys, model, quality, metric, entropy_estimation):
    """Run the pretrained-model CLI and compare its JSON output to a stored
    expected file (regenerated when GENERATE_EXPECTED is set)."""
    here = os.path.dirname(__file__)
    dirpath = os.path.join(here, "assets/dataset/video")
    # Build the CLI argument vector for eval_model.main.
    cmd = [
        "pretrained",
        dirpath,
        here,
        "-a",
        model,
        "-m",
        metric,
        "-q",
        quality,
    ]
    if entropy_estimation:
        cmd += ["--entropy-estimation"]
    eval_model.main(cmd)
    # The CLI prints its results as JSON on stdout; capture and parse it.
    output = capsys.readouterr().out
    output = json.loads(output)
    expected = os.path.join(
        here,
        "expected",
        f"eval_{int(entropy_estimation)}_{model}_{metric}_{quality}.json",
    )
    if not os.path.isfile(expected):
        # Regenerate the baseline on demand; otherwise a missing file is fatal.
        if not GENERATE_EXPECTED:
            raise RuntimeError(f"Missing expected file {expected}")
        with open(expected, "w") as f:
            json.dump(output, f)
    with open(expected, "r") as f:
        expected = json.loads(f.read())
    # Metadata must match exactly; numeric metrics only within tolerance.
    for key in ("name", "description"):
        assert expected[key] == output[key]
    for key in ("psnr", "ms-ssim", "bpp"):
        if key not in expected["results"]:
            continue
        assert np.allclose(
            expected["results"][key], output["results"][key], rtol=1e-4, atol=1e-4
        )
# @pytest.mark.parametrize("model_name", ("ssf2020",))
# def test_eval_model_ckpt(tmp_path, model_name):
# here = os.path.dirname(__file__)
# parent = os.path.dirname(here)
# # fake training
# datapath = os.path.join(here, "assets/fakedata/imagefolder")
# spec = importlib.util.spec_from_file_location(
# "examples.train", os.path.join(parent, "examples/train_video.py")
# )
# module = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(module)
# argv = [
# "-d",
# datapath,
# "-e",
# "1",
# "--batch-size",
# "1",
# "--patch-size",
# "48",
# "64",
# "--seed",
# "0",
# "--save",
# ]
# os.chdir(tmp_path)
# module.main(argv)
# checkpoint = "checkpoint_best_loss.pth.tar"
# assert os.path.isfile(checkpoint)
# # update model
# cmd = ["-a", model_name, "-n", "factorized", checkpoint]
# update_model.main(cmd)
# # ckpt evaluation
# dirpath = os.path.join(here, "assets/dataset/image")
# checkpoint = next(f for f in os.listdir(tmp_path) if f.startswith("factorized-"))
# cmd = [
# "checkpoint",
# dirpath,
# "-a",
# "bmshj2018-factorized",
# "-p",
# checkpoint,
# ]
# eval_model.main(cmd)
| [
"torch.manual_seed",
"json.loads",
"numpy.allclose",
"importlib.import_module",
"os.getenv",
"os.path.join",
"random.seed",
"os.path.isfile",
"pytest.mark.parametrize",
"os.path.dirname",
"pytest.raises",
"numpy.random.seed",
"json.dump"
] | [((1832, 1901), 'importlib.import_module', 'importlib.import_module', (['"""compressai.utils.video.eval_model.__main__"""'], {}), "('compressai.utils.video.eval_model.__main__')\n", (1855, 1901), False, 'import importlib\n'), ((1917, 1982), 'importlib.import_module', 'importlib.import_module', (['"""compressai.utils.update_model.__main__"""'], {}), "('compressai.utils.update_model.__main__')\n", (1940, 1982), False, 'import importlib\n'), ((2071, 2101), 'os.getenv', 'os.getenv', (['"""GENERATE_EXPECTED"""'], {}), "('GENERATE_EXPECTED')\n", (2080, 2101), False, 'import os\n'), ((2783, 2829), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""model"""', "('ssf2020',)"], {}), "('model', ('ssf2020',))\n", (2806, 2829), False, 'import pytest\n'), ((2831, 2882), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""quality"""', "('1', '4', '8')"], {}), "('quality', ('1', '4', '8'))\n", (2854, 2882), False, 'import pytest\n'), ((2884, 2927), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""metric"""', "('mse',)"], {}), "('metric', ('mse',))\n", (2907, 2927), False, 'import pytest\n'), ((2929, 2983), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""entropy_estimation"""', '(True,)'], {}), "('entropy_estimation', (True,))\n", (2952, 2983), False, 'import pytest\n'), ((2132, 2155), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2149, 2155), False, 'import torch\n'), ((2160, 2177), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2171, 2177), False, 'import random\n'), ((2182, 2202), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2196, 2202), True, 'import numpy as np\n'), ((3079, 3104), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3094, 3104), False, 'import os\n'), ((3119, 3161), 'os.path.join', 'os.path.join', (['here', '"""assets/dataset/video"""'], {}), "(here, 'assets/dataset/video')\n", (3131, 3161), False, 'import os\n'), ((3467, 3485), 
'json.loads', 'json.loads', (['output'], {}), '(output)\n', (3477, 3485), False, 'import json\n'), ((2243, 2268), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (2256, 2268), False, 'import pytest\n'), ((2316, 2341), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (2329, 2341), False, 'import pytest\n'), ((2381, 2406), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (2394, 2406), False, 'import pytest\n'), ((2458, 2483), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (2471, 2483), False, 'import pytest\n'), ((3642, 3666), 'os.path.isfile', 'os.path.isfile', (['expected'], {}), '(expected)\n', (3656, 3666), False, 'import os\n'), ((4126, 4217), 'numpy.allclose', 'np.allclose', (["expected['results'][key]", "output['results'][key]"], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), "(expected['results'][key], output['results'][key], rtol=0.0001,\n atol=0.0001)\n", (4137, 4217), True, 'import numpy as np\n'), ((3821, 3841), 'json.dump', 'json.dump', (['output', 'f'], {}), '(output, f)\n', (3830, 3841), False, 'import json\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import abel.tools.transform_pairs
n = 100
def plot(profile):
    """Draw one analytical Abel transform pair: source (left), projection (right)."""
    name = 'profile' + str(profile)
    fig, axs = plt.subplots(1, 2, figsize=(6, 2.5))
    # fig.suptitle(name, weight='bold')  # optional figure title
    eps = 1e-8  # some profiles dislike exact 0 and 1
    r = np.linspace(0 + eps, 1 - eps, n)
    curves = getattr(abel.tools.transform_pairs, name)(r)
    for ax, curve, title in zip(axs, curves, ('source', 'projection')):
        ax.set_title(title)
        for side in ('top', 'right'):
            ax.spines[side].set_visible(False)
        ax.set_xlabel('$r$')
        ax.plot(r, curve, color='red')
        ax.set_xlim((0, 1.01))
        ax.set_ylim(bottom=0)
    plt.tight_layout()
    # plt.show()
    # plt.savefig(name + '.svg')
| [
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout"
] | [((195, 231), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(6, 2.5)'}), '(1, 2, figsize=(6, 2.5))\n', (207, 231), True, 'import matplotlib.pyplot as plt\n'), ((374, 406), 'numpy.linspace', 'np.linspace', (['(0 + eps)', '(1 - eps)', 'n'], {}), '(0 + eps, 1 - eps, n)\n', (385, 406), True, 'import numpy as np\n'), ((807, 825), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (823, 825), True, 'import matplotlib.pyplot as plt\n')] |
# Import modules and libraries
import torch
from torch.utils.data import DataLoader
import csv
import pickle
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import glob
from skimage.io import imread
import time
import argparse
from DeepSTORM3D.data_utils import generate_batch, complex_to_tensor, ExpDataset, sort_names_tif
from DeepSTORM3D.cnn_utils import LocalizationCNN
from DeepSTORM3D.vis_utils import ShowMaskPSF, ShowRecovery3D, ShowLossJaccardAtEndOfEpoch
from DeepSTORM3D.vis_utils import PhysicalLayerVisualization, ShowRecNetInput
from DeepSTORM3D.physics_utils import EmittersToPhases
from DeepSTORM3D.postprocess_utils import Postprocess
from DeepSTORM3D.assessment_utils import calc_jaccard_rmse
from DeepSTORM3D.helper_utils import normalize_01, xyz_to_nm
def test_model(path_results, postprocess_params, exp_imgs_path=None, seed=66):
    """Evaluate a trained DeepSTORM3D localization model.

    Parameters
    ----------
    path_results : str
        Folder containing ``setup_params.pickle``, ``learning_results.pickle``
        and ``weights_best_loss.pkl`` from training.  Paths are built by string
        concatenation, so a trailing separator is expected -- TODO confirm.
    postprocess_params : dict
        Must provide ``'thresh'`` and ``'radius'`` for the post-processor.
    exp_imgs_path : str or None
        If None, a random example is simulated and compared to ground truth.
        Otherwise all ``*.tif`` images under this path are processed: a single
        image is visualized in 3D, multiple images are batch-processed and a
        ``localizations.csv`` is written next to them.
    seed : int
        RNG seed, used only in the simulated-example branch.

    Returns
    -------
    (xyz_rec, conf_rec)
        Recovered 3D positions and confidences for the (last) processed image.
        NOTE(review): the post-processor may return ``None`` for an empty
        prediction (checked explicitly only in the multi-image branch).
    """
    # close all existing plots
    plt.close("all")
    # load assumed setup parameters
    path_params_pickle = path_results + 'setup_params.pickle'
    with open(path_params_pickle, 'rb') as handle:
        setup_params = pickle.load(handle)
    # run on GPU if available
    device = setup_params['device']
    torch.backends.cudnn.benchmark = True
    # phase term for PSF visualization
    vis_term, zvis = setup_params['vis_term'], setup_params['zvis']
    # phase mask for visualization
    mask_param = torch.from_numpy(setup_params['mask_init']).to(device)
    # plot used mask and PSF
    plt.figure(figsize=(10,5))
    ShowMaskPSF(mask_param, vis_term, zvis)
    # load learning results
    path_learning_pickle = path_results + 'learning_results.pickle'
    with open(path_learning_pickle, 'rb') as handle:
        learning_results = pickle.load(handle)
    # plot metrics evolution in training for debugging
    plt.figure()
    ShowLossJaccardAtEndOfEpoch(learning_results, learning_results['epoch_converged'])
    # build model and move the weight tensors to GPU if available
    cnn = LocalizationCNN(setup_params)
    cnn.to(device)
    # load learned weights
    cnn.load_state_dict(torch.load(path_results + 'weights_best_loss.pkl'))
    # post-processing module on CPU/GPU
    thresh, radius = postprocess_params['thresh'], postprocess_params['radius']
    postprocessing_module = Postprocess(thresh, radius, setup_params)
    # if no experimental imgs are supplied then sample a random example
    if exp_imgs_path is None:
        # visualization module to visualize the 3D positions recovered by the net as images
        psf_module_vis = PhysicalLayerVisualization(setup_params, 0, 0, 1)
        # --------------------------------------------------------------------
        # generate a simulated test image
        # --------------------------------------------------------------------
        # set random number generators given the seed
        torch.manual_seed(seed)
        np.random.seed(seed)
        # sample a single piece of data
        xyz_gt, nphotons_gt = generate_batch(1, setup_params)
        # calculate phases and cast them to device
        phases_np = EmittersToPhases(xyz_gt, setup_params)
        phases_emitter_gt = complex_to_tensor(phases_np).to(device)
        # initialize the physical layer that encodes xyz into noisy PSFs
        psf_module_net = PhysicalLayerVisualization(setup_params, 1, 1, 0)
        # pass xyz and N through the physical layer to get the simulated image
        nphotons_gt = torch.from_numpy(nphotons_gt).type(torch.FloatTensor).to(device)
        test_input_im = psf_module_net(mask_param, phases_emitter_gt, nphotons_gt)
        # normalize image according to the training setting
        if setup_params['project_01'] is True:
            test_input_im = normalize_01(test_input_im)
        else:
            test_input_im = (test_input_im - setup_params['global_factors'][0]) / setup_params['global_factors'][1]
        # alter the mean and std to match the training set
        if setup_params['project_01'] is True:
            test_input_im = (test_input_im - test_input_im.mean())/test_input_im.std()
            test_input_im = test_input_im*setup_params['train_stats'][1] + setup_params['train_stats'][0]
        # --------------------------------------------------------------------
        # predict the positions by post-processing the net's output
        # --------------------------------------------------------------------
        # prediction using model
        cnn.eval()
        with torch.set_grad_enabled(False):
            pred_volume = cnn(test_input_im)
        # post-process predicted volume
        tpost_start = time.time()
        xyz_rec, conf_rec = postprocessing_module(pred_volume)
        tpost_elapsed = time.time() - tpost_start
        print('Post-processing complete in {:.6f}s'.format(tpost_elapsed))
        # time prediction using model after first forward pass which is slow
        cnn.eval()
        tinf_start = time.time()
        with torch.set_grad_enabled(False):
            pred_volume = cnn(test_input_im)
        tinf_elapsed = time.time() - tinf_start
        print('Inference complete in {:.6f}s'.format(tinf_elapsed))
        # take out dim emitters from GT
        if setup_params['nsig_unif'] is False:
            nemitters = xyz_gt.shape[1]
            if np.not_equal(nemitters, 1):
                nphotons_gt = np.squeeze(nphotons_gt, 0)
            xyz_gt = xyz_gt[:, nphotons_gt > setup_params['nsig_thresh'], :]
        # plot recovered 3D positions compared to GT
        plt.figure()
        xyz_gt = np.squeeze(xyz_gt, 0)
        ShowRecovery3D(xyz_gt, xyz_rec)
        # report the number of found emitters
        print('Found {:d} emitters out of {:d}'.format(xyz_rec.shape[0], xyz_gt.shape[0]))
        # calculate quantitative metrics assuming a matching radius of 100 nm
        jaccard_index, RMSE_xy, RMSE_z, _ = calc_jaccard_rmse(xyz_gt, xyz_rec, 0.1)
        # report quantitative metrics
        print('Jaccard Index = {:.2f}%, Lateral RMSE = {:.2f} nm, Axial RMSE = {:.2f}'.format(
            jaccard_index*100, RMSE_xy*1e3, RMSE_z*1e3))
        # --------------------------------------------------------------------
        # compare the network positions to the input image
        # --------------------------------------------------------------------
        # turn recovered positions into phases
        xyz_rec = np.expand_dims(xyz_rec, 0)
        phases_np = EmittersToPhases(xyz_rec, setup_params)
        phases_emitter_rec = complex_to_tensor(phases_np).to(device)
        # use a uniform number of photons for recovery visualization
        nphotons_rec = 5000 * torch.ones((1, xyz_rec.shape[1])).to(device)
        # generate the recovered image by the net
        test_pred_im = psf_module_vis(mask_param, phases_emitter_rec, nphotons_rec)
        # compare the recovered image to the input
        ShowRecNetInput(test_input_im, 'Simulated Input to Localization Net')
        ShowRecNetInput(test_pred_im, 'Recovered Input Matching Net Localizations')
        # return recovered locations and net confidence
        return xyz_rec, conf_rec
    else:
        # read all imgs in the experimental data directory assuming ".tif" extension
        img_names = glob.glob(exp_imgs_path + '*.tif')
        img_names = sort_names_tif(img_names)
        # if given only 1 image then show xyz in 3D and recovered image
        if len(img_names) == 1:
            # ----------------------------------------------------------------
            # read experimental image and normalize it
            # ----------------------------------------------------------------
            # read exp image in uint16
            exp_im = imread(img_names[0])
            exp_img = exp_im.astype("float32")
            # normalize image according to the training setting
            if setup_params['project_01'] is True:
                exp_img = normalize_01(exp_img)
            else:
                exp_img = (exp_img - setup_params['global_factors'][0]) / setup_params['global_factors'][1]
            # alter the mean and std to match the training set
            if setup_params['project_01'] is True:
                exp_img = (exp_img - exp_img.mean()) / exp_img.std()
                exp_img = exp_img * setup_params['train_stats'][1] + setup_params['train_stats'][0]
            # turn image into torch tensor with 1 channel on GPU
            exp_img = np.expand_dims(exp_img, 0)
            exp_img = np.expand_dims(exp_img, 0)
            exp_tensor = torch.FloatTensor(exp_img).to(device)
            # ----------------------------------------------------------------
            # predict the positions by post-processing the net's output
            # ----------------------------------------------------------------
            # prediction using model
            cnn.eval()
            with torch.set_grad_enabled(False):
                pred_volume = cnn(exp_tensor)
            # post-process predicted volume
            tpost_start = time.time()
            xyz_rec, conf_rec = postprocessing_module(pred_volume)
            tpost_elapsed = time.time() - tpost_start
            print('Post-processing complete in {:.6f}s'.format(tpost_elapsed))
            # time prediction using model after first forward pass which is slow
            cnn.eval()
            tinf_start = time.time()
            with torch.set_grad_enabled(False):
                pred_volume = cnn(exp_tensor)
            tinf_elapsed = time.time() - tinf_start
            print('Inference complete in {:.6f}s'.format(tinf_elapsed))
            # plot recovered 3D positions compared to GT
            plt.figure()
            ax = plt.axes(projection='3d')
            ax.scatter(xyz_rec[:, 0], xyz_rec[:, 1], xyz_rec[:, 2], c='r', marker='^', label='DL', depthshade=False)
            ax.set_xlabel('X [um]')
            ax.set_ylabel('Y [um]')
            ax.set_zlabel('Z [um]')
            plt.title('3D Recovered Positions')
            # report the number of found emitters
            print('Found {:d} emitters'.format(xyz_rec.shape[0]))
            # ----------------------------------------------------------------
            # compare the network positions to the input image
            # ----------------------------------------------------------------
            # visualization module to visualize the 3D positions recovered by the net as images
            H, W = exp_im.shape
            setup_params['H'], setup_params['W'] = H, W
            psf_module_vis = PhysicalLayerVisualization(setup_params, 0, 0, 1)
            # turn recovered positions into phases
            xyz_rec = np.expand_dims(xyz_rec, 0)
            phases_np = EmittersToPhases(xyz_rec, setup_params)
            phases_emitter_rec = complex_to_tensor(phases_np).to(device)
            # use a uniform number of photons for recovery visualization
            nphotons_rec = 5000 * torch.ones((1, xyz_rec.shape[1])).to(device)
            # generate the recovered image by the net
            exp_pred_im = psf_module_vis(mask_param, phases_emitter_rec, nphotons_rec)
            # compare the recovered image to the input
            ShowRecNetInput(exp_tensor, 'Experimental Input to Localization Net')
            ShowRecNetInput(exp_pred_im, 'Recovered Input Matching Net Localizations')
            # return recovered locations and net confidence
            return xyz_rec, conf_rec
        else:
            # ----------------------------------------------------------------
            # create a data generator to efficiently load imgs for temporal acquisitions
            # ----------------------------------------------------------------
            # instantiate the data class and create a data loader for testing
            num_imgs = len(img_names)
            exp_test_set = ExpDataset(img_names, setup_params)
            exp_generator = DataLoader(exp_test_set, batch_size=1, shuffle=False)
            # time the entire dataset analysis
            tall_start = time.time()
            # needed pixel-size for plotting if only few images are in the folder
            visualize_flag, pixel_size_FOV = num_imgs < 100, setup_params['pixel_size_FOV']
            # needed recovery pixel size and minimal axial height for turning ums to nms
            psize_rec_xy, zmin = setup_params['pixel_size_rec'], setup_params['zmin']
            # process all experimental images
            cnn.eval()
            results = np.array(['frame', 'x [nm]', 'y [nm]', 'z [nm]', 'intensity [au]'])
            with torch.set_grad_enabled(False):
                for im_ind, exp_im_tensor in enumerate(exp_generator):
                    # print current image number
                    print('Processing Image [%d/%d]' % (im_ind + 1, num_imgs))
                    # time each frame
                    tfrm_start = time.time()
                    # transfer normalized image to device (CPU/GPU)
                    exp_im_tensor = exp_im_tensor.to(device)
                    # predicted volume using model
                    pred_volume = cnn(exp_im_tensor)
                    # post-process result to get the xyz coordinates and their confidence
                    xyz_rec, conf_rec = postprocessing_module(pred_volume)
                    # time it takes to analyze a single frame
                    tfrm_end = time.time() - tfrm_start
                    # if this is the first image, get the dimensions and the relevant center for plotting
                    if im_ind == 0:
                        N, C, H, W = exp_im_tensor.size()
                        ch, cw = np.floor(H / 2), np.floor(W / 2)
                    # if prediction is empty then set number of found emitters to 0
                    # otherwise generate the frame column and append results for saving
                    if xyz_rec is None:
                        nemitters = 0
                    else:
                        nemitters = xyz_rec.shape[0]
                        frm_rec = (im_ind + 1)*np.ones(nemitters)
                        xyz_save = xyz_to_nm(xyz_rec, H*2, W*2, psize_rec_xy, zmin)
                        results = np.vstack((results, np.column_stack((frm_rec, xyz_save, conf_rec))))
                    # if the number of imgs is small then plot each image in the loop with localizations
                    if visualize_flag:
                        # show input image
                        fig100 = plt.figure(100)
                        im_np = np.squeeze(exp_im_tensor.cpu().numpy())
                        imfig = plt.imshow(im_np, cmap='gray')
                        plt.plot(xyz_rec[:, 0] / pixel_size_FOV + cw, xyz_rec[:, 1] / pixel_size_FOV + ch, 'r+')
                        plt.title('Single frame complete in {:.2f}s, found {:d} emitters'.format(tfrm_end, nemitters))
                        fig100.colorbar(imfig)
                        plt.draw()
                        plt.pause(0.05)
                        plt.clf()
                    else:
                        # print status
                        print('Single frame complete in {:.6f}s, found {:d} emitters'.format(tfrm_end, nemitters))
            # print the time it took for the entire analysis
            tall_end = time.time() - tall_start
            print('=' * 50)
            print('Analysis complete in {:.0f}h {:.0f}m {:.0f}s'.format(
                tall_end // 3600, np.floor((tall_end / 3600 - tall_end // 3600) * 60), tall_end % 60))
            print('=' * 50)
            # write the results to a csv file named "localizations.csv" under the exp img folder
            row_list = results.tolist()
            with open(exp_imgs_path + 'localizations.csv', 'w', newline='') as file:
                writer = csv.writer(file)
                writer.writerows(row_list)
            # return the localization results for the last image
            return xyz_rec, conf_rec
if __name__ == '__main__':
    # build the command-line parser
    parser = argparse.ArgumentParser()
    # previously trained model
    parser.add_argument('--path_results', help='path to the results folder for the pre-trained model', required=True)
    # post-processing parameters
    # NOTE(review): argparse delivers this as a string, while test_model indexes
    # it with 'thresh'/'radius' keys -- confirm how callers supply a dict here.
    parser.add_argument('--postprocessing_params', help='post-processing dictionary parameters', required=True)
    # path to the experimental images
    parser.add_argument('--exp_imgs_path', default=None, help='path to the experimental test images')
    # seed to run model; type=int so a CLI-supplied seed reaches
    # torch.manual_seed/np.random.seed as an integer (previously it arrived
    # as a string and crashed seeding)
    parser.add_argument('--seed', type=int, default=66, help='seed for random test data generation')
    # parse the input arguments
    args = parser.parse_args()
    # run the evaluation process
    xyz_rec, conf_rec = test_model(args.path_results, args.postprocessing_params, args.exp_imgs_path, args.seed)
| [
"DeepSTORM3D.assessment_utils.calc_jaccard_rmse",
"DeepSTORM3D.data_utils.ExpDataset",
"DeepSTORM3D.postprocess_utils.Postprocess",
"DeepSTORM3D.physics_utils.EmittersToPhases",
"DeepSTORM3D.data_utils.generate_batch",
"numpy.column_stack",
"torch.from_numpy",
"numpy.not_equal",
"numpy.array",
"De... | [((146, 169), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (160, 169), False, 'import matplotlib\n'), ((934, 950), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (943, 950), True, 'import matplotlib.pyplot as plt\n'), ((1503, 1530), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (1513, 1530), True, 'import matplotlib.pyplot as plt\n'), ((1534, 1573), 'DeepSTORM3D.vis_utils.ShowMaskPSF', 'ShowMaskPSF', (['mask_param', 'vis_term', 'zvis'], {}), '(mask_param, vis_term, zvis)\n', (1545, 1573), False, 'from DeepSTORM3D.vis_utils import ShowMaskPSF, ShowRecovery3D, ShowLossJaccardAtEndOfEpoch\n'), ((1831, 1843), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1841, 1843), True, 'import matplotlib.pyplot as plt\n'), ((1848, 1935), 'DeepSTORM3D.vis_utils.ShowLossJaccardAtEndOfEpoch', 'ShowLossJaccardAtEndOfEpoch', (['learning_results', "learning_results['epoch_converged']"], {}), "(learning_results, learning_results[\n 'epoch_converged'])\n", (1875, 1935), False, 'from DeepSTORM3D.vis_utils import ShowMaskPSF, ShowRecovery3D, ShowLossJaccardAtEndOfEpoch\n'), ((2015, 2044), 'DeepSTORM3D.cnn_utils.LocalizationCNN', 'LocalizationCNN', (['setup_params'], {}), '(setup_params)\n', (2030, 2044), False, 'from DeepSTORM3D.cnn_utils import LocalizationCNN\n'), ((2317, 2358), 'DeepSTORM3D.postprocess_utils.Postprocess', 'Postprocess', (['thresh', 'radius', 'setup_params'], {}), '(thresh, radius, setup_params)\n', (2328, 2358), False, 'from DeepSTORM3D.postprocess_utils import Postprocess\n'), ((16674, 16699), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (16697, 16699), False, 'import argparse\n'), ((1124, 1143), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1135, 1143), False, 'import pickle\n'), ((1751, 1770), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1762, 1770), False, 'import pickle\n'), 
((2116, 2166), 'torch.load', 'torch.load', (["(path_results + 'weights_best_loss.pkl')"], {}), "(path_results + 'weights_best_loss.pkl')\n", (2126, 2166), False, 'import torch\n'), ((2580, 2629), 'DeepSTORM3D.vis_utils.PhysicalLayerVisualization', 'PhysicalLayerVisualization', (['setup_params', '(0)', '(0)', '(1)'], {}), '(setup_params, 0, 0, 1)\n', (2606, 2629), False, 'from DeepSTORM3D.vis_utils import PhysicalLayerVisualization, ShowRecNetInput\n'), ((2978, 3001), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2995, 3001), False, 'import torch\n'), ((3010, 3030), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3024, 3030), True, 'import numpy as np\n'), ((3102, 3133), 'DeepSTORM3D.data_utils.generate_batch', 'generate_batch', (['(1)', 'setup_params'], {}), '(1, setup_params)\n', (3116, 3133), False, 'from DeepSTORM3D.data_utils import generate_batch, complex_to_tensor, ExpDataset, sort_names_tif\n'), ((3206, 3244), 'DeepSTORM3D.physics_utils.EmittersToPhases', 'EmittersToPhases', (['xyz_gt', 'setup_params'], {}), '(xyz_gt, setup_params)\n', (3222, 3244), False, 'from DeepSTORM3D.physics_utils import EmittersToPhases\n'), ((3412, 3461), 'DeepSTORM3D.vis_utils.PhysicalLayerVisualization', 'PhysicalLayerVisualization', (['setup_params', '(1)', '(1)', '(0)'], {}), '(setup_params, 1, 1, 0)\n', (3438, 3461), False, 'from DeepSTORM3D.vis_utils import PhysicalLayerVisualization, ShowRecNetInput\n'), ((4822, 4833), 'time.time', 'time.time', ([], {}), '()\n', (4831, 4833), False, 'import time\n'), ((5140, 5151), 'time.time', 'time.time', ([], {}), '()\n', (5149, 5151), False, 'import time\n'), ((5728, 5740), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5738, 5740), True, 'import matplotlib.pyplot as plt\n'), ((5758, 5779), 'numpy.squeeze', 'np.squeeze', (['xyz_gt', '(0)'], {}), '(xyz_gt, 0)\n', (5768, 5779), True, 'import numpy as np\n'), ((5788, 5819), 'DeepSTORM3D.vis_utils.ShowRecovery3D', 'ShowRecovery3D', 
(['xyz_gt', 'xyz_rec'], {}), '(xyz_gt, xyz_rec)\n', (5802, 5819), False, 'from DeepSTORM3D.vis_utils import ShowMaskPSF, ShowRecovery3D, ShowLossJaccardAtEndOfEpoch\n'), ((6081, 6120), 'DeepSTORM3D.assessment_utils.calc_jaccard_rmse', 'calc_jaccard_rmse', (['xyz_gt', 'xyz_rec', '(0.1)'], {}), '(xyz_gt, xyz_rec, 0.1)\n', (6098, 6120), False, 'from DeepSTORM3D.assessment_utils import calc_jaccard_rmse\n'), ((6680, 6706), 'numpy.expand_dims', 'np.expand_dims', (['xyz_rec', '(0)'], {}), '(xyz_rec, 0)\n', (6694, 6706), True, 'import numpy as np\n'), ((6727, 6766), 'DeepSTORM3D.physics_utils.EmittersToPhases', 'EmittersToPhases', (['xyz_rec', 'setup_params'], {}), '(xyz_rec, setup_params)\n', (6743, 6766), False, 'from DeepSTORM3D.physics_utils import EmittersToPhases\n'), ((7176, 7245), 'DeepSTORM3D.vis_utils.ShowRecNetInput', 'ShowRecNetInput', (['test_input_im', '"""Simulated Input to Localization Net"""'], {}), "(test_input_im, 'Simulated Input to Localization Net')\n", (7191, 7245), False, 'from DeepSTORM3D.vis_utils import PhysicalLayerVisualization, ShowRecNetInput\n'), ((7254, 7329), 'DeepSTORM3D.vis_utils.ShowRecNetInput', 'ShowRecNetInput', (['test_pred_im', '"""Recovered Input Matching Net Localizations"""'], {}), "(test_pred_im, 'Recovered Input Matching Net Localizations')\n", (7269, 7329), False, 'from DeepSTORM3D.vis_utils import PhysicalLayerVisualization, ShowRecNetInput\n'), ((7537, 7571), 'glob.glob', 'glob.glob', (["(exp_imgs_path + '*.tif')"], {}), "(exp_imgs_path + '*.tif')\n", (7546, 7571), False, 'import glob\n'), ((7592, 7617), 'DeepSTORM3D.data_utils.sort_names_tif', 'sort_names_tif', (['img_names'], {}), '(img_names)\n', (7606, 7617), False, 'from DeepSTORM3D.data_utils import generate_batch, complex_to_tensor, ExpDataset, sort_names_tif\n'), ((1414, 1457), 'torch.from_numpy', 'torch.from_numpy', (["setup_params['mask_init']"], {}), "(setup_params['mask_init'])\n", (1430, 1457), False, 'import torch\n'), ((3848, 3875), 
'DeepSTORM3D.helper_utils.normalize_01', 'normalize_01', (['test_input_im'], {}), '(test_input_im)\n', (3860, 3875), False, 'from DeepSTORM3D.helper_utils import normalize_01, xyz_to_nm\n'), ((4683, 4712), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (4705, 4712), False, 'import torch\n'), ((4921, 4932), 'time.time', 'time.time', ([], {}), '()\n', (4930, 4932), False, 'import time\n'), ((5165, 5194), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (5187, 5194), False, 'import torch\n'), ((5264, 5275), 'time.time', 'time.time', ([], {}), '()\n', (5273, 5275), False, 'import time\n'), ((5500, 5526), 'numpy.not_equal', 'np.not_equal', (['nemitters', '(1)'], {}), '(nemitters, 1)\n', (5512, 5526), True, 'import numpy as np\n'), ((8083, 8103), 'skimage.io.imread', 'imread', (['img_names[0]'], {}), '(img_names[0])\n', (8089, 8103), False, 'from skimage.io import imread\n'), ((8813, 8839), 'numpy.expand_dims', 'np.expand_dims', (['exp_img', '(0)'], {}), '(exp_img, 0)\n', (8827, 8839), True, 'import numpy as np\n'), ((8862, 8888), 'numpy.expand_dims', 'np.expand_dims', (['exp_img', '(0)'], {}), '(exp_img, 0)\n', (8876, 8888), True, 'import numpy as np\n'), ((9493, 9504), 'time.time', 'time.time', ([], {}), '()\n', (9502, 9504), False, 'import time\n'), ((9835, 9846), 'time.time', 'time.time', ([], {}), '()\n', (9844, 9846), False, 'import time\n'), ((10135, 10147), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10145, 10147), True, 'import matplotlib.pyplot as plt\n'), ((10165, 10190), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (10173, 10190), True, 'import matplotlib.pyplot as plt\n'), ((10428, 10463), 'matplotlib.pyplot.title', 'plt.title', (['"""3D Recovered Positions"""'], {}), "('3D Recovered Positions')\n", (10437, 10463), True, 'import matplotlib.pyplot as plt\n'), ((11101, 11150), 
'DeepSTORM3D.vis_utils.PhysicalLayerVisualization', 'PhysicalLayerVisualization', (['setup_params', '(0)', '(0)', '(1)'], {}), '(setup_params, 0, 0, 1)\n', (11127, 11150), False, 'from DeepSTORM3D.vis_utils import PhysicalLayerVisualization, ShowRecNetInput\n'), ((11225, 11251), 'numpy.expand_dims', 'np.expand_dims', (['xyz_rec', '(0)'], {}), '(xyz_rec, 0)\n', (11239, 11251), True, 'import numpy as np\n'), ((11276, 11315), 'DeepSTORM3D.physics_utils.EmittersToPhases', 'EmittersToPhases', (['xyz_rec', 'setup_params'], {}), '(xyz_rec, setup_params)\n', (11292, 11315), False, 'from DeepSTORM3D.physics_utils import EmittersToPhases\n'), ((11752, 11821), 'DeepSTORM3D.vis_utils.ShowRecNetInput', 'ShowRecNetInput', (['exp_tensor', '"""Experimental Input to Localization Net"""'], {}), "(exp_tensor, 'Experimental Input to Localization Net')\n", (11767, 11821), False, 'from DeepSTORM3D.vis_utils import PhysicalLayerVisualization, ShowRecNetInput\n'), ((11834, 11908), 'DeepSTORM3D.vis_utils.ShowRecNetInput', 'ShowRecNetInput', (['exp_pred_im', '"""Recovered Input Matching Net Localizations"""'], {}), "(exp_pred_im, 'Recovered Input Matching Net Localizations')\n", (11849, 11908), False, 'from DeepSTORM3D.vis_utils import PhysicalLayerVisualization, ShowRecNetInput\n'), ((12498, 12533), 'DeepSTORM3D.data_utils.ExpDataset', 'ExpDataset', (['img_names', 'setup_params'], {}), '(img_names, setup_params)\n', (12508, 12533), False, 'from DeepSTORM3D.data_utils import generate_batch, complex_to_tensor, ExpDataset, sort_names_tif\n'), ((12562, 12615), 'torch.utils.data.DataLoader', 'DataLoader', (['exp_test_set'], {'batch_size': '(1)', 'shuffle': '(False)'}), '(exp_test_set, batch_size=1, shuffle=False)\n', (12572, 12615), False, 'from torch.utils.data import DataLoader\n'), ((12689, 12700), 'time.time', 'time.time', ([], {}), '()\n', (12698, 12700), False, 'import time\n'), ((13144, 13211), 'numpy.array', 'np.array', (["['frame', 'x [nm]', 'y [nm]', 'z [nm]', 'intensity [au]']"], 
{}), "(['frame', 'x [nm]', 'y [nm]', 'z [nm]', 'intensity [au]'])\n", (13152, 13211), True, 'import numpy as np\n'), ((3273, 3301), 'DeepSTORM3D.data_utils.complex_to_tensor', 'complex_to_tensor', (['phases_np'], {}), '(phases_np)\n', (3290, 3301), False, 'from DeepSTORM3D.data_utils import generate_batch, complex_to_tensor, ExpDataset, sort_names_tif\n'), ((5558, 5584), 'numpy.squeeze', 'np.squeeze', (['nphotons_gt', '(0)'], {}), '(nphotons_gt, 0)\n', (5568, 5584), True, 'import numpy as np\n'), ((6796, 6824), 'DeepSTORM3D.data_utils.complex_to_tensor', 'complex_to_tensor', (['phases_np'], {}), '(phases_np)\n', (6813, 6824), False, 'from DeepSTORM3D.data_utils import generate_batch, complex_to_tensor, ExpDataset, sort_names_tif\n'), ((8293, 8314), 'DeepSTORM3D.helper_utils.normalize_01', 'normalize_01', (['exp_img'], {}), '(exp_img)\n', (8305, 8314), False, 'from DeepSTORM3D.helper_utils import normalize_01, xyz_to_nm\n'), ((9345, 9374), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (9367, 9374), False, 'import torch\n'), ((9600, 9611), 'time.time', 'time.time', ([], {}), '()\n', (9609, 9611), False, 'import time\n'), ((9864, 9893), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (9886, 9893), False, 'import torch\n'), ((9968, 9979), 'time.time', 'time.time', ([], {}), '()\n', (9977, 9979), False, 'import time\n'), ((13229, 13258), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (13251, 13258), False, 'import torch\n'), ((15942, 15953), 'time.time', 'time.time', ([], {}), '()\n', (15951, 15953), False, 'import time\n'), ((16447, 16463), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (16457, 16463), False, 'import csv\n'), ((6936, 6969), 'torch.ones', 'torch.ones', (['(1, xyz_rec.shape[1])'], {}), '((1, xyz_rec.shape[1]))\n', (6946, 6969), False, 'import torch\n'), ((8914, 8940), 'torch.FloatTensor', 'torch.FloatTensor', (['exp_img'], {}), 
'(exp_img)\n', (8931, 8940), False, 'import torch\n'), ((11349, 11377), 'DeepSTORM3D.data_utils.complex_to_tensor', 'complex_to_tensor', (['phases_np'], {}), '(phases_np)\n', (11366, 11377), False, 'from DeepSTORM3D.data_utils import generate_batch, complex_to_tensor, ExpDataset, sort_names_tif\n'), ((13532, 13543), 'time.time', 'time.time', ([], {}), '()\n', (13541, 13543), False, 'import time\n'), ((16102, 16153), 'numpy.floor', 'np.floor', (['((tall_end / 3600 - tall_end // 3600) * 60)'], {}), '((tall_end / 3600 - tall_end // 3600) * 60)\n', (16110, 16153), True, 'import numpy as np\n'), ((3564, 3593), 'torch.from_numpy', 'torch.from_numpy', (['nphotons_gt'], {}), '(nphotons_gt)\n', (3580, 3593), False, 'import torch\n'), ((11497, 11530), 'torch.ones', 'torch.ones', (['(1, xyz_rec.shape[1])'], {}), '((1, xyz_rec.shape[1]))\n', (11507, 11530), False, 'import torch\n'), ((14039, 14050), 'time.time', 'time.time', ([], {}), '()\n', (14048, 14050), False, 'import time\n'), ((14762, 14814), 'DeepSTORM3D.helper_utils.xyz_to_nm', 'xyz_to_nm', (['xyz_rec', '(H * 2)', '(W * 2)', 'psize_rec_xy', 'zmin'], {}), '(xyz_rec, H * 2, W * 2, psize_rec_xy, zmin)\n', (14771, 14814), False, 'from DeepSTORM3D.helper_utils import normalize_01, xyz_to_nm\n'), ((15136, 15151), 'matplotlib.pyplot.figure', 'plt.figure', (['(100)'], {}), '(100)\n', (15146, 15151), True, 'import matplotlib.pyplot as plt\n'), ((15256, 15286), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im_np'], {'cmap': '"""gray"""'}), "(im_np, cmap='gray')\n", (15266, 15286), True, 'import matplotlib.pyplot as plt\n'), ((15311, 15403), 'matplotlib.pyplot.plot', 'plt.plot', (['(xyz_rec[:, 0] / pixel_size_FOV + cw)', '(xyz_rec[:, 1] / pixel_size_FOV + ch)', '"""r+"""'], {}), "(xyz_rec[:, 0] / pixel_size_FOV + cw, xyz_rec[:, 1] /\n pixel_size_FOV + ch, 'r+')\n", (15319, 15403), True, 'import matplotlib.pyplot as plt\n'), ((15590, 15600), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (15598, 15600), True, 'import 
matplotlib.pyplot as plt\n'), ((15625, 15640), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.05)'], {}), '(0.05)\n', (15634, 15640), True, 'import matplotlib.pyplot as plt\n'), ((15665, 15674), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (15672, 15674), True, 'import matplotlib.pyplot as plt\n'), ((14298, 14313), 'numpy.floor', 'np.floor', (['(H / 2)'], {}), '(H / 2)\n', (14306, 14313), True, 'import numpy as np\n'), ((14315, 14330), 'numpy.floor', 'np.floor', (['(W / 2)'], {}), '(W / 2)\n', (14323, 14330), True, 'import numpy as np\n'), ((14708, 14726), 'numpy.ones', 'np.ones', (['nemitters'], {}), '(nemitters)\n', (14715, 14726), True, 'import numpy as np\n'), ((14865, 14911), 'numpy.column_stack', 'np.column_stack', (['(frm_rec, xyz_save, conf_rec)'], {}), '((frm_rec, xyz_save, conf_rec))\n', (14880, 14911), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.