= "{0} about as intelligent as a freshman in the high school of your choice; loyal, devoted, honest, and too easily screwed over by bastards like {1}.".format(target, sender)
return msg
def amber3(self):
if lt == 0 and le == 0:
msg = "Talk is cheap, whiskey costs money."
elif lt > 0 and le == 0:
msg = "Talk is cheap, {0}, whiskey costs money.".format(target)
elif lt == 0 and le > 0:
msg = "Talk is fucking cheap, whiskey costs money."
elif lt > 0 and le > 0:
msg = "Talk is fucking cheap, {0}, whiskey costs money.".format(
target)
return msg
def apple(self):
if lt == 0 and ls == 0:
msg = "No you fucking can't do it your way! We don't give a fuck if it's better, you do it our fucking way or you fuck off!"
elif lt > 0 and ls == 0:
msg = "No {0}, you fucking can't do it your way! We don't give a fuck if it's better, you do it our fucking way or you fuck off!".format(
target)
elif lt > 0 and ls > 0:
msg = "No {0}, you fucking can't do it your way! We don't give a fuck if it's better, you do it our fucking way or you fuck off! -- {1}".format(
target, sender)
else:
msg = "No {0}, you fucking can't do it your way! We don't give a fuck if it's better, you do it our fucking way or you fuck off!".format(
target)
return msg
# From _Drive Angry_ (2011):
def badge(self):
if lt == 0 and le == 0:
msg = "You know what this badge means? Federal Bureau of get the fuck outta my way!"
elif lt > 0 and le == 0:
msg = "You know what this badge means, {0}? Federal Bureau of get the fuck outta my way!".format(
target)
elif lt > 0 and le > 0:
msg = "You know what this badge means, {0}? {1} of get the fuck outta my way!".format(
target, extra)
else:
msg = "You know what this badge means? Federal Bureau of get the fuck outta my way!"
return msg
def ballmer(self):
if lt == 0 and le == 0 and ls == 0:
msg = "Ballmer Notes: This option requires the first target specified with --name and the second (usually a company or organisation) with --extra (sender optional). For a gender neutral version use ballmerc on --fuck, for a plural version use ballmers on --fuck."
elif lt > 0 and le == 0 and ls == 0:
msg = "Fucking {0} is a fucking pussy. I'm going to bury that guy, I have done it before and I will do it again. I'm going to fucking kill {1}.".format(
target, target)
elif lt > 0 and le > 0 and ls == 0:
msg = "Fucking {0} is a fucking pussy. I'm going to bury that guy, I have done it before and I will do it again. I'm going to fucking kill {1}.".format(
target, extra)
elif lt > 0 and le == 0 and ls > 0:
msg = "Fucking {0} is a fucking pussy. I'm going to bury that guy, I have done it before and I will do it again. I'm going to fucking kill {1}. -- {2}".format(
target, target, sender)
elif lt > 0 and le > 0 and ls > 0:
msg = "Fucking {0} is a fucking pussy. I'm going to bury that guy, I have done it before and I will do it again. I'm going to fucking kill {1}. -- {2}".format(
target, extra, sender)
return msg
def ballmerc(self):
if lt == 0 and le == 0 and ls == 0:
msg = "Ballmer Notes: This option requires the first target specified with --name and the second (usually a company or organisation) with --extra (sender optional). This is the gender neutral (and harsher) version, for a plural version on --name use ballmers on --fuck."
elif lt > 0 and le == 0 and ls == 0:
msg = "Fucking {0} is a little fucking bitch. I'm going to bury that cunt, I have done it before and I will do it again. I'm going to fucking kill {1}.".format(
target, target)
elif lt > 0 and le > 0 and ls == 0:
msg = "Fucking {0} is a little fucking bitch. I'm going to bury that cunt, I have done it before and I will do it again. I'm going to fucking kill {1}.".format(
target, extra)
elif lt > 0 and le == 0 and ls > 0:
msg = "Fucking {0} is a little fucking bitch. I'm going to bury that cunt, I have done it before and I will do it again. I'm going to fucking kill {1}. -- {2}".format(
target, target, sender)
elif lt > 0 and le > 0 and ls > 0:
msg = "Fucking {0} is a little fucking bitch. I'm going to bury that cunt, I have done it before and I will do it again. I'm going to fucking kill {1}. -- {2}".format(
target, extra, sender)
return msg
def ballmers(self):
if lt == 0 and le == 0 and ls == 0:
msg = "Plural Ballmer Notes: This option requires the first targets specified with --name in quotation marks (e.g. 'name1 and name2' or 'name1, name2 and name3') and the second (usually a company or organisation) with --extra (sender optional)."
elif lt > 0 and le == 0 and ls == 0:
msg = "Fucking {0} are fucking pussies. I'm going to bury those guys, I have done it before and I will do it again. I'm going to fucking kill {1}.".format(
target, target)
elif lt > 0 and le > 0 and ls == 0:
msg = "Fucking {0} are fucking pussies. I'm going to bury those guys, I have done it before and I will do it again. I'm going to fucking kill {1}.".format(
target, extra)
elif lt > 0 and le == 0 and ls > 0:
msg = "Fucking {0} are fucking pussies. I'm going to bury those guys, I have done it before and I will do it again. I'm going to fucking kill {1}. -- {2}".format(
target, target, sender)
elif lt > 0 and le > 0 and ls > 0:
msg = "Fucking {0} are fucking pussies. I'm going to bury those guys, I have done it before and I will do it again. I'm going to fucking kill {1}. -- {2}".format(
target, extra, sender)
return msg
def bbm(self):
if lt == 0 and ls == 0:
msg = "Big bad motherfucker."
elif lt == 0 and ls > 0:
msg = "{0} is a big bad motherfucker.".format(sender)
elif lt > 0 and ls > 0:
msg = "{0}, {1} is a big bad motherfucker.".format(target, sender)
else:
msg = "{0}, {1} is a big bad motherfucker.".format(target, sender)
return msg
def because(self): # wbfu option uses full stops, otherwise the same.
if lt == 0:
msg = "Because fuck you, that's why!"
elif lt > 0:
msg = "Because fuck you, {0}, that's why!".format(target)
return msg
def boomer(self):
if lt == 0 and ls == 0:
msg = "Shut the fuck up, boomer!"
elif lt > 0 and ls == 0:
msg = "{0}, shut the fuck up boomer!".format(target)
elif lt == 0 and ls > 0:
msg = "Shut the fuck up, you fucking boomer!"
elif lt > 0 and ls > 0:
msg = "Shut the fuck up {0}, you fucking boomer!".format(target)
else:
msg = "Shut the fuck up, you fucking boomer!"
return msg
def bus(self):
if lt == 0:
msg = "Christ on a bendy-bus, don't be such a fucking faff-arse."
# Repository: siyuan-chen/la_forge
#!/usr/bin/env python
import os.path
import corner
import matplotlib.pyplot as plt
import numpy as np
from . import utils
__all__ = ['determine_if_limit',
'get_rn_freqs',
'get_Tspan',
'plot_rednoise_spectrum',
'plot_powerlaw',
'plot_tprocess',
'plot_free_spec',
]
secperyr = 365.25*24*3600
fyr = 1./secperyr
def determine_if_limit(vals, threshold=0.1, minval=-10, lower_q=0.3):
"""
Function to determine if an array or list of values is sufficiently
separate from the minimum value.
Parameters
----------
vals : array or list
threshold: float
Threshold above `minval` for determining whether to count as
a two-sided interval.
minval: float
Minimum possible value for posterior.
lower_q: float
Percentile value to evaluate lower bound.
"""
lowerbound = np.percentile(vals, q=lower_q)
if lowerbound > minval + threshold:
return False
else:
return True
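# Illustrative sketch (not part of la_forge): two made-up posteriors showing how
# `determine_if_limit` separates an upper limit (samples piled against the prior
# minimum) from a two-sided measurement. Values and the seed are arbitrary.
def _example_determine_if_limit():
    rng = np.random.default_rng(0)
    limit_like = rng.uniform(-10.0, -9.5, size=5000)  # hugs the default minval of -10
    measured = rng.normal(-7.0, 0.2, size=5000)       # well separated from -10
    # Expected result: (True, False) -- only the first is treated as a limit.
    return determine_if_limit(limit_like), determine_if_limit(measured)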
def gorilla_bf(array, max=-4, min=-10, nbins=None):
"""
Function to determine if the smallest amplitude bin is more or less probable
than the prior.
"""
prior = 1/(max-min)
if nbins is None:
nbins=int(max-min)
bins = np.linspace(min, max, nbins+1)
hist, _ = np.histogram(array, bins=bins, density=True)
if hist[0] == 0:
return np.nan
else:
return prior/hist[0]
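# Illustrative sketch (not part of la_forge): a Savage-Dickey-style use of
# `gorilla_bf`. The returned value is prior density / posterior density in the
# lowest-amplitude bin; a ratio near 1 means the data have not updated the prior
# there, and an empty lowest bin gives NaN. The samples below are made up.
def _example_gorilla_bf():
    rng = np.random.default_rng(1)
    flat = rng.uniform(-10, -4, size=20000)       # prior-like posterior: ratio is close to 1
    peaked = rng.normal(-6.0, 0.3, size=20000)    # lowest bin empty: returns np.nan
    return gorilla_bf(flat), gorilla_bf(peaked)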
def get_rn_freqs(core):
"""
Get red noise frequency array from a core, with error message if noise
array has not been included.
"""
if core.rn_freqs is None:
raise ValueError('Please set red noise frequency array in '
'the core named {0}.'.format(core.label))
else:
return core.rn_freqs, core.rn_freqs.size
def get_Tspan(pulsar, filepath=None, fourier_components=None,
datadir=None):
"""
Function for getting the timespan of a set of pulsar data.
Parameters
----------
pulsar : str
filepath : str
Filepath to a `txt` file with pulsar name and timespan in two
columns. If supplied this file is used to return the timespan.
fourier_components : list or array
Frequencies used in gaussian process modeling. If given
`1/numpy.amin(fourier_components)` is returned as the timespan.
datadir : str
Directory with pulsar data (assumed the same for `tim` and `par`
files.) Calls the `utils.get_Tspan()` method which loads an
`enterprise.Pulsar()` and extracts the timespan.
"""
if filepath:
if os.path.isfile(filepath):
data = np.loadtxt(filepath, dtype='str')
psrs = list(data[:, 0])
return float(data[psrs.index(pulsar), 1])
# elif os.path.isdir(filepath):
elif datadir is not None:
return utils.get_Tspan(pulsar, datadir)
elif fourier_components is not None:
return 1/np.amin(fourier_components)
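# Illustrative sketch (not part of la_forge): when only Gaussian-process
# frequencies are available, `get_Tspan` falls back to 1/min(frequencies).
# The pulsar name and baseline below are made up.
def _example_tspan_from_frequencies():
    Tspan = 11.4 * secperyr                  # ~11.4 yr baseline, in seconds
    freqs = np.arange(1, 31) / Tspan         # 30 harmonics of 1/Tspan
    recovered = get_Tspan('J0000+0000', fourier_components=freqs)
    return np.isclose(recovered, Tspan)      # True: 1/np.amin(freqs) == Tspan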
def plot_rednoise_spectrum(pulsar, cores, show_figure=True, rn_types=None, # noqa: C901
plot_2d_hist=True, verbose=True, Tspan=None,
title_suffix='', freq_yr=1, plotpath=None,
cmap='gist_rainbow', show_title=True,
n_plaw_realizations=0, n_tproc_realizations=1000,
n_bplaw_realizations=100, Colors=None, bins=30,
labels=None, legend=True, legend_loc=None, leg_alpha=1.0,
Bbox_anchor=(0.5, -0.25, 1.0, 0.2),
line_xtra=None, line_ytra=None, freq_xtra=None,
freq_ytra=None, free_spec_min=None, free_spec_ci=95,
free_spec_violin=False, free_spec_errorbar=True,
free_spec_errorbar_ul=False, free_spec_ul=False,
ncol=None, plot_density=None, plot_contours=None,
add_2d_scatter=None, bplaw_kwargs={},
return_plot=False, excess_noise=False,
levels=(0.39346934, 0.86466472, 0.988891,),
plot_orf=False, orf_ci=None, orf_linestyle=None,
orf_bins=None, orf_types=None,
orf_separation=None, orf_realizations=False):
"""
Function to plot various red noise parameters in the same figure.
Parameters
----------
pulsar : str
cores : list
List of `la_forge.core.Core()` objects which contain the posteriors
for the relevant red noise parameters to be plotted.
Tspan : float, optional
Timespan of the data set. Used for converting amplitudes to
residual time. Calculated from lowest red noise frequency if not
provided.
show_figure : bool
rn_types : list {'','_dm_gp','_chrom_gp','_red_noise','gw'}
List of strings to choose which type of red noise
parameters are used in each of the plots.
plot_2d_hist : bool, optional
Whether to include two dimensional histogram of powerlaw red noise
parameters.
verbose : bool, optional
title_suffix : str, optional
Added to title of red noise plot as:
'Red Noise Spectrum: ' + pulsar + ' ' + title_suffix
freq_yr : int , optional
Number of 1/year harmonics to include in plot.
plotpath : str, optional
Path and file name to which plot will be saved.
cmap : str, optional
Color map from which to cycle plot colors, if not given in the Colors
kwarg.
n_plaw_realizations : int, optional
Number of powerlaw realizations to plot.
n_tproc_realizations : int, optional
Number of T-process realizations to plot.
Colors : list, optional
List of colors to cycle through in plots.
labels : list, optional
Labels of various plots, for legend.
legend_loc : tuple or str, optional
Legend location with respect to Bbox_anchor.
leg_alpha : float, optional
Opacity of legend background.
Bbox_anchor : tuple, optional
This is the bbox_to_anchor value for the legend.
"""
if any([c.rn_freqs is None for c in cores]):
msg = 'Red noise frequencies must be set before plotting red '
msg += 'noise figures.\n'
msg += 'Please use core.set_rn_freqs() to set, if needed.'
raise ValueError(msg)
if plot_2d_hist:
fig, axes = plt.subplots(1, 2, figsize=(12, 4.2))
elif excess_noise:
axes = []
fig = plt.figure(figsize=(7, 4))
ax1 = plt.subplot2grid((4, 4), (0, 0), colspan=4, rowspan=3, fig=fig)
ax2 = plt.subplot2grid((4, 4), (3, 0), colspan=4, rowspan=1,
fig=fig) # , sharex=ax1)
axes.append(ax1)
axes.append(ax2)
else:
axes = []
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
axes.append(ax)
if plot_density is not None and (len(plot_density)!=len(cores)):
raise ValueError('\"plot_density\" list must have the same '
'number of entries as \"cores\"')
elif plot_density is None:
plot_density = np.zeros_like(cores, dtype=bool)
if plot_contours is not None and (len(plot_contours)!=len(cores)):
raise ValueError('\"plot_contours\" list must have the same '
'number of entries as \"cores\"')
elif plot_contours is None:
plot_contours = np.ones_like(cores, dtype=bool)
ax1_ylim = []
free_spec_ct = 0
tproc_ct = 0
tproc_adapt_ct = 0
plaw_ct = 0
color_idx = 0
lines = []
if labels is None:
make_labels = True
labels = []
else:
make_labels = False
if Colors is None:
cm = plt.get_cmap(cmap)
NUM_COLORS = len(cores)
Colors = cm(np.arange(NUM_COLORS)/NUM_COLORS)
for ii, (c, rn_type) in enumerate(zip(cores, rn_types)):
if all([pulsar not in par for par in c.params]):
raise ValueError('Pulsar not in any parameter names.')
# Free Spectral Plotting
if pulsar + rn_type + '_log10_rho_0' in c.params:
Color = Colors[color_idx]
if free_spec_ct==1:
Fillstyle = 'none'
else:
Fillstyle = 'full'
par_root = pulsar + rn_type + '_log10_rho'
plot_free_spec(c, axes[0], Tspan=Tspan, parname_root=par_root,
prior_min=free_spec_min, Color=Color,
ci=free_spec_ci, Fillstyle=Fillstyle,
verbose=verbose, violin=free_spec_violin,
errorbar=free_spec_errorbar,
errorbar_ul=free_spec_errorbar_ul,
plot_ul=free_spec_ul)
lines.append(plt.Line2D([0], [0], color=Color, linestyle='None',
marker='o', fillstyle=Fillstyle))
if make_labels is True:
labels.append('Free Spectral')
free_spec_ct += 1
color_idx += 1
# T-Process Plotting
elif pulsar + rn_type + '_alphas_0' in c.params:
amp_par = pulsar+rn_type+'_log10_A'
gam_par = pulsar+rn_type+'_gamma'
Color = Colors[color_idx]
par_root = pulsar + rn_type + '_alphas'
plot_tprocess(c, axes[0], amp_par=amp_par, gam_par=gam_par,
alpha_parname_root=par_root, Color=Color,
n_realizations=n_tproc_realizations,
Tspan=Tspan)
if plot_2d_hist:
corner.hist2d(c.get_param(gam_par)[c.burn:],
c.get_param(amp_par)[c.burn:],
bins=bins, ax=axes[1], plot_datapoints=False,
plot_density=plot_density[ii],
plot_contours=plot_contours[ii],
no_fill_contours=True, color=Color)
ax1_ylim.append(list(axes[1].get_ylim()))
# Track lines and labels for legend
lines.append(plt.Line2D([0], [0], color=Color, linewidth=2))
if make_labels is True:
labels.append('T-Process')
tproc_ct += 1
color_idx += 1
# Adaptive T-Process Plotting
elif pulsar + rn_type + '_alphas_adapt_0' in c.params:
amp_par = pulsar+rn_type+'_log10_A'
gam_par = pulsar+rn_type+'_gamma'
Color = Colors[color_idx]
alpha_par = pulsar + rn_type + '_alphas_adapt_0'
nfreq_par = pulsar + rn_type + '_nfreq'
plot_adapt_tprocess(c, axes[0], amp_par=amp_par, gam_par=gam_par,
alpha_par=alpha_par, nfreq_par=nfreq_par,
n_realizations=100, Color=Color,
Tspan=Tspan)
if plot_2d_hist:
corner.hist2d(c.get_param(gam_par)[c.burn:],
c.get_param(amp_par)[c.burn:],
bins=bins, ax=axes[1], plot_datapoints=False,
plot_density=plot_density[ii],
plot_contours=plot_contours[ii],
no_fill_contours=True, color=Color)
ax1_ylim.append(list(axes[1].get_ylim()))
# Track lines and labels for legend
lines.append(plt.Line2D([0], [0], color=Color, linewidth=2))
if make_labels is True:
labels.append('Adaptive T-Process')
tproc_adapt_ct += 1
color_idx += 1
# Broken Power Law Plotting
elif pulsar + rn_type + '_log10_fb' in c.params:
amp_par = pulsar + rn_type + '_log10_A'
gam_par = pulsar + rn_type + '_gamma'
fb_par = pulsar + rn_type + '_log10_fb'
del_par = pulsar + rn_type + '_delta'
kappa_par = pulsar + rn_type + '_kappa'
Color = Colors[color_idx]
plot_broken_powerlaw(c, axes[0], amp_par, gam_par, del_par,
fb_par, kappa_par,
verbose=True, Color=Color,
Linestyle='-',
n_realizations=n_bplaw_realizations,
Tspan=None, to_resid=True, **bplaw_kwargs)
if plot_2d_hist:
corner.hist2d(c.get_param(gam_par)[c.burn:],
c.get_param(amp_par)[c.burn:],
bins=bins, ax=axes[1], plot_datapoints=False,
plot_density=plot_density[ii],
plot_contours=plot_contours[ii],
no_fill_contours=True, color=Color)
ax1_ylim.append(list(axes[1].get_ylim()))
# Track lines and labels for legend
lines.append(plt.Line2D([0], [0], color=Color, linewidth=2))
if make_labels is True:
labels.append('Broken Power Law')
tproc_adapt_ct += 1
color_idx += 1
### Flat Powerlaw Plotting
elif pulsar + rn_type + '_log10_B' in c.params:
amp_par = pulsar+rn_type+'_log10_A'
gam_par = pulsar+rn_type+'_gamma'
flat_par = pulsar+rn_type+'_log10_B'
if plaw_ct==1:
Linestyle = '-'
else:
Linestyle = '-'
Color = Colors[color_idx]
plot_flat_powerlaw(c, axes[0], amp_par, gam_par, flat_par,
Color=Color, Linestyle=Linestyle, Tspan=None,
verbose=verbose,
n_realizations=n_plaw_realizations)
if plot_2d_hist:
corner.hist2d(c.get_param(gam_par, to_burn=True),
c.get_param(amp_par, to_burn=True),
bins=bins, ax=axes[1], plot_datapoints=False,
plot_density=plot_density[ii],
plot_contours=plot_contours[ii],
no_fill_contours=True, color=Color,
levels=levels)
ax1_ylim.append(list(axes[1].get_ylim()))
lines.append(plt.Line2D([0], [0],color=Color,linewidth=2,
linestyle=Linestyle))
if make_labels is True: labels.append('Flat Power Law')
plaw_ct += 1
color_idx += 1
### Legendre ORF Plotting
elif 'gw_orf_legendre_0' in c.params:
amp_par = pulsar+rn_type+'_log10_A'
gam_par = pulsar+rn_type+'_gamma'
par_root = 'gw_orf_legendre'
if plaw_ct==1:
Linestyle = '-'
else:
Linestyle = '-'
Color = Colors[color_idx]
if plot_orf:
plot_decomp_orf(c, axes[0], parname_root=par_root,
Color=Color, Linestyle=orf_linestyle,
ci=orf_ci, separation=orf_separation,
tp=orf_types, realizations=orf_realizations,
decomp='legendre')
else:
plot_powerlaw(c, axes[0], amp_par, gam_par, Color=Color,
Linestyle=Linestyle, Tspan=None, verbose=verbose,
n_realizations=n_plaw_realizations)
if plot_2d_hist:
corner.hist2d(c.get_param(gam_par, to_burn=True),
c.get_param(amp_par, to_burn=True),
bins=bins, ax=axes[1],
# File: GUAP/generate_perturbation.py
#!/usr/bin/env python
# coding: utf-8
from __future__ import division
from __future__ import print_function
import os
import time
import argparse
import numpy as np
import math
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from utils import load_data, accuracy, normalize, load_polblogs_data
from models import GCN
from torch.autograd.gradcheck import zero_gradients
import os.path as op
os.environ["CUDA_VISIBLE_DEVICES"]="1"
parser = argparse.ArgumentParser()
parser.add_argument('--cuda', action='store_true', default=True,
help='Enables CUDA training.')
parser.add_argument('--fastmode', action='store_true', default=False,
help='Validate during training pass.')
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--epochs', type=int, default=200,
help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.01,
help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=16,
help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.5,
help='Dropout rate (1 - keep probability).')
parser.add_argument('--dataset', type=str, default="citeseer",
help='The name of the network dataset.')
parser.add_argument('--radius', type=int, default=12,
help='The radius of l2 norm projection')
parser.add_argument('--fake_rate', type=float, default=0.02,
help='The ratio of patch nodes to the graph size')
parser.add_argument('--step', type=int, default=10,
help='The learning step of updating the connection entries')
parser.add_argument('--sample_percent', type=int, default=40,
help='The sampling ratio of train set')
args = parser.parse_args()
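# Example invocation (illustrative; the dataset and values are just one possible choice):
#   python generate_perturbation.py --dataset citeseer --fake_rate 0.02 --radius 12 --step 10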
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
if args.dataset == "polblogs":
tmp_adj, tmp_feat, labels, train_idx, val_idx, test_idx = load_polblogs_data()
else:
_, _, labels, train_idx, val_idx, test_idx, tmp_adj, tmp_feat = load_data(args.dataset)
num_classes = labels.max().item() + 1
# tmp_adj = tmp_adj.toarray()
adj = tmp_adj
adj = np.eye(tmp_adj.shape[0]) + adj
adj, _ = normalize(adj)
adj = torch.from_numpy(adj.astype(np.float32))
feat, _ = normalize(tmp_feat)
feat = torch.FloatTensor(np.array(feat.todense()))
tmp_feat = tmp_feat.todense()
num_fake = int(tmp_adj.shape[0] * args.fake_rate)
global new_feat
global new_adj
# args.radius = int(np.sum(tmp_adj)/tmp_adj.shape[0])
# Model and optimizer
model = GCN(nfeat=feat.shape[1],
nhid=args.hidden,
nclass=num_classes,
dropout=args.dropout
)
optimizer = optim.Adam(model.parameters(),
lr=args.lr, weight_decay=args.weight_decay)
if args.cuda:
model.cuda()
features = feat.cuda()
adj = adj.cuda()
labels = labels.cuda()
idx_train = train_idx.cuda()
idx_val = val_idx.cuda()
idx_test = test_idx.cuda()
def train(epoch):
t = time.time()
model.train()
optimizer.zero_grad()
x = Variable(adj, requires_grad=True)
output = model(features, x)
loss_train = F.nll_loss(output[idx_train], labels[idx_train])
acc_train = accuracy(output[idx_train], labels[idx_train])
loss_train.backward()
optimizer.step()
loss_val = F.nll_loss(output[idx_val], labels[idx_val])
acc_val = accuracy(output[idx_val], labels[idx_val])
print('Epoch: {:04d}'.format(epoch+1),
'loss_train: {:.4f}'.format(loss_train.item()),
'acc_train: {:.4f}'.format(acc_train.item()),
'loss_val: {:.4f}'.format(loss_val.item()),
'acc_val: {:.4f}'.format(acc_val.item()),
'time: {:.4f}s'.format(time.time() - t))
def test(feat, adj_m):
model.eval()
output = model(feat, adj_m)
loss_test = F.nll_loss(output[idx_test], labels[idx_test])
acc_test = accuracy(output[idx_test], labels[idx_test])
print("Test set results:",
"loss= {:.4f}".format(loss_test.item()),
"accuracy= {:.4f}".format(acc_test.item()))
return output
t_total = time.time()
for epoch in range(args.epochs):
train(epoch)
print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
# torch.save(model, './cora_gcn.pth')
# torch.save(model.state_dict(), 'cora_gcn.pkl')
# Testing
ori_output = test(features, adj)
correct_res = ori_output[idx_train, labels[idx_train]] #the prediction probability of all train nodes
def add_fake_node(adj, innormal_features, features, file_path):
#modify the adjacency matrix
num_ori = adj.shape[0]
num_new = num_ori + num_fake
C = np.zeros((num_ori, num_fake))
CT = np.zeros((num_fake, num_ori))
B = np.zeros((num_fake, num_fake))
##################
# B = np.ones((num_fake, num_fake)) - np.eye(num_fake)
##################
adj = np.concatenate((adj, C), axis = 1)
CTB = np.concatenate((CT, B), axis = 1)
adj = np.concatenate((adj, CTB), axis = 0)
#add the node features
# sel_idx = torch.randint(0, num_ori, (num_fake,))
# feat_fake = features[sel_idx]
feat_fake = gaussian_dist(innormal_features)
features = np.concatenate((features, feat_fake), 0)
np.save(file_path, features)
features = torch.from_numpy(features)
return adj, features
def gaussian_dist(innormal_features):
# while True: #the generated node feature shouldnt be all 0
feat_mean = np.mean(innormal_features, axis = 0)
feat_std = np.std(innormal_features, axis = 0)
feat_fake = np.zeros((num_fake, innormal_features.shape[1]))
for i in range(innormal_features.shape[1]):
feat_fake[:,i] = np.random.normal(feat_mean[0, i], feat_std[0, i], num_fake).reshape(feat_fake[:,i].shape)
# print (i, feat_fake[:,i])
feat_fake = np.where(feat_fake > 0.5, 1, 0).astype(np.float32)
feat_fake, _ = normalize(feat_fake)
# if np.sum(feat_fake) >= num_fake:
# break
return feat_fake
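# Illustrative sketch (not part of GUAP): the fake-feature recipe above in miniature --
# fit a per-column Gaussian to (binary) real features, sample from it, then threshold
# at 0.5 to obtain binary fake features. The stand-in feature matrix is made up.
def _example_fake_features(n_fake=3):
    real_feat = (np.random.rand(50, 8) > 0.7).astype(np.float32)  # stand-in binary features
    mean, std = real_feat.mean(axis=0), real_feat.std(axis=0)
    sampled = np.random.normal(mean, std, size=(n_fake, real_feat.shape[1]))
    return np.where(sampled > 0.5, 1, 0)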
# for i in range(10):
# folder_path = op.join("./", "version4_new_adj/{0}/fake{1}_radius{2}".format(args.dataset, num_fake, args.radius))
# adj_path = op.join(folder_path, 'adj{}.npy'.format(i))
# feat_path = op.join(folder_path, 'feat{}.npy'.format(i))
# new_adj, new_feat = add_fake_node(tmp_adj, tmp_feat, feat, feat_path)
# print (torch.sum(new_feat[-num_fake:], 1))
def add_perturb(input_adj, idx, perturb):
# (1-x)A + x(1-A)
# input_adj = input_adj.toarray()
x = np.zeros((input_adj.shape[0], input_adj.shape[1]))
x[idx] = perturb
x[:,idx] = perturb
# print ('x', x[idx])
# x += np.transpose(x) #change the idx'th row and column
x1 = np.ones((input_adj.shape[0], input_adj.shape[1])) - x
# print ('x1', x1[idx])
adj2 = np.ones((input_adj.shape[0], input_adj.shape[1])) - input_adj
# print ('adj2', adj2[idx])
for i in range(input_adj.shape[0]):
adj2[i][i] = 0
perturbed_adj = np.multiply(x1, input_adj) + np.multiply(x, adj2)
return perturbed_adj
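# Illustrative sketch (not part of GUAP): the flip rule used in `add_perturb`.
# For binary entries, (1-x)*A + x*(1-A) leaves an edge alone where x = 0 and
# flips it where x = 1; fractional x gives a soft, differentiable flip.
def _example_flip_rule():
    A = np.array([1.0, 0.0, 1.0, 0.0])
    x = np.array([0.0, 0.0, 1.0, 1.0])
    return (1 - x) * A + x * (1 - A)   # -> [1., 0., 0., 1.]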
def modify_adj(input_adj, perturb, idx):
input_adj = np.add(input_adj,perturb, casting ="unsafe")
input_adj[idx, -num_fake:] = 1 - input_adj[idx, -num_fake:]
input_adj[-num_fake:, idx] = input_adj[idx, -num_fake:]
for i in range(-num_fake, 0):
input_adj[i,i] = 0
input_adj[:, i] = proj_lp(input_adj[:, i])
input_adj[i] = input_adj[:, i]
input_adj = np.clip(input_adj, 0, 1)
return input_adj
def proj_lp(v, xi=args.radius, p=2):
# def proj_lp(v, xi=8, p=2):
# Project on the lp ball centered at 0 and of radius xi
# SUPPORTS only p = 2 and p = Inf for now
# print ('the distance of v', np.linalg.norm(v.flatten(1)))
if p == 2:
v = v * min(1, xi/np.linalg.norm(v.flatten()))
# v = v / np.linalg.norm(v.flatten(1)) * xi
elif p == np.inf:
v = np.sign(v) * np.minimum(abs(v), xi)
else:
v = v
#################
v = np.clip(v, 0, 1)
########################
# v = np.where(v<0.1, 0, v)
#to reduce the number of nonzero elements which means
#the times of perturbation, also prevents saddle point
# v = np.where(v>0.5, 1, 0)
return v
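# Illustrative sketch (not part of GUAP): projection onto the l2 ball of radius xi
# only rescales vectors whose norm exceeds xi; shorter vectors pass through
# unchanged (apart from the clip to [0, 1]).
def _example_proj_lp(xi=2.0):
    long_v = np.full(100, 0.5, dtype=np.float32)    # norm 5.0 > xi, gets rescaled to norm xi
    short_v = np.full(100, 0.1, dtype=np.float32)   # norm 1.0 < xi, left as-is
    return np.linalg.norm(proj_lp(long_v, xi=xi)), np.linalg.norm(proj_lp(short_v, xi=xi))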
def universal_attack(attack_epoch, max_epoch, file_path):
model.eval()
# delta = 0.02
fooling_rate = 0.0
overshoot = 0.02
max_iter_df = 10
# new_feat = torch.from_numpy(new_feat.astype(np.float32))
# new_feat = new_feat.cuda()
v1 = np.zeros(tmp_adj.shape[0]).astype(np.float32)
v2 = np.ones(num_fake).astype(np.float32)
v = np.concatenate((v1, v2))
# stdv = 1./math.sqrt(tmp_adj.shape[0])
# v = np.random.uniform(-stdv, stdv, tmp_adj.shape[0])
cur_foolingrate = 0.0
epoch = 0
early_stop = 0
results = []
tmp_new_adj = np.copy(new_adj)
print ('the new adj', np.sum(tmp_new_adj[-num_fake:, :-num_fake]))
while epoch < max_epoch:
epoch += 1
train_idx = idx_train.cpu().numpy()
np.random.shuffle(train_idx)
###############################################
attack_time = time.time()
for k in train_idx:
#add v to see if the attack succeeds
innormal_x_p = add_perturb(tmp_new_adj, k, v)
##################whether to use filtering
# innormal_x_p = np.where(innormal_x_p<0.5, 0, 1)
x_p, degree_p = normalize(innormal_x_p + np.eye(tmp_new_adj.shape[0])) #A' = A + I
x_p = torch.from_numpy(x_p.astype(np.float32))
x_p = x_p.cuda()
output = model(new_feat, x_p)
if int(torch.argmax(output[k])) == int(torch.argmax(ori_output[k])):
dr, iter = IGP(innormal_x_p, x_p, k, num_classes, degree_p)
if iter < max_iter_df-1:
tmp_new_adj = modify_adj(tmp_new_adj, dr, k)
else:
print ('cant attack this node')
else:
print ('attack succeeds')
print ('the new adj', np.sum(tmp_new_adj[-num_fake:, :-num_fake]))
print ('the IGP time cost is', time.time()-attack_time)
res = []
# v = np.where(v>0.5, 1, 0)
tmp_new_adj = np.where(tmp_new_adj>0.5, 1, 0)
print ('C adjacency matrix', np.sum(tmp_new_adj[-num_fake:, :-num_fake]))
print ('B adjacency matrix', np.sum(tmp_new_adj[-num_fake:, -num_fake:]))
for k in train_idx:
print ('test node', k)
innormal_x_p = add_perturb(tmp_new_adj, k, v)
# innormal_x_p = np.where(innormal_x_p<0.5, 0, 1)
x_p, degree_p = normalize(innormal_x_p + np.eye(tmp_new_adj.shape[0]))
x_p = torch.from_numpy(x_p.astype(np.float32))
x_p = x_p.cuda()
output = model(new_feat, x_p)
if int(torch.argmax(output[k])) == int(torch.argmax(ori_output[k])):
res.append(0)
else:
res.append(1)
fooling_rate = float(sum(res)/len(res))
print ('the current train fooling rates are', fooling_rate)
# test_res = []
# print ('testing')
# test_idx = idx_test.cpu().numpy()
# for k in test_idx:
# print ('test node', k)
# innormal_x_p = add_perturb(tmp_new_adj, k, v)
# # innormal_x_p = np.where(innormal_x_p<0.5, 0, 1)
# x_p, degree_p = normalize(innormal_x_p + np.eye(tmp_new_adj.shape[0]))
# x_p = torch.from_numpy(x_p.astype(np.float32))
# x_p = x_p.cuda()
# output = model(new_feat, x_p)
# if int(torch.argmax(output[k])) == int(torch.argmax(ori_output[k])):
# test_res.append(0)
# else:
# test_res.append(1)
# test_fooling_rate = float(sum(test_res)/len(test_res))
# print ('the current test fooling rates are', test_fooling_rate)
if fooling_rate > cur_foolingrate:
cur_foolingrate = fooling_rate
np.save(file_path, tmp_new_adj)
results.append(fooling_rate)
return cur_foolingrate
def calculate_grad_class(pert_adj, idx, classes):
x = Variable(pert_adj, requires_grad=True)
output = model(new_feat, x)
grad = []
# for i in range(classes):
for i in classes:
cls = torch.LongTensor(np.array(i).reshape(1)).cuda()
loss = F.nll_loss(output[idx:idx+1], cls)
loss.backward(retain_graph=True)
grad.append(x.grad[idx].cpu().numpy())
# print ('grad', grad)
return np.array(grad)
def calculate_grad(pert_adj, idx): ######exclude idx?
x = Variable(pert_adj, requires_grad=True)
output = model(new_feat, x)
ex_idx_train = train_idx.numpy()
ex_idx_train = np.delete(ex_idx_train, np.where(ex_idx_train == idx))
ex_idx_train = torch.LongTensor(ex_idx_train).cuda()
loss_train = F.nll_loss(output[ex_idx_train], labels[ex_idx_train])
loss_train.backward(retain_graph=True)
gradient = np.array(x.grad.cpu().numpy())
gradient[idx] = 0
gradient[:,idx] = 0
gradient[:-num_fake,:-num_fake] = 0
###############
# gradient[-num_fake:, -num_fake:] = 0
###############
np.fill_diagonal(gradient, np.float32(0)) #let the diagonal of the adjacency matrix always be 0
# print ('type', type(gradient))
gradient = (gradient + gradient.transpose())/2
return gradient
def normalize_add_perturb(ori_adj, pert, single_node, idx, rate):
if single_node:
a = ori_adj
a[idx] += pert[idx] * rate
a[:,idx] += pert[:,idx] * rate
else:
pert[idx] = pert[idx] * rate
pert[:, idx] = pert[:, idx] * rate
a = ori_adj + pert
inv_d = 1 + np.sum(pert, 1)
inv_d = 1.0/inv_d
## filter the perturbed matrix so that >= 0
# a = np.where(a<0, 0, a)
ori_adj = np.multiply(a.transpose(), inv_d).transpose()
return ori_adj
def IGP(innormal_adj, ori_adj, idx, num_classes, degree, overshoot=0.02, max_iter=30):
#innormal_adj: the perturbed adjacency matrix not normalized
#ori_adj: the normalized perturbed adjacency matrix
model.eval()
# new_feat = torch.from_numpy(new_feat.astype(np.float32))
# new_feat = new_feat.cuda()
pred = model(new_feat, ori_adj)[idx]
pred = pred.detach().cpu().numpy()
# step =
"#ba3655",
"#ed6825",
"#fbb318",
"#fcfea4"
],
8: [
"#000003",
"#270b52",
"#63146e",
"#9e2963",
"#d24742",
"#f57c15",
"#fabf25",
"#fcfea4"
],
9: [
"#000003",
"#1f0c47",
"#550f6d",
"#88216a",
"#ba3655",
"#e35832",
"#f98c09",
"#f8c931",
"#fcfea4"
],
10: [
"#000003",
"#1a0b40",
"#4a0b6a",
"#781c6d",
"#a42c60",
"#cd4247",
"#ed6825",
"#fb9906",
"#f7cf3a",
"#fcfea4"
],
11: [
"#000003",
"#160b39",
"#410967",
"#6a176e",
"#932567",
"#ba3655",
"#dc5039",
"#f2751a",
"#fba40a",
"#f6d542",
"#fcfea4"
],
256: [
"#000003",
"#000004",
"#000006",
"#010007",
"#010109",
"#01010b",
"#02010e",
"#020210",
"#030212",
"#040314",
"#040316",
"#050418",
"#06041b",
"#07051d",
"#08061f",
"#090621",
"#0a0723",
"#0b0726",
"#0d0828",
"#0e082a",
"#0f092d",
"#10092f",
"#120a32",
"#130a34",
"#140b36",
"#160b39",
"#170b3b",
"#190b3e",
"#1a0b40",
"#1c0c43",
"#1d0c45",
"#1f0c47",
"#200c4a",
"#220b4c",
"#240b4e",
"#260b50",
"#270b52",
"#290b54",
"#2b0a56",
"#2d0a58",
"#2e0a5a",
"#300a5c",
"#32095d",
"#34095f",
"#350960",
"#370961",
"#390962",
"#3b0964",
"#3c0965",
"#3e0966",
"#400966",
"#410967",
"#430a68",
"#450a69",
"#460a69",
"#480b6a",
"#4a0b6a",
"#4b0c6b",
"#4d0c6b",
"#4f0d6c",
"#500d6c",
"#520e6c",
"#530e6d",
"#550f6d",
"#570f6d",
"#58106d",
"#5a116d",
"#5b116e",
"#5d126e",
"#5f126e",
"#60136e",
"#62146e",
"#63146e",
"#65156e",
"#66156e",
"#68166e",
"#6a176e",
"#6b176e",
"#6d186e",
"#6e186e",
"#70196e",
"#72196d",
"#731a6d",
"#751b6d",
"#761b6d",
"#781c6d",
"#7a1c6d",
"#7b1d6c",
"#7d1d6c",
"#7e1e6c",
"#801f6b",
"#811f6b",
"#83206b",
"#85206a",
"#86216a",
"#88216a",
"#892269",
"#8b2269",
"#8d2369",
"#8e2468",
"#902468",
"#912567",
"#932567",
"#952666",
"#962666",
"#982765",
"#992864",
"#9b2864",
"#9c2963",
"#9e2963",
"#a02a62",
"#a12b61",
"#a32b61",
"#a42c60",
"#a62c5f",
"#a72d5f",
"#a92e5e",
"#ab2e5d",
"#ac2f5c",
"#ae305b",
"#af315b",
"#b1315a",
"#b23259",
"#b43358",
"#b53357",
"#b73456",
"#b83556",
"#ba3655",
"#bb3754",
"#bd3753",
"#be3852",
"#bf3951",
"#c13a50",
"#c23b4f",
"#c43c4e",
"#c53d4d",
"#c73e4c",
"#c83e4b",
"#c93f4a",
"#cb4049",
"#cc4148",
"#cd4247",
"#cf4446",
"#d04544",
"#d14643",
"#d24742",
"#d44841",
"#d54940",
"#d64a3f",
"#d74b3e",
"#d94d3d",
"#da4e3b",
"#db4f3a",
"#dc5039",
"#dd5238",
"#de5337",
"#df5436",
"#e05634",
"#e25733",
"#e35832",
"#e45a31",
"#e55b30",
"#e65c2e",
"#e65e2d",
"#e75f2c",
"#e8612b",
"#e9622a",
"#ea6428",
"#eb6527",
"#ec6726",
"#ed6825",
"#ed6a23",
"#ee6c22",
"#ef6d21",
"#f06f1f",
"#f0701e",
"#f1721d",
"#f2741c",
"#f2751a",
"#f37719",
"#f37918",
"#f47a16",
"#f57c15",
"#f57e14",
"#f68012",
"#f68111",
"#f78310",
"#f7850e",
"#f8870d",
"#f8880c",
"#f88a0b",
"#f98c09",
"#f98e08",
"#f99008",
"#fa9107",
"#fa9306",
"#fa9506",
"#fa9706",
"#fb9906",
"#fb9b06",
"#fb9d06",
"#fb9e07",
"#fba007",
"#fba208",
"#fba40a",
"#fba60b",
"#fba80d",
"#fbaa0e",
"#fbac10",
"#fbae12",
"#fbb014",
"#fbb116",
"#fbb318",
"#fbb51a",
"#fbb71c",
"#fbb91e",
"#fabb21",
"#fabd23",
"#fabf25",
"#fac128",
"#f9c32a",
"#f9c52c",
"#f9c72f",
"#f8c931",
"#f8cb34",
"#f8cd37",
"#f7cf3a",
"#f7d13c",
"#f6d33f",
"#f6d542",
"#f5d745",
"#f5d948",
"#f4db4b",
"#f4dc4f",
"#f3de52",
"#f3e056",
"#f3e259",
"#f2e45d",
"#f2e660",
"#f1e864",
"#f1e968",
"#f1eb6c",
"#f1ed70",
"#f1ee74",
"#f1f079",
"#f1f27d",
"#f2f381",
"#f2f485",
"#f3f689",
"#f4f78d",
"#f5f891",
"#f6fa95",
"#f7fb99",
"#f9fc9d",
"#fafda0",
"#fcfea4"
]
},
"bokeh_Magma": {
3: [
"#000003",
"#b53679",
"#fbfcbf"
],
4: [
"#000003",
"#711f81",
"#f0605d",
"#fbfcbf"
],
5: [
"#000003",
"#4f117b",
"#b53679",
"#fb8660",
"#fbfcbf"
],
6: [
"#000003",
"#3b0f6f",
"#8c2980",
"#dd4968",
"#fd9f6c",
"#fbfcbf"
],
7: [
"#000003",
"#2b115e",
"#711f81",
"#b53679",
"#f0605d",
"#feae76",
"#fbfcbf"
],
8: [
"#000003",
"#221150",
"#5d177e",
"#972c7f",
"#d1426e",
"#f8755c",
"#feb97f",
"#fbfcbf"
],
9: [
"#000003",
"#1b1044",
"#4f117b",
"#812581",
"#b53679",
"#e55063",
"#fb8660",
"#fec286",
"#fbfcbf"
],
10: [
"#000003",
"#170f3c",
"#430f75",
"#711f81",
"#9e2e7e",
"#cb3e71",
"#f0605d",
"#fc9366",
"#fec78b",
"#fbfcbf"
],
11: [
"#000003",
"#140d35",
"#3b0f6f",
"#63197f",
"#8c2980",
"#b53679",
"#dd4968",
"#f66e5b",
"#fd9f6c",
"#fdcd90",
"#fbfcbf"
],
256: [
"#000003",
"#000004",
"#000006",
"#010007",
"#010109",
"#01010b",
"#02020d",
"#02020f",
"#030311",
"#040313",
"#040415",
"#050417",
"#060519",
"#07051b",
"#08061d",
"#09071f",
"#0a0722",
"#0b0824",
"#0c0926",
"#0d0a28",
"#0e0a2a",
"#0f0b2c",
"#100c2f",
"#110c31",
"#120d33",
"#140d35",
"#150e38",
"#160e3a",
"#170f3c",
"#180f3f",
"#1a1041",
"#1b1044",
"#1c1046",
"#1e1049",
"#1f114b",
"#20114d",
"#221150",
"#231152",
"#251155",
"#261157",
"#281159",
"#2a115c",
"#2b115e",
"#2d1060",
"#2f1062",
"#301065",
"#321067",
"#341068",
"#350f6a",
"#370f6c",
"#390f6e",
"#3b0f6f",
"#3c0f71",
"#3e0f72",
"#400f73",
"#420f74",
"#430f75",
"#450f76",
"#470f77",
"#481078",
"#4a1079",
"#4b1079",
"#4d117a",
"#4f117b",
"#50127b",
"#52127c",
"#53137c",
"#55137d",
"#57147d",
"#58157e",
"#5a157e",
"#5b167e",
"#5d177e",
"#5e177f",
"#60187f",
"#61187f",
"#63197f",
"#651a80",
"#661a80",
"#681b80",
"#691c80",
"#6b1c80",
"#6c1d80",
"#6e1e81",
"#6f1e81",
"#711f81",
"#731f81",
"#742081",
"#762181",
"#772181",
"#792281",
"#7a2281",
"#7c2381",
"#7e2481",
"#7f2481",
"#812581",
"#822581",
"#842681",
"#852681",
"#872781",
"#892881",
"#8a2881",
"#8c2980",
"#8d2980",
"#8f2a80",
"#912a80",
"#922b80",
"#942b80",
"#952c80",
"#972c7f",
"#992d7f",
"#9a2d7f",
"#9c2e7f",
"#9e2e7e",
"#9f2f7e",
"#a12f7e",
"#a3307e",
"#a4307d",
"#a6317d",
"#a7317d",
"#a9327c",
"#ab337c",
"#ac337b",
"#ae347b",
"#b0347b",
"#b1357a",
"#b3357a",
"#b53679",
"#b63679",
"#b83778",
"#b93778",
"#bb3877",
"#bd3977",
"#be3976",
"#c03a75",
"#c23a75",
"#c33b74",
"#c53c74",
"#c63c73",
"#c83d72",
"#ca3e72",
"#cb3e71",
"#cd3f70",
"#ce4070",
"#d0416f",
"#d1426e",
"#d3426d",
"#d4436d",
"#d6446c",
"#d7456b",
"#d9466a",
"#da4769",
"#dc4869",
"#dd4968",
"#de4a67",
"#e04b66",
"#e14c66",
"#e24d65",
"#e44e64",
"#e55063",
"#e65162",
"#e75262",
"#e85461",
"#ea5560",
"#eb5660",
"#ec585f",
"#ed595f",
"#ee5b5e",
"#ee5d5d",
"#ef5e5d",
"#f0605d",
"#f1615c",
"#f2635c",
"#f3655c",
"#f3675b",
"#f4685b",
"#f56a5b",
"#f56c5b",
"#f66e5b",
"#f6705b",
"#f7715b",
"#f7735c",
"#f8755c",
"#f8775c",
"#f9795c",
"#f97b5d",
"#f97d5d",
"#fa7f5e",
"#fa805e",
"#fa825f",
"#fb8460",
"#fb8660",
"#fb8861",
"#fb8a62",
"#fc8c63",
"#fc8e63",
"#fc9064",
"#fc9265",
"#fc9366",
"#fd9567",
"#fd9768",
"#fd9969",
"#fd9b6a",
"#fd9d6b",
"#fd9f6c",
"#fda16e",
"#fda26f",
"#fda470",
"#fea671",
"#fea873",
"#feaa74",
"#feac75",
"#feae76",
"#feaf78",
"#feb179",
"#feb37b",
"#feb57c",
"#feb77d",
"#feb97f",
"#febb80",
"#febc82",
"#febe83",
"#fec085",
"#fec286",
"#fec488",
"#fec689",
"#fec78b",
"#fec98d",
"#fecb8e",
"#fdcd90",
"#fdcf92",
"#fdd193",
"#fdd295",
"#fdd497",
"#fdd698",
"#fdd89a",
"#fdda9c",
"#fddc9d",
"#fddd9f",
"#fddfa1",
"#fde1a3",
"#fce3a5",
"#fce5a6",
"#fce6a8",
"#fce8aa",
"#fceaac",
"#fcecae",
"#fceeb0",
"#fcf0b1",
"#fcf1b3",
"#fcf3b5",
"#fcf5b7",
"#fbf7b9",
"#fbf9bb",
"#fbfabd",
"#fbfcbf"
]
},
"bokeh_Plasma": {
3: [
"#0c0786",
"#ca4678",
"#eff821"
],
4: [
"#0c0786",
"#9b179e",
"#ec7853",
"#eff821"
],
5: [
"#0c0786",
"#7c02a7",
"#ca4678",
"#f79341",
"#eff821"
],
6: [
"#0c0786",
"#6a00a7",
"#b02a8f",
"#e06461",
"#fca635",
"#eff821"
],
7: [
"#0c0786",
"#5c00a5",
"#9b179e",
"#ca4678",
"#ec7853",
"#fdb22f",
"#eff821"
],
8: [
"#0c0786",
"#5201a3",
"#8908a5",
"#b83289",
"#da5a68",
"#f38748",
"#fdbb2b",
"#eff821"
],
9: [
"#0c0786",
"#4a02a0",
"#7c02a7",
"#a82296",
"#ca4678",
"#e56b5c",
"#f79341",
"#fdc328",
"#eff821"
],
10: [
"#0c0786",
"#45039e",
"#7200a8",
"#9b179e",
"#bc3685",
"#d7566c",
"#ec7853",
"#fa9d3a",
"#fcc726",
"#eff821"
],
11: [
"#0c0786",
"#40039c",
"#6a00a7",
"#8f0da3",
"#b02a8f",
"#ca4678",
"#e06461",
"#f1824c",
"#fca635",
"#fccc25",
"#eff821"
],
256: [
"#0c0786",
"#100787",
"#130689",
"#15068a",
"#18068b",
"#1b068c",
"#1d068d",
"#1f058e",
"#21058f",
"#230590",
"#250591",
"#270592",
"#290593",
"#2b0594",
"#2d0494",
"#2f0495",
"#310496",
"#330497",
"#340498",
"#360498",
"#380499",
"#3a049a",
"#3b039a",
"#3d039b",
"#3f039c",
"#40039c",
"#42039d",
"#44039e",
"#45039e",
"#47029f",
"#49029f",
"#4a02a0",
"#4c02a1",
"#4e02a1",
"#4f02a2",
"#5101a2",
"#5201a3",
"#5401a3",
"#5601a3",
"#5701a4",
"#5901a4",
"#5a00a5",
"#5c00a5",
"#5e00a5",
"#5f00a6",
"#6100a6",
"#6200a6",
"#6400a7",
"#6500a7",
"#6700a7",
"#6800a7",
"#6a00a7",
"#6c00a8",
"#6d00a8",
"#6f00a8",
"#7000a8",
"#7200a8",
"#7300a8",
"#7500a8",
"#7601a8",
"#7801a8",
"#7901a8",
"#7b02a8",
"#7c02a7",
"#7e03a7",
"#7f03a7",
"#8104a7",
"#8204a7",
"#8405a6",
"#8506a6",
"#8607a6",
"#8807a5",
"#8908a5",
"#8b09a4",
"#8c0aa4",
"#8e0ca4",
"#8f0da3",
"#900ea3",
"#920fa2",
"#9310a1",
"#9511a1",
"#9612a0",
"#9713a0",
"#99149f",
"#9a159e",
"#9b179e",
"#9d189d",
"#9e199c",
"#9f1a9b",
"#a01b9b",
"#a21c9a",
"#a31d99",
"#a41e98",
"#a51f97",
"#a72197",
"#a82296",
"#a92395",
"#aa2494",
"#ac2593",
"#ad2692",
"#ae2791",
"#af2890",
"#b02a8f",
"#b12b8f",
"#b22c8e",
"#b42d8d",
"#b52e8c",
"#b62f8b",
"#b7308a",
"#b83289",
"#b93388",
"#ba3487",
"#bb3586",
"#bc3685",
"#bd3784",
"#be3883",
"#bf3982",
"#c03b81",
"#c13c80",
"#c23d80",
"#c33e7f",
"#c43f7e",
"#c5407d",
"#c6417c",
"#c7427b",
"#c8447a",
"#c94579",
"#ca4678",
"#cb4777",
"#cc4876",
"#cd4975",
"#ce4a75",
"#cf4b74",
"#d04d73",
"#d14e72",
"#d14f71",
"#d25070",
"#d3516f",
"#d4526e",
"#d5536d",
"#d6556d",
"#d7566c",
"#d7576b",
"#d8586a",
"#d95969",
"#da5a68",
"#db5b67",
"#dc5d66",
"#dc5e66",
"#dd5f65",
"#de6064",
"#df6163",
"#df6262",
"#e06461",
"#e16560",
"#e26660",
"#e3675f",
"#e3685e",
"#e46a5d",
"#e56b5c",
"#e56c5b",
"#e66d5a",
"#e76e5a",
"#e87059",
"#e87158",
"#e97257",
"#ea7356",
"#ea7455",
"#eb7654",
"#ec7754",
"#ec7853",
"#ed7952",
"#ed7b51",
"#ee7c50",
"#ef7d4f",
"#ef7e4e",
"#f0804d",
"#f0814d",
"#f1824c",
"#f2844b",
"#f2854a",
"#f38649",
"#f38748",
"#f48947",
"#f48a47",
"#f58b46",
"#f58d45",
"#f68e44",
"#f68f43",
"#f69142",
"#f79241",
"#f79341",
"#f89540",
"#f8963f",
"#f8983e",
"#f9993d",
"#f99a3c",
"#fa9c3b",
"#fa9d3a",
"#fa9f3a",
"#faa039",
"#fba238",
"#fba337",
"#fba436",
"#fca635",
"#fca735",
"#fca934",
"#fcaa33",
"#fcac32",
"#fcad31",
"#fdaf31",
"#fdb030",
"#fdb22f",
"#fdb32e",
"#fdb52d",
"#fdb62d",
"#fdb82c",
"#fdb92b",
"#fdbb2b",
"#fdbc2a",
"#fdbe29",
"#fdc029",
"#fdc128",
"#fdc328",
"#fdc427",
"#fdc626",
"#fcc726",
"#fcc926",
"#fccb25",
"#fccc25",
"#fcce25",
"#fbd024",
"#fbd124",
"#fbd324",
"#fad524",
"#fad624",
"#fad824",
"#f9d924",
"#f9db24",
"#f8dd24",
"#f8df24",
"#f7e024",
"#f7e225",
"#f6e425",
"#f6e525",
"#f5e726",
"#f5e926",
"#f4ea26",
"#f3ec26",
"#f3ee26",
"#f2f026",
"#f2f126",
"#f1f326",
"#f0f525",
"#f0f623",
"#eff821"
]
},
"bokeh_Viridis": {
3: [
"#440154",
"#208f8c",
"#fde724"
],
4: [
"#440154",
"#30678d",
"#35b778",
"#fde724"
],
5: [
"#440154",
"#3b518a",
"#208f8c",
"#5bc862",
"#fde724"
],
6: [
"#440154",
"#404387",
"#29788e",
"#22a784",
"#79d151",
"#fde724"
],
7: [
"#440154",
"#443982",
"#30678d",
"#208f8c",
"#35b778",
"#8dd644",
"#fde724"
],
8: [
"#440154",
"#46317e",
"#365a8c",
"#277e8e",
"#1ea087",
"#49c16d",
"#9dd93a",
"#fde724"
],
9: [
"#440154",
"#472b7a",
"#3b518a",
"#2c718e",
"#208f8c",
"#27ad80",
"#5bc862",
"#aadb32",
"#fde724"
],
10: [
"#440154",
"#472777",
"#3e4989",
"#30678d",
"#25828e",
"#1e9c89",
"#35b778",
"#6bcd59",
"#b2dd2c",
"#fde724"
],
11: [
"#440154",
"#482374",
"#404387",
"#345e8d",
"#29788e",
"#208f8c",
"#22a784",
"#42be71",
"#79d151",
"#bade27",
"#fde724"
],
256: [
"#440154",
"#440255",
"#440357",
"#450558",
"#45065a",
"#45085b",
"#46095c",
"#460b5e",
"#460c5f",
"#460e61",
"#470f62",
"#471163",
"#471265",
"#471466",
"#471567",
"#471669",
"#47186a",
"#48196b",
"#481a6c",
"#481c6e",
"#481d6f",
"#481e70",
"#482071",
"#482172",
"#482273",
"#482374",
"#472575",
"#472676",
"#472777",
"#472878",
"#472a79",
"#472b7a",
"#472c7b",
"#462d7c",
"#462f7c",
"#46307d",
"#46317e",
"#45327f",
"#45347f",
"#453580",
"#453681",
"#443781",
"#443982",
"#433a83",
"#433b83",
"#433c84",
"#423d84",
"#423e85",
"#424085",
"#414186",
"#414286",
"#404387",
"#404487",
"#3f4587",
"#3f4788",
"#3e4888",
"#3e4989",
"#3d4a89",
"#3d4b89",
"#3d4c89",
"#3c4d8a",
"#3c4e8a",
"#3b508a",
"#3b518a",
"#3a528b",
"#3a538b",
"#39548b",
"#39558b",
"#38568b",
"#38578c",
"#37588c",
"#37598c",
"#365a8c",
"#365b8c",
"#355c8c",
"#355d8c",
"#345e8d",
"#345f8d",
"#33608d",
"#33618d",
"#32628d",
"#32638d",
"#31648d",
"#31658d",
"#31668d",
"#30678d",
"#30688d",
"#2f698d",
"#2f6a8d",
"#2e6b8e",
"#2e6c8e",
"#2e6d8e",
"#2d6e8e",
"#2d6f8e",
"#2c708e",
"#2c718e",
"#2c728e",
"#2b738e",
"#2b748e",
"#2a758e",
"#2a768e",
"#2a778e",
"#29788e",
"#29798e",
"#287a8e",
"#287a8e",
"#287b8e",
"#277c8e",
"#277d8e",
"#277e8e",
"#267f8e",
"#26808e",
"#26818e",
"#25828e",
"#25838d",
"#24848d",
"#24858d",
"#24868d",
"#23878d",
"#23888d",
"#23898d",
"#22898d",
"#228a8d",
"#228b8d",
"#218c8d",
"#218d8c",
"#218e8c",
"#208f8c",
"#20908c",
"#20918c",
"#1f928c",
"#1f938b",
"#1f948b",
"#1f958b",
"#1f968b",
"#1e978a",
"#1e988a",
"#1e998a",
"#1e998a",
"#1e9a89",
"#1e9b89",
"#1e9c89",
"#1e9d88",
"#1e9e88",
"#1e9f88",
"#1ea087",
"#1fa187",
"#1fa286",
"#1fa386",
"#20a485",
"#20a585",
"#21a685",
"#21a784",
"#22a784",
"#23a883",
"#23a982",
"#24aa82",
"#25ab81",
"#26ac81",
"#27ad80",
"#28ae7f",
"#29af7f",
"#2ab07e",
"#2bb17d",
"#2cb17d",
"#2eb27c",
"#2fb37b",
comp_dict = coords.frame.get_representation_component_names()
inv_dict = {val: key for key, val in comp_dict.items()}
self.lon = getattr(coords, inv_dict["lon"])
self.lat = getattr(coords, inv_dict["lat"])
self._frame_inst = frame
self._frame.value = frame.name
return
def healpix_interp_transform(
self,
frame,
full_sky=False,
inplace=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
):
"""Transform a HEALPix map to a new frame and interp to new pixel centers.
This method is only available for a healpix type sky model.
Computes the pixel centers for a HEALPix map in the new frame,
then interpolates the old map using `astropy_healpix.interpolate_bilinear_skycoord`.
Conversion with this method may take some time as it must iterate over every
frequency and stokes parameter individually.
Currently no polarization fixing is performed by this method.
As a result, it does not support transformations for polarized catalogs
since this would induce a Q <--> U rotation.
Current implementation agrees with using a healpy.Rotator class to about 1 part in 10^5
(e.g `numpy.allclose(healpy_rotated_map, interpolate_bilinear_skycoord, rtol=1e-5) is True`).
Parameters
----------
frame : str, `BaseCoordinateFrame` class or instance.
The frame to transform this coordinate into.
Currently frame must be one of ["galactic", "icrs"].
full_sky : bool
When True returns a full sky catalog even when some pixels are zero.
Defaults to False.
inplace : bool
Option to do the change in place on the object rather than return a new
object. Defaults to True.
run_check : bool
Option to check for the existence and proper shapes of parameters
after downselecting data on this object (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
downselecting data on this object (the default is True, meaning the
acceptable range check will be done).
"""
if inplace:
this = self
else:
this = self.copy()
if this.component_type != "healpix":
raise ValueError(
"Healpix frame interpolation is not valid for point source catalogs."
)
try:
import astropy_healpix
except ImportError as e:
raise ImportError(
"The astropy-healpix module must be installed to use HEALPix methods"
) from e
if np.any(this.stokes[1:] != units.Quantity(0, unit=this.stokes.unit)):
raise NotImplementedError(
"Healpix map transformations are currently not implemented for catalogs "
"with polarization information."
)
# quickly check the validity of the transformation using a dummy SkyCoord object.
coords = SkyCoord(0, 0, unit="rad", frame=this.frame)
# we will need the starting frame for some interpolation later
old_frame = coords.frame
coords = coords.transform_to(frame)
frame = coords.frame
if not isinstance(frame, (Galactic, ICRS)):
raise ValueError(
f"Supplied frame {frame.__class__.__name__} is not supported at "
"this time. Only 'galactic' and 'icrs' frames are currently supported.",
)
hp_obj_new = astropy_healpix.HEALPix(
nside=this.nside,
order=this.hpx_order,
frame=frame,
)
hp_obj_old = astropy_healpix.HEALPix(
nside=this.nside,
order=this.hpx_order,
frame=old_frame,
)
# It is not immediately obvious how many unique pixels the output
# array will have. Initialize a full healpix map, then we will downselect
# later to only valid pixels.
out_stokes = units.Quantity(
np.zeros((4, this.Nfreqs, hp_obj_new.npix)), unit=this.stokes.unit
)
# Need the coordinates of the pixel centers in the new frame
# then we will use these to interpolate for each freq/stokes
new_pixel_locs = hp_obj_new.healpix_to_skycoord(np.arange(hp_obj_new.npix))
for stokes_ind in range(4):
# We haven't implemented a Q+iU rotation fix yet.
if stokes_ind > 0:
continue
for freq_ind in range(this.Nfreqs):
masked_old_frame = np.ma.zeros(hp_obj_new.npix).astype(
this.stokes.dtype
)
# Default every pixel to masked, then unmask ones we have data for
masked_old_frame.mask = np.ones(masked_old_frame.size).astype(bool)
masked_old_frame.mask[this.hpx_inds] = False
masked_old_frame[this.hpx_inds] = this.stokes[
stokes_ind, freq_ind
].value
masked_new_frame = hp_obj_old.interpolate_bilinear_skycoord(
new_pixel_locs,
masked_old_frame,
)
out_stokes[stokes_ind, freq_ind] = units.Quantity(
masked_new_frame.data,
unit=this.stokes.unit,
)
if not full_sky:
# Each frequency/stokes combination should have the same input pixels
# and rotations, therefore the output mask should be equivalent.
this.hpx_inds = np.nonzero(~masked_new_frame.mask)[0]
else:
this.hpx_inds = np.arange(hp_obj_new.npix)
this.stokes = out_stokes[:, :, this.hpx_inds]
# the number of components can change when making this transformation!
this.Ncomponents = this.stokes.shape[2]
this._frame_inst = frame
this._frame.value = frame.name
# recalculate the coherency now that we are in the new frame
this.coherency_radec = skyutils.stokes_to_coherency(this.stokes)
if run_check:
this.check(
check_extra=check_extra, run_check_acceptability=run_check_acceptability
)
if not inplace:
return this
return
def kelvin_to_jansky(self):
"""
Apply a conversion to stokes from K-based units to Jy-based units.
No conversion is applied if stokes is already compatible with Jy
(for point component_type) or Jy/sr (for healpix component_type).
"""
this_unit = self.stokes.unit
if self.component_type == "point":
if this_unit.is_equivalent("Jy"):
return
else:
if this_unit.is_equivalent("Jy/sr"):
return
if self.spectral_type == "spectral_index" or (
self.spectral_type == "flat" and self.reference_frequency is not None
):
conv_factor = 1 / skyutils.jy_to_ksr(self.reference_frequency)
conv_factor = np.repeat(
np.repeat(conv_factor[np.newaxis, np.newaxis, :], 4, axis=0),
self.Nfreqs,
axis=1,
)
elif self.freq_array is not None:
conv_factor = 1 / skyutils.jy_to_ksr(self.freq_array)
conv_factor = np.repeat(
np.repeat(conv_factor[np.newaxis, :, np.newaxis], 4, axis=0),
self.Ncomponents,
axis=2,
)
else:
raise ValueError(
"Either reference_frequency or freq_array must be set to convert to Jy."
)
self.stokes = self.stokes * conv_factor
if self.stokes_error is not None:
self.stokes_error = self.stokes_error * conv_factor
if self.stokes.unit.is_equivalent("Jy"):
# need the `to(units.Jy)` call because otherwise even though it's in Jy,
# the units are a CompositeUnit object which doesn't have all the same
# functionality as a Unit object
self.stokes = self.stokes.to(units.Jy)
if self.stokes_error is not None:
self.stokes_error = self.stokes_error.to(units.Jy)
self.coherency_radec = skyutils.stokes_to_coherency(self.stokes)
def jansky_to_kelvin(self):
"""
Apply a conversion to stokes from Jy-based units to K-based units.
No conversion is applied if stokes is already compatible with K sr
(for point component_type) or K (for healpix component_type).
"""
this_unit = self.stokes.unit
if self.component_type == "point":
if this_unit.is_equivalent("K sr"):
return
else:
if this_unit.is_equivalent("K"):
return
if self.spectral_type == "spectral_index" or (
self.spectral_type == "flat" and self.reference_frequency is not None
):
conv_factor = skyutils.jy_to_ksr(self.reference_frequency)
conv_factor = np.repeat(
np.repeat(conv_factor[np.newaxis, np.newaxis, :], 4, axis=0),
self.Nfreqs,
axis=1,
)
elif self.freq_array is not None:
conv_factor = skyutils.jy_to_ksr(self.freq_array)
conv_factor = np.repeat(
np.repeat(conv_factor[np.newaxis, :, np.newaxis], 4, axis=0),
self.Ncomponents,
axis=2,
)
else:
raise ValueError(
"Either reference_frequency or freq_array must be set to convert to K."
)
self.stokes = self.stokes * conv_factor
if self.stokes_error is not None:
self.stokes_error = self.stokes_error * conv_factor
self.coherency_radec = skyutils.stokes_to_coherency(self.stokes)
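# Background note (not from pyradiosky): the Jy <-> K factor applied above is the
# Rayleigh-Jeans relation between surface brightness and brightness temperature,
#     S / Omega = 2 * k_B * T * nu**2 / c**2,
# so 1 K corresponds to roughly 3.1e4 Jy/sr at 1 GHz and the factor scales as nu**2;
# `skyutils.jy_to_ksr` is assumed here to encapsulate this frequency-dependent factor.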
def get_lon_lat(self):
"""
Retrieve ra and dec values for components.
This is mostly useful for healpix objects where the ra, dec values are not
stored on the object (only the healpix inds are stored, which can be converted
to ra/dec using this method).
"""
if self.component_type == "healpix":
try:
import astropy_healpix
except ImportError as e:
raise ImportError(
"The astropy-healpix module must be installed to use HEALPix "
"methods"
) from e
hp_obj = astropy_healpix.HEALPix(
nside=self.nside,
order=self.hpx_order,
frame=self._frame_inst,
)
coords = hp_obj.healpix_to_skycoord(
self.hpx_inds,
)
comp_dict = coords.frame.get_representation_component_names()
inv_dict = {val: key for key, val in comp_dict.items()}
return getattr(coords, inv_dict["lon"]), getattr(coords, inv_dict["lat"])
else:
return self.lon, self.lat
def healpix_to_point(
self,
to_jy=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
):
"""
Convert a healpix component_type object to a point component_type.
Multiply by the pixel area and optionally convert to Jy.
This effectively treats diffuse pixels as unresolved point sources by
integrating over the pixel area. Whether or not this is a good assumption
depends on the nside and the resolution of the telescope, so it should be
used with care, but it is provided here as a convenience.
Parameters
----------
to_jy : bool
Option to convert to Jy compatible units.
run_check : bool
Option to check for the existence and proper shapes of parameters
after downselecting data on this object (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
downselecting data on this object (the default is True, meaning the
acceptable range check will be done).
"""
if self.component_type != "healpix":
raise ValueError(
"This method can only be called if component_type is 'healpix'."
)
try:
import astropy_healpix
except ImportError as e:
raise ImportError(
"The astropy-healpix module must be installed to use HEALPix | |
node : PhyloTree.Clade
Tree node. **Note** because the method operates
on the sequences on both sides of a branch, sequence reconstruction
must be performed prior to calling this method.
"""
seq_pairs, multiplicity = self.gtr.compress_sequence_pair(node.up.cseq,
node.cseq,
pattern_multiplicity = self.multiplicity,
ignore_gaps = self.ignore_gaps)
node.compressed_sequence = {'pair':seq_pairs, 'multiplicity':multiplicity}
def _store_compressed_sequence_pairs(self):
"""
Traverse the tree, and for each node store the compressed sequence pair.
**Note** sequence reconstruction should be performed prior to calling
this method.
"""
self.logger("TreeAnc._store_compressed_sequence_pairs...",2)
for node in self.tree.find_clades():
if node.up is None:
continue
self._store_compressed_sequence_to_node(node)
self.logger("TreeAnc._store_compressed_sequence_pairs...done",3)
###################################################################
### Branch length
###################################################################
def optimize_branch_len(self, **kwargs):
"""Deprecated in favor of 'optimize_branch_lengths_joint'"""
return self.optimize_branch_lengths_joint(**kwargs)
def optimize_branch_len_joint(self, **kwargs):
"""Deprecated in favor of 'optimize_branch_lengths_joint'"""
return self.optimize_branch_lengths_joint(**kwargs)
def optimize_branch_lengths_joint(self, **kwargs):
"""
Perform optimization for the branch lengths of the entire tree.
This method only does a single pass and needs to be iterated.
**Note** this method assumes that each node stores information
about its sequence as numpy.array object (node.sequence attribute).
Therefore, before calling this method, sequence reconstruction with
either of the available models must be performed.
Parameters
----------
**kwargs :
Keyword arguments
Keyword Args
------------
store_old : bool
If True, the old lengths will be saved in :code:`node._old_dist` attribute.
Useful for testing, and special post-processing.
"""
self.logger("TreeAnc.optimize_branch_length: running branch length optimization using jointML ancestral sequences",1)
if (self.tree is None) or (self.aln is None):
self.logger("TreeAnc.optimize_branch_length: ERROR, alignment or tree are missing", 0)
return ttconf.ERROR
store_old_dist = kwargs['store_old'] if 'store_old' in kwargs else False
max_bl = 0
for node in self.tree.find_clades(order='postorder'):
if node.up is None: continue # this is the root
if store_old_dist:
node._old_length = node.branch_length
new_len = max(0,self.optimal_branch_length(node))
self.logger("Optimization results: old_len=%.4e, new_len=%.4e, naive=%.4e"
" Updating branch length..."%(node.branch_length, new_len, len(node.mutations)*self.one_mutation), 5)
node.branch_length = new_len
node.mutation_length=new_len
max_bl = max(max_bl, new_len)
if max_bl>0.15:
self.logger("TreeAnc.optimize_branch_lengths_joint: THIS TREE HAS LONG BRANCHES."
" \n\t ****TreeTime's JOINT IS NOT DESIGNED TO OPTIMIZE LONG BRANCHES."
" \n\t ****PLEASE OPTIMIZE BRANCHES USING: "
" \n\t ****branch_length_mode='input' or 'marginal'", 0, warn=True)
# as branch lengths changed, the distance to root etc need to be recalculated
self.tree.root.up = None
self.tree.root.dist2root = 0.0
self._prepare_nodes()
return ttconf.SUCCESS
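# Illustrative usage sketch (the tree/alignment file names below are placeholders,
# not part of this codebase): since a single call performs only one pass, it is
# typically alternated with ancestral sequence reconstruction for a few rounds.
#
#   ta = TreeAnc(tree="tree.nwk", aln="aln.fasta", gtr="JC69")
#   ta.infer_ancestral_sequences(marginal=False)        # joint ML sequences
#   for _ in range(3):
#       ta.optimize_branch_lengths_joint(store_old=True)
#       ta.infer_ancestral_sequences(marginal=False)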
def optimal_branch_length(self, node):
'''
Calculate optimal branch length given the sequences of node and parent
Parameters
----------
node : PhyloTree.Clade
TreeNode, attached to the branch.
Returns
-------
new_len : float
Optimal length of the given branch
'''
if node.up is None:
return self.one_mutation
parent = node.up
if hasattr(node, 'compressed_sequence'):
new_len = self.gtr.optimal_t_compressed(node.compressed_sequence['pair'],
node.compressed_sequence['multiplicity'])
else:
new_len = self.gtr.optimal_t(parent.cseq, node.cseq,
pattern_multiplicity=self.multiplicity,
ignore_gaps=self.ignore_gaps)
return new_len
def marginal_branch_profile(self, node):
'''
Calculate the marginal distribution of sequence states on both ends
of the branch leading to node.
Parameters
----------
node : PhyloTree.Clade
TreeNode, attached to the branch.
Returns
-------
pp, pc : Pair of vectors (profile parent, pp) and (profile child, pc)
that are of shape (L,n) where L is sequence length and n is alphabet size.
Note that these correspond to the compressed sequences.
'''
parent = node.up
if parent is None:
raise Exception("Branch profiles can't be calculated for the root!")
if not hasattr(node, 'marginal_outgroup_LH'):
raise Exception("marginal ancestral inference needs to be performed first!")
pc = node.marginal_subtree_LH
pp = node.marginal_outgroup_LH
return pp, pc
def optimal_marginal_branch_length(self, node, tol=1e-10):
'''
Calculate the optimal length of the branch leading to node, using the
marginal distributions of sequence states at both of its ends.
Parameters
----------
node : PhyloTree.Clade
TreeNode, attached to the branch.
Returns
-------
branch_length : float
branch length of the branch leading to the node.
note: this can be unstable on iteration
'''
if node.up is None:
return self.one_mutation
if node.up.up is None and len(node.up.clades)==2:
# children of a bifurcating root!
other = node.up.clades[0] if node==node.up.clades[1] else node.up.clades[1]
bl_ratio = node.branch_length/(node.branch_length+other.branch_length)
pc = node.marginal_subtree_LH
pp = normalize_profile(other.marginal_subtree_LH*self.tree.root.marginal_outgroup_LH)[0]
new_bl = self.gtr.optimal_t_compressed((pp, pc), self.multiplicity, profiles=True, tol=tol)
return bl_ratio*new_bl
else:
pp, pc = self.marginal_branch_profile(node)
return self.gtr.optimal_t_compressed((pp, pc), self.multiplicity, profiles=True, tol=tol)
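# Note on the bifurcating-root case above: the two branches attached to a
# bifurcating root are only constrained through their sum (the root position
# along that path is not identifiable), so the newly optimized total is
# redistributed according to the current length ratio. For example, with
# node.branch_length = 0.02 and the sibling at 0.06, bl_ratio = 0.25 and the
# node receives one quarter of the optimized combined length.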
def prune_short_branches(self):
"""
If the branch length is less than the minimal value, remove the branch
from the tree. **Requires** ancestral sequence reconstruction
"""
self.logger("TreeAnc.prune_short_branches: pruning short branches (max prob at zero)...", 1)
for node in self.tree.find_clades():
if node.up is None or node.is_terminal():
continue
# probability of the two seqs separated by zero time is not zero
if ((node.branch_length<0.1*self.one_mutation) and
(self.gtr.prob_t(node.up.cseq, node.cseq, 0.0,
pattern_multiplicity=self.multiplicity) > 0.1)):
# re-assign the node children directly to its parent
node.up.clades = [k for k in node.up.clades if k != node] + node.clades
for clade in node.clades:
clade.up = node.up
def optimize_tree_marginal(self, max_iter=10, infer_gtr=False, pc=1.0, damping=0.75,
LHtol=0.1, site_specific_gtr=False):
self.infer_ancestral_sequences(marginal=True)
oldLH = self.sequence_LH()
self.logger("TreeAnc.optimize_tree_marginal: initial, LH=%1.2f, total branch_length %1.4f"%
(oldLH, self.tree.total_branch_length()), 2)
for i in range(max_iter):
if infer_gtr:
self.infer_gtr(site_specific=site_specific_gtr, marginal=True, normalized_rate=True, pc=pc)
self.infer_ancestral_sequences(marginal=True)
branch_length_changes = []
for n in self.tree.find_clades():
if n.up is None:
continue
new_val = self.optimal_marginal_branch_length(n, tol=1e-8 + 0.01**(i+1))
update_val = new_val*(1-damping**(i+1)) + n.branch_length*damping**(i+1)
branch_length_changes.append([n.branch_length, new_val, update_val])
n.branch_length = update_val
tmp = np.array(branch_length_changes)
dbl = np.mean(np.abs(tmp[:,0]-tmp[:,1])/(tmp[:,0]+tmp[:,1]))
self.infer_ancestral_sequences(marginal=True)
LH = self.sequence_LH()
deltaLH = LH - oldLH
oldLH = LH
self.logger("TreeAnc.optimize_tree_marginal: iteration %d, LH=%1.2f (%1.2f), delta branch_length=%1.4f, total branch_length %1.4f"%
(i, LH, deltaLH, dbl, self.tree.total_branch_length()), 2)
if deltaLH<LHtol:
self.logger("TreeAnc.optimize_tree_marginal: deltaLH=%f, stopping iteration."%deltaLH,1)
break
return ttconf.SUCCESS
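# The damped update above blends the newly estimated branch length with the
# stored value, with the weight on the old value shrinking geometrically with
# the iteration number. A small numeric sketch with damping=0.75, pretending
# the new estimate stays at 0.02 and the stored value at 0.10 (in practice
# both change every iteration):
#
#   i = 0: update = 0.02*(1 - 0.75)    + 0.10*0.75    = 0.080
#   i = 1: update = 0.02*(1 - 0.75**2) + 0.10*0.75**2 ≈ 0.065
#   i = 3: update = 0.02*(1 - 0.75**4) + 0.10*0.75**4 ≈ 0.045
#
# so early iterations move cautiously and later ones approach the new estimate.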
def optimize_sequences_and_branch_length(self,*args, **kwargs):
"""This method is a shortcut for :py:meth:`treetime.TreeAnc.optimize_tree`
Deprecated in favor of 'optimize_tree'
"""
self.logger("Deprecation warning: 'optimize_sequences_and_branch_length' will be removed and replaced by 'optimize_tree'!", 1, warn=True)
self.optimize_seq_and_branch_len(*args,**kwargs)
def optimize_seq_and_branch_len(self,*args, **kwargs):
"""This method is a shortcut for :py:meth:`treetime.TreeAnc.optimize_tree`
Deprecated in favor of 'optimize_tree'
"""
self.logger("Deprecation warning: 'optimize_seq_and_branch_len' will be removed and replaced by 'optimize_tree'!", 1, warn=True)
self.optimize_tree(*args,**kwargs)
def optimize_tree(self,prune_short=True,
marginal_sequences=False, branch_length_mode='joint',
max_iter=5, infer_gtr=False, pc=1.0, **kwargs):
"""
Iteratively set branch lengths and reconstruct ancestral sequences until
neither changes any further. The algorithm assumes that only the topology
of the tree is known, and requires that sequences are assigned to all
leaves of the tree.
The first step is to pre-reconstruct ancestral states using the Fitch
reconstruction algorithm or ML with the existing branch length estimates.
Branch lengths are then optimized and the reconstruction repeated until
convergence, using the ML method.
Parameters
-----------
prune_short : bool
If True, the branches with zero optimal length will be pruned from
the tree, creating polytomies. The polytomies could be further
processed using :py:meth:`treetime.TreeTime.resolve_polytomies` from the TreeTime class.
marginal_sequences : bool
Assign sequences to their marginally most likely value, rather than
the values that are jointly most likely across all nodes.
branch_length_mode : str
'joint', 'marginal', or 'input'. Branch lengths are left unchanged in case
of 'input'. 'joint' and 'marginal' cause branch length optimization
while setting sequences to the ML value or tracing over all possible
internal sequence states.
max_iter : int
Maximal number of iterations of sequence and branch length optimization.
infer_gtr : bool
Infer a GTR model from the observed substitutions.
"""
if branch_length_mode=='marginal':
return self.optimize_tree_marginal(max_iter=max_iter, infer_gtr=infer_gtr, pc=pc, **kwargs)
elif branch_length_mode=='input':
N_diff = self.reconstruct_anc(method='probabilistic', infer_gtr=infer_gtr, pc=pc,
marginal=marginal_sequences, **kwargs)
return ttconf.SUCCESS
elif branch_length_mode!='joint':
return ttconf.ERROR
self.logger("TreeAnc.optimize_tree: sequences...", 1)
N_diff = self.reconstruct_anc(method='probabilistic', infer_gtr=infer_gtr, pc=pc,
marginal=marginal_sequences, **kwargs)
self.optimize_branch_len(verbose=0, store_old=False, mode=branch_length_mode)
n = 0
while n<max_iter:
n += 1
if prune_short:
self.prune_short_branches()
N_diff = self.reconstruct_anc(method='probabilistic', infer_gtr=False,
marginal=marginal_sequences, **kwargs)
self.logger("TreeAnc.optimize_tree: Iteration %d."
" #Nuc changed since prev reconstructions: %d" %(n, N_diff), 2)
if N_diff < 1:
break
self.optimize_branch_lengths_joint(store_old=False)
self.tree.unconstrained_sequence_LH = (self.tree.sequence_LH*self.multiplicity).sum()
self._prepare_nodes() # fix dist2root and up-links after reconstruction
self.logger("TreeAnc.optimize_tree: Unconstrained sequence LH:%f" % self.tree.unconstrained_sequence_LH , 2)
return ttconf.SUCCESS
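# Illustrative usage sketch for the three branch-length modes handled above
# (the TreeAnc construction arguments are placeholders):
#
#   ta = TreeAnc(tree="tree.nwk", aln="aln.fasta", gtr="JC69")
#   ta.optimize_tree(branch_length_mode='joint')      # iterate joint reconstruction + branch lengths
#   ta.optimize_tree(branch_length_mode='marginal')   # delegates to optimize_tree_marginal
#   ta.optimize_tree(branch_length_mode='input')      # keep input branch lengths, only reconstruct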
def infer_gtr_iterative(self, max_iter=10, site_specific=False, LHtol=0.1,
pc=1.0, normalized_rate=False):
"""infer GTR model by iteratively estimating ancestral sequences and the GTR model
Parameters
----------
max_iter : int, optional
maximal number of iterations
site_specific : bool, optional
use a site specific model
LHtol : float, optional
stop iteration when LH improvement falls below this cutoff
pc : float, optional
pseudocount to use
normalized_rate : bool, optional
set the overall rate to 1 (makes sense when optimizing branch lengths as well)
Returns
-------
str
success/failure code
"""
self.infer_ancestral_sequences(marginal=True)
old_p = np.copy(self.gtr.Pi)
old_LH = self.sequence_LH()
for i in range(max_iter):
self.infer_gtr(site_specific=site_specific, marginal=True,
normalized_rate=normalized_rate, pc=pc)
self.infer_ancestral_sequences(marginal=True)
dp = np.abs(self.gtr.Pi - old_p).mean() if self.gtr.Pi.shape==old_p.shape else np.nan
deltaLH = self.sequence_LH() - old_LH
old_p = np.copy(self.gtr.Pi)
old_LH = self.sequence_LH()
self.logger("TreeAnc.infer_gtr_iterative: iteration %d, LH=%1.2f | |
#!/usr/bin/python
# -*- coding=utf-8 -*-
# Project: eaglemine
# File: eaglemine.py
# Goal: The main routine of eaglemine
# Version: 1.0
# Created by @wenchieh on <12/17/2017>
#
__author__ = 'wenchieh'
# sys
import time
# third-party lib
import numpy as np
from scipy.sparse.linalg import svds
# project
from .eaglemine_model import EagleMineModel
from .tools.histogram_heuristic_generator import HistogramHeuristicGenerator
from .._model import DMmodel
from spartan.tensor.graph import Graph
class EagleMine( DMmodel ):
''' Micro-cluster detection: vision-guided anomaly detection.
Given a histogram derived from the correlated features of graph nodes,
EagleMine can be used to identify the micro-clusters in the graph,
and the nodes in these micro-clusters generally correspond to anomalous patterns.
'''
def __init__(self, voctype:str="dtmnorm", mode:int=2, mix_comps:int=2):
'''Initialization func for EagleMine
Parameters:
--------
:param voctype: str
vocabulary type: {"dtmnorm", "dmgauss"}
Default is "dtmnorm".
:param mode: int
The dimensions of features (the histogram).
Default is 2.
:param mix_comps: int
The number of mixture components for describing the major island.
Default is 2.
'''
self.voctype = voctype
self.mode = mode
self.ncomps = mix_comps
self.eaglemodel = None
self.__histgenerator__ = None
@classmethod
def __create__(cls, *args, **kwargs):
return cls(*args, **kwargs)
@staticmethod
def get_graph_spectral(sparse_matrix):
'''Extract the spectral features of the given sparse matrix (graph)
Parameter:
--------
:param sparse_matrix:
Sparse adjacency matrix of a graph.
'''
hub, _, auth = svds(1.0 * sparse_matrix.tocsr(), k=1, which='LM')
hub, auth = np.squeeze(np.array(hub)), np.squeeze(np.array(auth))
if abs(np.max(hub)) < abs(np.min(hub)):
hub *= -1
hub[hub < 0] = 0
if abs(np.max(auth)) < abs(np.min(auth)):
auth *= -1
auth[auth < 0] = 0
return hub, auth
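# Minimal sketch of what get_graph_spectral computes, on a made-up toy
# adjacency matrix (assumes scipy is available, as it is imported above):
#
#   from scipy.sparse import csr_matrix
#   A = csr_matrix([[0, 1, 1],
#                   [0, 0, 1],
#                   [0, 0, 0]], dtype=float)
#   hub, auth = EagleMine.get_graph_spectral(A)
#   # hub[i]  ~ entry of the leading left singular vector: node i as a "hub"
#   # auth[j] ~ entry of the leading right singular vector: node j as an "authority"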
def graph2feature(self, graph:Graph, feature_type:str='outdegree2hubness'):
'''Extract example correlated features of the given graph and generate the corresponding histogram.
Parameters:
--------
:param graph: Graph
Given graph data
:param feature_type: str
Feature type for the graph node: {'outdegree2hubness', 'indegree2authority', 'degree2pagerank'}.
Default is 'outdegree2hubness'.
'''
if feature_type not in {'outdegree2hubness', 'indegree2authority', 'degree2pagerank'}:
raise NameError("Invalid feature type 'ty_type', which should be in {'outdegree2hubness', 'indegree2authority', 'degree2pagerank'}")
print("extract graph features ..... ")
start_tm = time.time()
outd, ind = np.asarray(graph.sm.sum(axis=1)).squeeze(), np.asarray(graph.sm.sum(axis=0)).squeeze()
hub, auth = self.get_graph_spectral(graph.sm)
if feature_type == 'outdegree2hubness' or feature_type == 'degree2pagerank':
deg, spec = outd, hub
else:
deg, spec = ind, auth
degreeidx = 0
print("done! @ {}s".format(time.time() - start_tm))
return degreeidx, np.column_stack((deg, spec))
def feature2histogram(self, feature:np.ndarray, degreeidx:int=0,
N_bins:int=80, base:int=10, mode:int=2, verbose:bool=True):
'''Construct histogram with given features
Parameters:
--------
:param feature: np.ndarray
The correlated features
:param degreeidx: int
The index of the 'degree' ('out-degree', 'in-degree') column in features; use degreeidx=-1 if there is no 'degree' feature.
Default is 0.
:param N_bins: int
The expected number of bins for generating histogram.
Default is 80.
:param base: int
The logarithmic base for bucketing the graph features.
Default is 10.
:param mode: int
The dimensions of features for constructing the histogram.
Default is 2.
:param verbose: bool
Whether output some running logs.
Default is True.
'''
n_samples, n_features = feature.shape
index = np.array([True] * n_samples)
for mod in range(mode):
index &= feature[:, mod] > 0
if verbose:
print("total shape: {}, valid samples:{}".format(feature.shape, np.sum(index)))
degree, feat = None, None
feature = feature[index, :]
if degreeidx >= 0:
degree = feature[:, degreeidx]
feat = np.delete(feature, degreeidx, axis=1)
del feature
else:
feat = feature
print("construct histogram ..... ")
start_tm = time.time()
self.__histgenerator__ = HistogramHeuristicGenerator()
if degree is not None:
self.__histgenerator__.set_deg_data(degree, feat)
self.__histgenerator__.histogram_gen(method="degree", N=N_bins, base=base)
else:
self.__histgenerator__.set_data(feat)
self.__histgenerator__.histogram_gen(method="N", N=N_bins, logarithmic=True, base=base)
print("done! @ {}s".format(time.time() - start_tm))
if verbose:
self.__histgenerator__.dump()
n_nodes = len(self.__histgenerator__.points_coord)
node2hcel = map(tuple, self.__histgenerator__.points_coord)
nodeidx2hcel = dict(zip(range(n_nodes), node2hcel))
return self.__histgenerator__.histogram, nodeidx2hcel, self.__histgenerator__.hpos2avgfeat
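# Usage sketch: given an EagleMine instance `em` (see the constructor above),
# each row of `feature` describes one graph node; the degree column is bucketed
# on a logarithmic scale and the remaining column(s) define the other histogram
# axes. The feature array below is fabricated for illustration only.
#
#   feats = np.column_stack((np.random.pareto(2.0, 1000) + 1,   # degree-like column
#                            np.random.rand(1000)))             # spectral-like column
#   hist, nodeidx2hcel, hcel2avgfeat = em.feature2histogram(feats, degreeidx=0, N_bins=40)
#   # hist:          {cell coordinates -> count}
#   # nodeidx2hcel:  node index -> histogram cell
#   # hcel2avgfeat:  histogram cell -> average feature values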
def set_histdata(self, histogram:dict, node2hcel:dict, hcel2avgfeat:dict, weighted_ftidx:int=0):
'''Set the histogram data
Parameters:
--------
:param histogram: dict
In the format '(x,y,z,...): val', denoting that the cell (x,y,z,...) is associated with value 'val'.
:param node2hcel: dict
graph node id (index) to histogram cell
:param hcel2avgfeat: dict
the average feature values for each histogram cell.
:param weighted_ftidx: int
The feature index as weight for suspiciousness metric.
Default is 0.
'''
self.histogram, self.hcelsusp_wt = list(), list()
for hcel in histogram.keys():
self.histogram.append(list(hcel) + [histogram.get(hcel)])
self.hcelsusp_wt.append(hcel2avgfeat[hcel][weighted_ftidx])
self.histogram = np.asarray(self.histogram)
self.hcelsusp_wt = np.asarray(self.hcelsusp_wt)
self.node2hcel = np.column_stack((list(node2hcel.keys()), list(node2hcel.values())))
self.eaglemodel = None
self.__histgenerator__ = None
self.hcel2label = None
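# Sketch of the expected input format (all values below are made up): each
# histogram cell maps to its count, each node maps to its cell, and each cell
# carries average feature values used as suspiciousness weights.
#
#   histogram    = {(0, 3): 120, (1, 3): 4, (7, 9): 1}
#   node2hcel    = {0: (0, 3), 1: (0, 3), 2: (7, 9)}
#   hcel2avgfeat = {(0, 3): [2.1, 0.4], (1, 3): [9.7, 0.5], (7, 9): [801.0, 0.9]}
#   em.set_histdata(histogram, node2hcel, hcel2avgfeat, weighted_ftidx=0)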
def run(self, outs:str, waterlevel_step:float=0.2, prune_alpha:float=0.80,
min_pts:int=20, strictness:int=3, verbose:bool=True):
''' micro-cluster identification and refinement with water-level tree.
Parameters:
--------
:param outs: str
Output path for some temporary results.
:param waterlevel_step: float
Step size for raising the water level.
Default is 0.2.
:param prune_alpha: float
The proportion of the level-tree to prune.
Default is 0.80.
:param min_pts: int
The minimum number of points in a histogram cell.
Default is 20.
:param strictness: int
How strict the Anderson-Darling test for normality should be (0: not at all strict; 4: very strict).
Default is 3.
:param verbose: bool
Whether output some running logs.
Default is True.
'''
print("*****************")
print("[0]. initialization")
self.eaglemodel = EagleMineModel(self.mode, self.ncomps)
self.eaglemodel.set_vocabulary(self.voctype)
self.eaglemodel.set_histogram(self.histogram)
print("*****************")
print("[1]. WaterLevelTree")
start_tm = time.time()
self.eaglemodel.leveltree_build(outs, waterlevel_step, prune_alpha, verbose=verbose)
end_tm1 = time.time()
print("done @ {}".format(end_tm1 - start_tm))
print("*****************")
print("[2]. TreeExplore")
self.eaglemodel.search(min_pts, strictness, verbose=verbose)
self.eaglemodel.post_stitch(strictness, verbose=verbose)
end_tm2 = time.time()
print("done @ {}".format(end_tm2 - end_tm1))
print("*****************")
print("[3]. node groups cluster and suspicious measure")
self.hcel2label, mdl = self.eaglemodel.cluster_remarks(strictness, verbose=verbose)
cluster_suspicious = self.eaglemodel.cluster_weighted_suspicious(self.hcelsusp_wt, strictness, verbose=verbose)
# print("description length (mdl): {}".format(mdl))
# print("suspicious result: {}".format(cluster_suspicious))
print('done @ {}'.format(time.time() - start_tm))
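# End-to-end usage sketch for the class (graph loading and the output paths
# are placeholders; `graph` is assumed to be a spartan Graph):
#
#   em = EagleMine(voctype="dtmnorm", mode=2, mix_comps=2)
#   degidx, feats = em.graph2feature(graph)
#   hist, n2h, h2f = em.feature2histogram(feats, degreeidx=degidx)
#   em.set_histdata(hist, n2h, h2f)
#   em.run(outs="./eaglemine_out")
#   em.save("eaglemine.out", outfn_hcel2label="hcel2label.out")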
def __str__(self):
return str(vars(self))
def dump(self):
self.eaglemodel.dump()
if self.__histgenerator__ is not None:
self.__histgenerator__.dump()
print("done!")
def save(self, outfn_eaglemine:str, outfn_leveltree:str=None,
outfn_node2label:str=None, outfn_hcel2label:str=None,
comments:str="#", delimiter:str=";"):
'''save result of EagleMine
Parameters:
--------
:param outfn_eaglemine: str
Output path for eaglemine data
:param outfn_leveltree: str
Output path for the water-level-tree data.
:param outfn_node2label: str
Output path for node2label data
:param outfn_hcel2label: str
Output path for hcel2label data
:param comments: str
The comments (start character) of inputs.
Default is "#".
:param delimiter: str
The separator of items in each line of inputs.
Default is ";".
'''
print("saving result")
start_tm = time.time()
self.eaglemodel.save(outfn_eaglemine)
if outfn_leveltree:
self.eaglemodel.leveltree.save_leveltree(outfn_leveltree, verbose=False)
nlabs = len(np.unique(list(self.hcel2label.values())))
if outfn_node2label is not None:
nnds = len(self.node2hcel)
with open(outfn_node2label, 'w') as ofp_node2lab:
ofp_node2lab.writelines(comments + " #pt: {}, #label: {}\n".format(nnds, nlabs))
for k in range(nnds):
nodeidx, hcel = self.node2hcel[k, 0], tuple(self.node2hcel[k, 1:])
nodelab = self.hcel2label.get(hcel, -1)
ofp_node2lab.writelines("{}{}{}\n".format(nodeidx, delimiter, nodelab))
ofp_node2lab.close()
if outfn_hcel2label is not None:
nhcels = len(self.hcelsusp_wt)
with open(outfn_hcel2label, 'w') as ofp_hcel2lab:
ofp_hcel2lab.writelines(comments + ' #hcel: {}, #label: {}\n'.format(nhcels, nlabs))
for hcel, lab in self.hcel2label.items():
hcel_str = delimiter.join(map(str, hcel))
ofp_hcel2lab.writelines("{}{}{}\n".format(hcel_str, delimiter, lab))
ofp_hcel2lab.close()
end_tm = time.time()
print("done! @ {}s".format(end_tm - start_tm))
def save_histogram(self,outfn_histogram:str=None, outfn_node2hcel:str=None, outfn_hcel2avgfeat:str=None,
comments:str="#", delimiter:str=","):
'''Save the histogram data for the graph.
Parameters:
--------
:param outfn_histogram: str
Output path of histogram.
The records in the histogram should be in the format 'x,y,z,...,val', denoting that the cell (x, y, z, ...) is associated with value 'val'.
Default is None.
:param outfn_node2hcel: str
Output path of the file mapping the node to histogram cell.
Default is None.
:param outfn_hcel2avgfeat: str
Output path of the file mapping the histogram cell to the average features and #points
Default is None.
:param comments: str
The comments (start character) of inputs.
Default is "#".
:param delimiter: str
The separator of items in each line of inputs.
Default is ",".
'''
if self.__histgenerator__ is not None:
if outfn_histogram is not None:
self.__histgenerator__.save_histogram(outfn_histogram, delimiter, comments)
if outfn_node2hcel is not None:
self.__histgenerator__.save_pts_index(outfn_node2hcel, delimiter, comments)
if outfn_hcel2avgfeat is not None:
self.__histgenerator__.save_hpos2avgfeat(outfn_hcel2avgfeat, delimiter, comments)
else:
raise RuntimeError("No histogram generated for the given graph!")
## TODO: realize 'anomaly_detection' for task-based running. The root API may need to be refactored.
# def anomaly_detection(self, outs:str, waterlevel_step:float=0.2,
# prune_alpha:float=0.80, min_pts:int=20, strictness:int=3):
# '''anomaly detection with EagleMine
# Parameters:
# --------
# outs: str
# Output path for some temporary results.
# waterlevel_step: float
# Step size for raising the water level.
# Default is 0.2.
# prune_alpha: float
# The proportion of the level-tree to prune.
# Default is 0.80.
# min_pts: int
# The minimum number of points in a histogram cell.
# Default is 20.
# strictness: int
# How strict should the anderson-darling test for normality. 0: not at all strict; 4: | |
import os
import gym
from gym.envs.registration import register
from sinergym.utils.constants import *
from sinergym.utils.rewards import *
# Set __version__ in module
version_file = os.path.join(os.path.dirname(__file__), "version.txt")
with open(version_file, "r") as file_handler:
__version__ = file_handler.read().strip()
# ---------------------------------------------------------------------------- #
# 5ZoneAutoDXVAV Environments #
# ---------------------------------------------------------------------------- #
# 0) Demo environment
register(
id='Eplus-demo-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '5ZoneAutoDXVAV.idf',
'weather_file': 'USA_PA_Pittsburgh-Allegheny.County.AP.725205_TMY3.epw',
'observation_space': DEFAULT_5ZONE_OBSERVATION_SPACE,
'observation_variables': DEFAULT_5ZONE_OBSERVATION_VARIABLES,
'action_space': DEFAULT_5ZONE_ACTION_SPACE_DISCRETE,
'action_variables': DEFAULT_5ZONE_ACTION_VARIABLES,
'action_mapping': DEFAULT_5ZONE_ACTION_MAPPING,
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': 'Zone Air Temperature(SPACE1-1)',
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (
20.0,
23.5),
'range_comfort_summer': (
23.0,
26.0)},
'env_name': 'demo-v1',
'config_params': DEFAULT_5ZONE_CONFIG_PARAMS})
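# Usage sketch: once registered, any of these environments can be created via
# the standard Gym factory (shown with a random policy and the classic Gym
# reset/step signatures used by this package version):
#
#   env = gym.make('Eplus-demo-v1')
#   obs = env.reset()
#   for _ in range(10):
#       action = env.action_space.sample()
#       obs, reward, done, info = env.step(action)
#   env.close()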
# 1) 5-zone, hot weather, discrete actions
register(
id='Eplus-5Zone-hot-discrete-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '5ZoneAutoDXVAV.idf',
'weather_file': 'USA_AZ_Davis-Monthan.AFB.722745_TMY3.epw',
'observation_space': DEFAULT_5ZONE_OBSERVATION_SPACE,
'observation_variables': DEFAULT_5ZONE_OBSERVATION_VARIABLES,
'action_space': DEFAULT_5ZONE_ACTION_SPACE_DISCRETE,
'action_variables': DEFAULT_5ZONE_ACTION_VARIABLES,
'action_mapping': DEFAULT_5ZONE_ACTION_MAPPING,
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': 'Zone Air Temperature(SPACE1-1)',
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (20.0, 23.5),
'range_comfort_summer': (23.0, 26.0)
},
'env_name': '5Zone-hot-discrete-v1',
'config_params': DEFAULT_5ZONE_CONFIG_PARAMS})
# 2) 5-zone, mixed weather, discrete actions
register(
id='Eplus-5Zone-mixed-discrete-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '5ZoneAutoDXVAV.idf',
'weather_file': 'USA_NY_New.York-J.F.Kennedy.Intl.AP.744860_TMY3.epw',
'observation_space': DEFAULT_5ZONE_OBSERVATION_SPACE,
'observation_variables': DEFAULT_5ZONE_OBSERVATION_VARIABLES,
'action_space': DEFAULT_5ZONE_ACTION_SPACE_DISCRETE,
'action_variables': DEFAULT_5ZONE_ACTION_VARIABLES,
'action_mapping': DEFAULT_5ZONE_ACTION_MAPPING,
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': 'Zone Air Temperature(SPACE1-1)',
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (20.0, 23.5),
'range_comfort_summer': (23.0, 26.0)
},
'env_name': '5Zone-mixed-discrete-v1',
'config_params': DEFAULT_5ZONE_CONFIG_PARAMS})
# 3) 5-zone, cool weather, discrete actions
register(
id='Eplus-5Zone-cool-discrete-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '5ZoneAutoDXVAV.idf',
'weather_file': 'USA_WA_Port.Angeles-William.R.Fairchild.Intl.AP.727885_TMY3.epw',
'observation_space': DEFAULT_5ZONE_OBSERVATION_SPACE,
'observation_variables': DEFAULT_5ZONE_OBSERVATION_VARIABLES,
'action_space': DEFAULT_5ZONE_ACTION_SPACE_DISCRETE,
'action_variables': DEFAULT_5ZONE_ACTION_VARIABLES,
'action_mapping': DEFAULT_5ZONE_ACTION_MAPPING,
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': 'Zone Air Temperature(SPACE1-1)',
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (20.0, 23.5),
'range_comfort_summer': (23.0, 26.0)
},
'env_name': '5Zone-cool-discrete-v1',
'config_params': DEFAULT_5ZONE_CONFIG_PARAMS})
# 4) 5-zone, hot weather, discrete actions and stochastic
register(
id='Eplus-5Zone-hot-discrete-stochastic-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '5ZoneAutoDXVAV.idf',
'weather_file': 'USA_AZ_Davis-Monthan.AFB.722745_TMY3.epw',
'observation_space': DEFAULT_5ZONE_OBSERVATION_SPACE,
'observation_variables': DEFAULT_5ZONE_OBSERVATION_VARIABLES,
'action_space': DEFAULT_5ZONE_ACTION_SPACE_DISCRETE,
'action_variables': DEFAULT_5ZONE_ACTION_VARIABLES,
'action_mapping': DEFAULT_5ZONE_ACTION_MAPPING,
'weather_variability': (
1.0,
0.0,
0.001),
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': 'Zone Air Temperature(SPACE1-1)',
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (20.0, 23.5),
'range_comfort_summer': (23.0, 26.0)
},
'env_name': '5Zone-hot-discrete-stochastic-v1',
'config_params': DEFAULT_5ZONE_CONFIG_PARAMS})
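# Note on the 'weather_variability' tuple used by the stochastic variants:
# in Sinergym this is interpreted, roughly, as the (sigma, mu, tau) parameters
# of an Ornstein-Uhlenbeck noise process added to the weather series, so
# (1.0, 0.0, 0.001) adds zero-mean noise with unit variance scale and slow
# mean reversion. This description is a hedged summary, not part of this file.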
# 5) 5-zone, mixed weather, discrete actions and stochastic
register(
id='Eplus-5Zone-mixed-discrete-stochastic-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '5ZoneAutoDXVAV.idf',
'weather_file': 'USA_NY_New.York-J.F.Kennedy.Intl.AP.744860_TMY3.epw',
'observation_space': DEFAULT_5ZONE_OBSERVATION_SPACE,
'observation_variables': DEFAULT_5ZONE_OBSERVATION_VARIABLES,
'action_space': DEFAULT_5ZONE_ACTION_SPACE_DISCRETE,
'action_variables': DEFAULT_5ZONE_ACTION_VARIABLES,
'action_mapping': DEFAULT_5ZONE_ACTION_MAPPING,
'weather_variability': (1.0, 0.0, 0.001),
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': 'Zone Air Temperature(SPACE1-1)',
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (20.0, 23.5),
'range_comfort_summer': (23.0, 26.0)
},
'env_name': '5Zone-mixed-discrete-stochastic-v1',
'config_params': DEFAULT_5ZONE_CONFIG_PARAMS})
# 6) 5-zone, cool weather, discrete actions and stochastic
register(
id='Eplus-5Zone-cool-discrete-stochastic-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '5ZoneAutoDXVAV.idf',
'weather_file': 'USA_WA_Port.Angeles-William.R.Fairchild.Intl.AP.727885_TMY3.epw',
'observation_space': DEFAULT_5ZONE_OBSERVATION_SPACE,
'observation_variables': DEFAULT_5ZONE_OBSERVATION_VARIABLES,
'action_space': DEFAULT_5ZONE_ACTION_SPACE_DISCRETE,
'action_variables': DEFAULT_5ZONE_ACTION_VARIABLES,
'action_mapping': DEFAULT_5ZONE_ACTION_MAPPING,
'weather_variability': (1.0, 0.0, 0.001),
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': 'Zone Air Temperature(SPACE1-1)',
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (20.0, 23.5),
'range_comfort_summer': (23.0, 26.0)
},
'env_name': '5Zone-cool-discrete-stochastic-v1',
'config_params': DEFAULT_5ZONE_CONFIG_PARAMS})
# 7) 5-zone, hot weather, continuous actions
register(
id='Eplus-5Zone-hot-continuous-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '5ZoneAutoDXVAV.idf',
'weather_file': 'USA_AZ_Davis-Monthan.AFB.722745_TMY3.epw',
'observation_space': DEFAULT_5ZONE_OBSERVATION_SPACE,
'observation_variables': DEFAULT_5ZONE_OBSERVATION_VARIABLES,
'action_space': DEFAULT_5ZONE_ACTION_SPACE_CONTINUOUS,
'action_variables': DEFAULT_5ZONE_ACTION_VARIABLES,
'action_mapping': DEFAULT_5ZONE_ACTION_MAPPING,
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': 'Zone Air Temperature(SPACE1-1)',
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (
20.0,
23.5),
'range_comfort_summer': (
23.0,
26.0)},
'env_name': '5Zone-hot-continuous-v1',
'config_params': DEFAULT_5ZONE_CONFIG_PARAMS})
# 8) 5-zone, mixed weather, continuous actions
register(
id='Eplus-5Zone-mixed-continuous-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '5ZoneAutoDXVAV.idf',
'weather_file': 'USA_NY_New.York-J.F.Kennedy.Intl.AP.744860_TMY3.epw',
'observation_space': DEFAULT_5ZONE_OBSERVATION_SPACE,
'observation_variables': DEFAULT_5ZONE_OBSERVATION_VARIABLES,
'action_space': DEFAULT_5ZONE_ACTION_SPACE_CONTINUOUS,
'action_variables': DEFAULT_5ZONE_ACTION_VARIABLES,
'action_mapping': DEFAULT_5ZONE_ACTION_MAPPING,
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': 'Zone Air Temperature(SPACE1-1)',
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (
20.0,
23.5),
'range_comfort_summer': (
23.0,
26.0)},
'env_name': '5Zone-mixed-continuous-v1',
'config_params': DEFAULT_5ZONE_CONFIG_PARAMS})
# 9) 5-zone, cool weather, continuous actions
register(
id='Eplus-5Zone-cool-continuous-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '5ZoneAutoDXVAV.idf',
'weather_file': 'USA_WA_Port.Angeles-William.R.Fairchild.Intl.AP.727885_TMY3.epw',
'observation_space': DEFAULT_5ZONE_OBSERVATION_SPACE,
'observation_variables': DEFAULT_5ZONE_OBSERVATION_VARIABLES,
'action_space': DEFAULT_5ZONE_ACTION_SPACE_CONTINUOUS,
'action_variables': DEFAULT_5ZONE_ACTION_VARIABLES,
'action_mapping': DEFAULT_5ZONE_ACTION_MAPPING,
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': 'Zone Air Temperature(SPACE1-1)',
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (
20.0,
23.5),
'range_comfort_summer': (
23.0,
26.0)},
'env_name': '5Zone-cool-continuous-v1',
'config_params': DEFAULT_5ZONE_CONFIG_PARAMS})
# 10) 5-zone, hot weather, continuous actions and stochastic
register(
id='Eplus-5Zone-hot-continuous-stochastic-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '5ZoneAutoDXVAV.idf',
'weather_file': 'USA_AZ_Davis-Monthan.AFB.722745_TMY3.epw',
'observation_space': DEFAULT_5ZONE_OBSERVATION_SPACE,
'observation_variables': DEFAULT_5ZONE_OBSERVATION_VARIABLES,
'action_space': DEFAULT_5ZONE_ACTION_SPACE_CONTINUOUS,
'action_variables': DEFAULT_5ZONE_ACTION_VARIABLES,
'action_mapping': DEFAULT_5ZONE_ACTION_MAPPING,
'weather_variability': (
1.0,
0.0,
0.001),
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': 'Zone Air Temperature(SPACE1-1)',
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (
20.0,
23.5),
'range_comfort_summer': (
23.0,
26.0)},
'env_name': '5Zone-hot-continuous-stochastic-v1',
'config_params': DEFAULT_5ZONE_CONFIG_PARAMS})
# 11) 5-zone, mixed weather, continuous actions and stochastic
register(
id='Eplus-5Zone-mixed-continuous-stochastic-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '5ZoneAutoDXVAV.idf',
'weather_file': 'USA_NY_New.York-J.F.Kennedy.Intl.AP.744860_TMY3.epw',
'observation_space': DEFAULT_5ZONE_OBSERVATION_SPACE,
'observation_variables': DEFAULT_5ZONE_OBSERVATION_VARIABLES,
'action_space': DEFAULT_5ZONE_ACTION_SPACE_CONTINUOUS,
'action_variables': DEFAULT_5ZONE_ACTION_VARIABLES,
'action_mapping': DEFAULT_5ZONE_ACTION_MAPPING,
'weather_variability': (1.0, 0.0, 0.001),
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': 'Zone Air Temperature(SPACE1-1)',
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (20.0, 23.5),
'range_comfort_summer': (23.0, 26.0)
},
'env_name': '5Zone-mixed-continuous-stochastic-v1',
'config_params': DEFAULT_5ZONE_CONFIG_PARAMS})
# 12) 5-zone, cool weather, continuous actions and stochastic
register(
id='Eplus-5Zone-cool-continuous-stochastic-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '5ZoneAutoDXVAV.idf',
'weather_file': 'USA_WA_Port.Angeles-William.R.Fairchild.Intl.AP.727885_TMY3.epw',
'observation_space': DEFAULT_5ZONE_OBSERVATION_SPACE,
'observation_variables': DEFAULT_5ZONE_OBSERVATION_VARIABLES,
'action_space': DEFAULT_5ZONE_ACTION_SPACE_CONTINUOUS,
'action_variables': DEFAULT_5ZONE_ACTION_VARIABLES,
'action_mapping': DEFAULT_5ZONE_ACTION_MAPPING,
'weather_variability': (1.0, 0.0, 0.001),
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': 'Zone Air Temperature(SPACE1-1)',
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (20.0, 23.5),
'range_comfort_summer': (23.0, 26.0)
},
'env_name': '5Zone-cool-continuous-stochastic-v1',
'config_params': DEFAULT_5ZONE_CONFIG_PARAMS})
# ---------------------------------------------------------------------------- #
# Datacenter Environments #
# ---------------------------------------------------------------------------- #
# 13) DC, hot weather, discrete actions
register(
id='Eplus-datacenter-hot-discrete-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '2ZoneDataCenterHVAC_wEconomizer.idf',
'weather_file': 'USA_AZ_Davis-Monthan.AFB.722745_TMY3.epw',
'observation_space': DEFAULT_DATACENTER_OBSERVATION_SPACE,
'observation_variables': DEFAULT_DATACENTER_OBSERVATION_VARIABLES,
'action_space': DEFAULT_DATACENTER_ACTION_SPACE_DISCRETE,
'action_variables': DEFAULT_DATACENTER_ACTION_VARIABLES,
'action_mapping': DEFAULT_DATACENTER_ACTION_MAPPING,
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': [
'Zone Air Temperature(West Zone)',
'Zone Air Temperature(East Zone)'
],
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (18, 27),
'range_comfort_summer': (18, 27)
},
'env_name': 'datacenter-hot-discrete-v1',
'config_params': DEFAULT_DATACENTER_CONFIG_PARAMS
}
)
# 14) DC, hot weather, continuous actions
register(
id='Eplus-datacenter-hot-continuous-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '2ZoneDataCenterHVAC_wEconomizer.idf',
'weather_file': 'USA_AZ_Davis-Monthan.AFB.722745_TMY3.epw',
'observation_space': DEFAULT_DATACENTER_OBSERVATION_SPACE,
'observation_variables': DEFAULT_DATACENTER_OBSERVATION_VARIABLES,
'action_space': DEFAULT_DATACENTER_ACTION_SPACE_CONTINUOUS,
'action_variables': DEFAULT_DATACENTER_ACTION_VARIABLES,
'action_mapping': DEFAULT_DATACENTER_ACTION_MAPPING,
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': [
'Zone Air Temperature(West Zone)',
'Zone Air Temperature(East Zone)'
],
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (18, 27),
'range_comfort_summer': (18, 27)
},
'env_name': 'datacenter-hot-continuous-v1',
'config_params': DEFAULT_DATACENTER_CONFIG_PARAMS
}
)
# 15) DC, hot weather, discrete actions and stochastic
register(
id='Eplus-datacenter-hot-discrete-stochastic-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '2ZoneDataCenterHVAC_wEconomizer.idf',
'weather_file': 'USA_AZ_Davis-Monthan.AFB.722745_TMY3.epw',
'observation_space': DEFAULT_DATACENTER_OBSERVATION_SPACE,
'observation_variables': DEFAULT_DATACENTER_OBSERVATION_VARIABLES,
'action_space': DEFAULT_DATACENTER_ACTION_SPACE_DISCRETE,
'action_variables': DEFAULT_DATACENTER_ACTION_VARIABLES,
'action_mapping': DEFAULT_DATACENTER_ACTION_MAPPING,
'weather_variability': (1.0, 0.0, 0.001),
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': [
'Zone Air Temperature(West Zone)',
'Zone Air Temperature(East Zone)'
],
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (18, 27),
'range_comfort_summer': (18, 27)
},
'env_name': 'datacenter-hot-discrete-stochastic-v1',
'config_params': DEFAULT_DATACENTER_CONFIG_PARAMS
}
)
# 16) DC, hot weather, continuous actions and stochastic
register(
id='Eplus-datacenter-hot-continuous-stochastic-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '2ZoneDataCenterHVAC_wEconomizer.idf',
'weather_file': 'USA_AZ_Davis-Monthan.AFB.722745_TMY3.epw',
'observation_space': DEFAULT_DATACENTER_OBSERVATION_SPACE,
'observation_variables': DEFAULT_DATACENTER_OBSERVATION_VARIABLES,
'action_space': DEFAULT_DATACENTER_ACTION_SPACE_CONTINUOUS,
'action_variables': DEFAULT_DATACENTER_ACTION_VARIABLES,
'action_mapping': DEFAULT_DATACENTER_ACTION_MAPPING,
'weather_variability': (1.0, 0.0, 0.001),
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': [
'Zone Air Temperature(West Zone)',
'Zone Air Temperature(East Zone)'
],
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (18, 27),
'range_comfort_summer': (18, 27)
},
'env_name': 'datacenter-hot-continuous-stochastic-v1',
'config_params': DEFAULT_DATACENTER_CONFIG_PARAMS
}
)
# 17) DC, mixed weather, discrete actions
register(
id='Eplus-datacenter-mixed-discrete-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '2ZoneDataCenterHVAC_wEconomizer.idf',
'weather_file': 'USA_NY_New.York-J.F.Kennedy.Intl.AP.744860_TMY3.epw',
'observation_space': DEFAULT_DATACENTER_OBSERVATION_SPACE,
'observation_variables': DEFAULT_DATACENTER_OBSERVATION_VARIABLES,
'action_space': DEFAULT_DATACENTER_ACTION_SPACE_DISCRETE,
'action_variables': DEFAULT_DATACENTER_ACTION_VARIABLES,
'action_mapping': DEFAULT_DATACENTER_ACTION_MAPPING,
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': [
'Zone Air Temperature(West Zone)',
'Zone Air Temperature(East Zone)'
],
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (18, 27),
'range_comfort_summer': (18, 27)
},
'env_name': 'datacenter-mixed-discrete-v1',
'config_params': DEFAULT_DATACENTER_CONFIG_PARAMS})
# 18) DC, mixed weather, continuous actions
register(
id='Eplus-datacenter-mixed-continuous-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '2ZoneDataCenterHVAC_wEconomizer.idf',
'weather_file': 'USA_NY_New.York-J.F.Kennedy.Intl.AP.744860_TMY3.epw',
'observation_space': DEFAULT_DATACENTER_OBSERVATION_SPACE,
'observation_variables': DEFAULT_DATACENTER_OBSERVATION_VARIABLES,
'action_space': DEFAULT_DATACENTER_ACTION_SPACE_CONTINUOUS,
'action_variables': DEFAULT_DATACENTER_ACTION_VARIABLES,
'action_mapping': DEFAULT_DATACENTER_ACTION_MAPPING,
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': [
'Zone Air Temperature(West Zone)',
'Zone Air Temperature(East Zone)'
],
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (18, 27),
'range_comfort_summer': (18, 27)
},
'env_name': 'datacenter-mixed-continuous-v1',
'config_params': DEFAULT_DATACENTER_CONFIG_PARAMS})
# 19) DC, mixed weather, discrete actions and stochastic
register(
id='Eplus-datacenter-mixed-discrete-stochastic-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '2ZoneDataCenterHVAC_wEconomizer.idf',
'weather_file': 'USA_NY_New.York-J.F.Kennedy.Intl.AP.744860_TMY3.epw',
'observation_space': DEFAULT_DATACENTER_OBSERVATION_SPACE,
'observation_variables': DEFAULT_DATACENTER_OBSERVATION_VARIABLES,
'action_space': DEFAULT_DATACENTER_ACTION_SPACE_DISCRETE,
'action_variables': DEFAULT_DATACENTER_ACTION_VARIABLES,
'action_mapping': DEFAULT_DATACENTER_ACTION_MAPPING,
'weather_variability': (1.0, 0.0, 0.001),
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': [
'Zone Air Temperature(West Zone)',
'Zone Air Temperature(East Zone)'
],
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (18, 27),
'range_comfort_summer': (18, 27)
},
'env_name': 'datacenter-mixed-discrete-stochastic-v1',
'config_params': DEFAULT_DATACENTER_CONFIG_PARAMS})
# 20) DC, mixed weather, continuous actions and stochastic
register(
id='Eplus-datacenter-mixed-continuous-stochastic-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '2ZoneDataCenterHVAC_wEconomizer.idf',
'weather_file': 'USA_NY_New.York-J.F.Kennedy.Intl.AP.744860_TMY3.epw',
'observation_space': DEFAULT_DATACENTER_OBSERVATION_SPACE,
'observation_variables': DEFAULT_DATACENTER_OBSERVATION_VARIABLES,
'action_space': DEFAULT_DATACENTER_ACTION_SPACE_CONTINUOUS,
'action_variables': DEFAULT_DATACENTER_ACTION_VARIABLES,
'action_mapping': DEFAULT_DATACENTER_ACTION_MAPPING,
'weather_variability': (1.0, 0.0, 0.001),
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': [
'Zone Air Temperature(West Zone)',
'Zone Air Temperature(East Zone)'
],
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (18, 27),
'range_comfort_summer': (18, 27)
},
'env_name': 'datacenter-mixed-continuous-stochastic-v1',
'config_params': DEFAULT_DATACENTER_CONFIG_PARAMS})
# 21) DC, cool weather, discrete actions
register(
id='Eplus-datacenter-cool-discrete-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
'idf_file': '2ZoneDataCenterHVAC_wEconomizer.idf',
'weather_file': 'USA_WA_Port.Angeles-William.R.Fairchild.Intl.AP.727885_TMY3.epw',
'observation_space': DEFAULT_DATACENTER_OBSERVATION_SPACE,
'observation_variables': DEFAULT_DATACENTER_OBSERVATION_VARIABLES,
'action_space': DEFAULT_DATACENTER_ACTION_SPACE_DISCRETE,
'action_variables': DEFAULT_DATACENTER_ACTION_VARIABLES,
'action_mapping': DEFAULT_DATACENTER_ACTION_MAPPING,
'reward': LinearReward,
'reward_kwargs': {
'temperature_variable': [
'Zone Air Temperature(West Zone)',
'Zone Air Temperature(East Zone)'],
'energy_variable': 'Facility Total HVAC Electricity Demand Rate(Whole Building)',
'range_comfort_winter': (
18,
27),
'range_comfort_summer': (
18,
27)},
'env_name': 'datacenter-cool-discrete-v1',
'config_params': DEFAULT_DATACENTER_CONFIG_PARAMS})
# 22) DC, cool weather, continuous actions
register(
id='Eplus-datacenter-cool-continuous-v1',
entry_point='sinergym.envs:EplusEnv',
kwargs={
| |
# File: dashboard/dashboard/graph_json.py
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Serves JSON for a graph.
This serves the JSON in the format consumed by Flot:
https://github.com/flot/flot/blob/master/API.md
"""
import copy
import datetime
import json
import logging
import re
from google.appengine.ext import ndb
from dashboard import alerts
from dashboard import can_bisect
from dashboard import list_tests
from dashboard.common import datastore_hooks
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import graph_data
# Default number of points to fetch per test.
# This can be overridden by specifying num_points or start_rev and end_rev.
_DEFAULT_NUM_POINTS = 150
# If data for more than this many tests is requested for unselected tests,
# an empty response will be returned.
_MAX_UNSELECTED_TESTS = 55
# Dictionary mapping improvement directions constants to strings.
_BETTER_DICT = {
anomaly.UP: 'Higher',
anomaly.DOWN: 'Lower',
anomaly.UNKNOWN: '?',
}
class GraphJsonHandler(request_handler.RequestHandler):
"""Request handler for requests for graph data."""
def post(self):
"""Fetches and prepares data for a graph.
Request parameters:
graphs: A JSON serialization of a dict that contains the arguments
for GetGraphJson.
Outputs:
JSON serialization of data to be used for plotting a graph.
"""
self.response.headers.add_header('Access-Control-Allow-Origin', '*')
arguments = self._ParseRequestArguments()
if not arguments:
self.ReportError('Bad Graph JSON Request')
return
self.response.out.write(GetGraphJson(**arguments))
def _ParseRequestArguments(self):
"""Parses parameters from a request and checks for errors.
The post request is expected to pass one parameter, called 'graphs',
whose value is a JSON serialization of a dict of parameters.
Returns:
A dict of arguments that can be given to GetGraphJson, or None if
no valid dict of arguments can be constructed.
"""
graphs = self.request.get('graphs')
if graphs is None:
logging.error('No graph names specified')
return None
try:
graphs = json.loads(graphs)
except ValueError:
logging.error('Invalid JSON string for graphs')
return None
test_path_dict = graphs.get('test_path_dict')
test_path_list = graphs.get('test_path_list')
is_selected = graphs.get('is_selected')
if test_path_dict and test_path_list:
logging.error(
'Only one of test_path_dict and test_path_list may be specified')
return None
elif test_path_dict:
test_paths = _ResolveTestPathDict(test_path_dict, is_selected)
elif test_path_list:
test_paths = test_path_list
else:
logging.error(
'Exactly one of test_path_dict or test_path_list must be specified')
return None
arguments = {
'test_paths': test_paths,
'rev': _PositiveIntOrNone(graphs.get('rev')),
'num_points': (_PositiveIntOrNone(graphs.get('num_points'))
or _DEFAULT_NUM_POINTS),
'is_selected': is_selected,
'start_rev': _PositiveIntOrNone(graphs.get('start_rev')),
'end_rev': _PositiveIntOrNone(graphs.get('end_rev')),
}
return arguments
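# Example of a request payload this handler accepts (the test path is
# fictional): the client POSTs a single 'graphs' parameter whose value is a
# JSON object, e.g.
#
#   graphs = {
#     "test_path_list": ["ChromiumPerf/linux/sunspider/Total"],
#     "num_points": 150,
#     "is_selected": true
#   }
#
# which _ParseRequestArguments turns into keyword arguments for GetGraphJson.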
def _ResolveTestPathDict(test_path_dict, is_selected):
# TODO(eakuefner): These are old-style test path dicts which means that []
# doesn't mean 'no tests' but rather 'all tests'. Remove this hack.
if is_selected:
for test, selected in test_path_dict.iteritems():
if selected == []:
test_path_dict[test] = 'all'
return list_tests.GetTestsForTestPathDict(test_path_dict, bool(is_selected))
def GetGraphJson(
test_paths, rev=None, num_points=None,
is_selected=True, start_rev=None, end_rev=None):
"""Makes a JSON serialization of data for one chart with multiple series.
This function can return data for one chart (with multiple data series
plotted on it) with revisions on the x-axis, for a certain range of
revisions. The particular set of revisions to get data for can be specified
with the arguments rev, num_points, start_rev, and end_rev.
Args:
test_paths: A list of test paths.
rev: A revision number that the chart may be clamped relative to.
num_points: Number of points to plot.
is_selected: Whether this request is for selected or un-selected series.
start_rev: The lowest revision to get trace data for.
end_rev: The highest revision to get trace data for.
Returns:
JSON serialization of a dict with info that will be used to plot a chart.
"""
# TODO(qyearsley): Parallelize queries if possible.
# If a particular test has a lot of children, then a request will be made
# for data for a lot of unselected series, which may be very slow and may
# time out. In this case, return nothing.
# TODO(qyearsley): Stop doing this when there's a better solution (#1876).
if not is_selected and len(test_paths) > _MAX_UNSELECTED_TESTS:
return json.dumps({'data': {}, 'annotations': {}, 'error_bars': {}})
test_keys = map(utils.TestKey, test_paths)
test_entities = ndb.get_multi(test_keys)
test_entities = [t for t in test_entities if t is not None and t.has_rows]
# Filter out deprecated tests, but only if not all the tests are deprecated.
all_deprecated = all(t.deprecated for t in test_entities)
if not all_deprecated:
test_entities = [t for t in test_entities if not t.deprecated]
test_entities = [t for t in test_entities if t.has_rows]
revision_map = {}
num_points = num_points or _DEFAULT_NUM_POINTS
for test in test_entities:
_UpdateRevisionMap(revision_map, test, rev, num_points, start_rev, end_rev)
if not (start_rev and end_rev):
_ClampRevisionMap(revision_map, rev, num_points)
return _GetFlotJson(revision_map, test_entities)
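# Sketch of how the pieces above fit together: the revision_map accumulates one
# dict of point info per revision and test, is clamped to the requested window,
# and is then serialized by _GetFlotJson. Judging from the empty-response branch
# above, the returned JSON is an object with (at least) 'data', 'annotations'
# and 'error_bars' keys. The test path below is fictional.
#
#   json_str = GetGraphJson(["ChromiumPerf/linux/sunspider/Total"], num_points=50)
#   chart = json.loads(json_str)
#   # expect keys like 'data', 'annotations', 'error_bars'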
def _PositiveIntOrNone(input_str):
"""Parses a string as a positive int if possible, otherwise returns None."""
if not input_str:
return None
try:
parsed = int(input_str)
except ValueError:
return None
if parsed < 0:
return None
return parsed
def _GetAnomalyAnnotationMap(test):
"""Gets a map of revision numbers to Anomaly entities."""
anomalies = anomaly.Anomaly.GetAlertsForTest(test)
return dict((a.end_revision, a) for a in anomalies)
def _UpdateRevisionMap(revision_map, parent_test, rev, num_points,
start_rev=None, end_rev=None):
"""Updates a dict of revisions to data point information for one test.
Depending on which arguments are given, there are several ways that
this function can update the dict of revisions:
1. If start_rev and end_rev are given, then revisions in this range
are used. The num_points argument is ignored.
2. Otherwise, if rev is given, then revisions before and after the
specified revision are used.
3. Otherwise, the latest revisions are used.
Args:
revision_map: A dict mapping revision numbers to dicts of point info.
Each point info dict contains information from a Row entity.
parent_test: A TestMetadata entity with Row children.
rev: The middle revision in the revision map (could be None).
num_points: The number of points to include in the revision map.
start_rev: Start revision number (optional).
end_rev: End revision number (optional).
"""
anomaly_annotation_map = _GetAnomalyAnnotationMap(parent_test.key)
assert(datastore_hooks.IsUnalteredQueryPermitted() or
not parent_test.internal_only)
if start_rev and end_rev:
rows = graph_data.GetRowsForTestInRange(
parent_test.key, start_rev, end_rev, True)
elif rev:
assert num_points
rows = graph_data.GetRowsForTestAroundRev(
parent_test.key, rev, num_points, True)
else:
assert num_points
rows = graph_data.GetLatestRowsForTest(
parent_test.key, num_points, privileged=True)
parent_test_key = parent_test.key.urlsafe()
for row in rows:
if row.revision not in revision_map:
revision_map[row.revision] = {}
revision_map[row.revision][parent_test_key] = _PointInfoDict(
row, anomaly_annotation_map)
def _PointInfoDict(row, anomaly_annotation_map):
"""Makes a dict of properties of one Row."""
point_info = {
'value': row.value,
'a_trace_rerun_options': _GetTracingRerunOptions(row),
}
tracing_uri = _GetTracingUri(row)
if tracing_uri:
point_info['a_tracing_uri'] = tracing_uri
if row.error is not None:
point_info['error'] = row.error
if anomaly_annotation_map.get(row.revision):
anomaly_entity = anomaly_annotation_map.get(row.revision)
point_info['g_anomaly'] = alerts.GetAnomalyDict(anomaly_entity)
row_dict = row.to_dict()
for name, val in row_dict.iteritems():
# TODO(sullivan): Remove this hack when data containing these broken links
# is sufficiently stale, after June 2016.
if (_IsMarkdownLink(val) and
val.find('(None') != -1 and
'a_stdio_uri_prefix' in row_dict):
# Many data points have been added with a stdio prefix expanded out to
# 'None' when 'a_stdio_uri_prefix' is set correctly. Fix them up.
# Add in the master name as well; if the waterfall is 'CamelCase' it
# should be 'camel.client.case'.
master_camel_case = utils.TestPath(row.parent_test).split('/')[0]
master_parts = re.findall('([A-Z][a-z0-9]+)', master_camel_case)
if master_parts and len(master_parts) == 2:
master_name = '%s.client.%s' % (
master_parts[1].lower(), master_parts[0].lower())
val = val.replace('(None', '(%s/%s/' % (
row_dict['a_stdio_uri_prefix'], master_name))
if _IsMarkdownLink(val) and 'Buildbot stdio' in val:
logdog_link, status_page_link = _GetUpdatedBuildbotLinks(val)
if logdog_link:
val = logdog_link
if status_page_link:
point_info['a_buildbot_status_page'] = status_page_link
if name.startswith('r_'):
point_info[name] = val
elif name == 'a_default_rev':
point_info['a_default_rev'] = val
elif name == 'timestamp':
point_info['timestamp'] = val
elif name.startswith('a_') and _IsMarkdownLink(val):
point_info[name] = val
return point_info
def _IsMarkdownLink(value):
"""Checks whether |value| is a markdown link."""
if not isinstance(value, str):
return False
return re.match(r'\[.+?\]\(.+?\)', value)
def _GetUpdatedBuildbotLinks(old_stdio_link):
# Links take a markdown format, [title](url)
logdog_markdown = None
logdog_link = utils.GetLogdogLogUriFromStdioLink(old_stdio_link)
if logdog_link:
logdog_markdown = '[Buildbot stdio](%s)' % logdog_link
buildbot_status_markdown = None
buildbot_link = utils.GetBuildbotStatusPageUriFromStdioLink(
old_stdio_link)
if buildbot_link:
buildbot_status_markdown = '[Buildbot status page](%s)' % buildbot_link
return logdog_markdown, buildbot_status_markdown
def _CreateLinkProperty(name, label, url):
"""Returns a dict containing markdown link to show on dashboard."""
return {'a_' + name: '[%s](%s)' % (label, url)}
def _GetSeriesAnnotations(tests):
"""Makes a list of metadata about each series (i.e. each test).
Args:
tests: List of TestMetadata entities.
Returns:
A list of dicts of metadata about each series. One dict for each test.
"""
series_annotations = {}
for i, test in enumerate(tests):
series_annotations[i] = {
'name': test.test_name,
'path': test.test_path,
'units': test.units,
'better': _BETTER_DICT[test.improvement_direction],
'description': test.description,
'can_bisect': can_bisect.IsValidTestForBisect(test.test_path),
}
return series_annotations
def _ClampRevisionMap(revision_map, rev, num_points):
"""Clamps the results down to the | |
69, 861, 21, 743, 609, 921,
484, 440, 12, 207, 900, 220, 584, 795, 568, 155, 226,
358, 216, 749, 640, 381, 958, 449, 867, 327, 376, 955,
690, 784, 588, 187, 394, 366, 592, 643, 444, 600, 855,
40, 822, 156, 851, 683, 303, 638, 818, 714, 303, 509,
353, 557, 51, 592, 663, 475, 725, 262, 508, 896, 894,
85, 815, 59, 953, 141, 629, 447, 418, 356, 672, 876,
396, 389, 281, 983, 675, 429, 115, 751, 719, 778, 453,
731, 628, 195, 201, 795, 462, 48, 253, 50, 714, 608,
76, 287, 836, 223, 735, 170, 155, 345, 977, 600, 812,
851, 559, 152, 256, 965, 586, 591, 966, 146, 868, 619,
705, 544, 180, 513, 509, 278, 274, 27, 850, 339, 681,
919, 470, 168, 362, 884, 982, 96, 728, 618, 635, 496,
813, 482, 982, 403, 219, 98, 105, 933, 437, 557, 28,
653, 939, 378, 534, 89, 73, 910, 741, 195, 4, 547,
916, 887, 912, 610, 815, 430, 827, 653, 803, 485, 422,
342, 612, 460, 684, 98, 970, 240, 688, 100, 932, 407,
523, 485, 70, 131, 996, 72, 563, 400, 50, 669, 607,
699, 921, 289, 713, 511, 639, 703, 269, 326, 650, 223,
993, 760, 894, 660, 617, 475, 205, 176, 139, 215, 107,
612, 651, 826, 922, 868, 632, 383, 474, 261, 384, 118,
470, 576, 92, 661, 663, 77, 624, 837, 541, 975, 608,
446, 787, 963, 647, 875, 147, 248, 121, 353, 697, 956,
478, 153, 779, 602, 486, 610, 138, 733, 276, 194, 516,
170, 574, 411, 242, 52, 242, 411, 827, 733, 662, 104,
111, 690, 909, 6, 193, 342, 51, 594, 441, 434, 548,
812, 792, 68, 376, 670, 512, 95, 268, 258, 283, 444,
136, 41, 551, 269, 68])
def test_snail_010(self):
self.assertEqual(snail([[831, 609, 235, 391, 645, 469, 352, 982, 96,
596, 79, 460, 438, 280, 390],
[639, 19, 257, 411, 862, 508, 652, 265, 609,
188, 443, 425, 584, 11, 329],
[616, 731, 442, 315, 530, 954, 306, 455, 808,
921, 604, 282, 695, 778, 711],
[205, 735, 423, 803, 480, 736, 47, 13, 478, 960,
268, 844, 611, 102, 489],
[271, 314, 134, 650, 634, 984, 925, 565, 67,
651, 139, 697, 735, 616, 83],
[124, 381, 202, 355, 488, 99, 269, 486, 900,
601, 449, 777, 607, 702, 504],
[259, 357, 104, 126, 784, 649, 30, 243, 716,
436, 917, 272, 629, 864, 131],
[333, 402, 81, 766, 352, 14, 227, 796, 572, 623,
176, 196, 870, 5, 822],
[469, 67, 286, 430, 711, 336, 78, 384, 71, 783,
832, 458, 940, 511, 160],
[783, 286, 352, 679, 233, 493, 549, 83, 137,
498, 450, 214, 856, 925, 585],
[360, 663, 80, 307, 411, 97, 42, 857, 865, 954,
30, 778, 691, 880, 898],
[354, 373, 818, 619, 465, 957, 268, 876, 19, 58,
163, 138, 283, 970, 267],
[773, 79, 892, 808, 810, 35, 147, 377, 502, 400,
742, 345, 35, 120, 859],
[933, 643, 548, 241, 817, 661, 936, 837, 571,
596, 177, 296, 531, 836, 805],
[915, 268, 534, 369, 791, 90, 843, 104, 293, 92,
270, 306, 226, 797, 903]]),
[831, 609, 235, 391, 645, 469, 352, 982, 96, 596, 79,
460, 438, 280, 390, 329, 711, 489, 83, 504, 131, 822,
160, 585, 898, 267, 859, 805, 903, 797, 226, 306, 270,
92, 293, 104, 843, 90, 791, 369, 534, 268, 915, 933,
773, 354, 360, 783, 469, 333, 259, 124, 271, 205, 616,
639, 19, 257, 411, 862, 508, 652, 265, 609, 188, 443,
425, 584, 11, 778, 102, 616, 702, 864, 5, 511, 925,
880, 970, 120, 836, 531, 296, 177, 596, 571, 837, 936,
661, 817, 241, 548, 643, 79, 373, 663, 286, 67, 402,
357, 381, 314, 735, 731, 442, 315, 530, 954, 306, 455,
808, 921, 604, 282, 695, 611, 735, 607, 629, 870, 940,
856, 691, 283, 35, 345, 742, 400, 502, 377, 147, 35,
810, 808, 892, 818, 80, 352, 286, 81, 104, 202, 134,
423, 803, 480, 736, 47, 13, 478, 960, 268, 844, 697,
777, 272, 196, 458, 214, 778, 138, 163, 58, 19, 876,
268, 957, 465, 619, 307, 679, 430, 766, 126, 355, 650,
634, 984, 925, 565, 67, 651, 139, 449, 917, 176, 832,
450, 30, 954, 865, 857, 42, 97, 411, 233, 711, 352,
784, 488, 99, 269, 486, 900, 601, 436, 623, 783, 498,
137, 83, 549, 493, 336, 14, 649, 30, 243, 716, 572,
71, 384, 78, 227, 796])
def test_snail_011(self):
self.assertEqual(snail([[900, 61, 525, 325, 420, 389, 718, 967, 116,
156, 877, 301, 815],
[325, 921, 851, 66, 226, 759, 166, 754, 972,
199, 26, 673, 81],
[953, 211, 277, 170, 498, 206, 11, 766, 742,
101, 661, 674, 501],
[613, 645, 897, 883, 24, 499, 408, 404, 93, 464,
815, 546, 830],
[103, 374, 494, 259, 597, 463, 83, 658, 867,
321, 311, 942, 265],
[279, 214, 989, 896, 644, 152, 130, 439, 917,
664, 293, 835, 469],
[114, 212, 935, 146, 589, 399, 128, 61, 242,
1000, 695, 340, 119],
[67, 258, 342, 377, 207, 186, 296, 249, 902,
607, 168, 151, 890],
[331, 274, 68, 643, 694, 918, 141, 718, 26, 659,
786, 247, 685],
[760, 128, 36, 115, 509, 292, 665, 755, 426,
380, 813, 1000, 366],
[459, 285, 200, 835, 851, 925, 217, 506, 749,
313, 546, 588, 902],
[475, 556, 67, 602, 323, 842, 248, 103, 413,
276, 513, 254, 478],
[478, 749, 519, 165, 158, 393, 952, 614, 291,
781, 344, 774, 42]]),
[900, 61, 525, 325, 420, 389, 718, 967, 116, 156, 877,
301, 815, 81, 501, 830, 265, 469, 119, 890, 685, 366,
902, 478, 42, 774, 344, 781, 291, 614, 952, 393, 158,
165, 519, 749, 478, 475, 459, 760, 331, 67, 114, 279,
103, 613, 953, 325, 921, 851, 66, 226, 759, 166, 754,
972, 199, 26, 673, 674, 546, 942, 835, 340, 151, 247,
1000, 588, 254, 513, 276, 413, 103, 248, 842, 323,
602, 67, 556, 285, 128, 274, 258, 212, 214, 374, 645,
211, 277, 170, 498, 206, 11, 766, 742, 101, 661, 815,
311, 293, 695, 168, 786, 813, 546, 313, 749, 506, 217,
925, 851, 835, 200, 36, 68, 342, 935, 989, 494, 897,
883, 24, 499, 408, 404, 93, 464, 321, 664, 1000, 607,
659, 380, 426, 755, 665, 292, 509, 115, 643, 377, 146,
896, 259, 597, 463, 83, 658, 867, 917, 242, 902, 26,
718, 141, 918, 694, 207, 589, 644, 152, 130, 439, 61,
249, 296, 186, 399, 128])
def test_snail_012(self):
self.assertEqual(snail([[743, 389, 404, 786, 6, 509, 887, 481, 858, 117,
671, 344, 7, 855, 551, 838, 500, 736, 981,
342],
[823, 940, 897, 877, 616, 425, 425, 300, 769,
780, 755, 505, 48, 339, 987, 285, 118, 949,
245, 644],
[68, 37, 515, 914, 885, 247, 552, 998, 53, 782,
913, 34, 413, 744, 462, 794, 589, 405, 233,
850],
[905, 208, 712, 995, 261, 154, 768, 118, 908,
452, 706, 612, 584, 638, 480, 969, 345, 780,
435, 898],
[714, 11, 654, 957, 564, 362, 231, 41, 721, 254,
202, 137, 126, 174, 832, 661, 382, 654, 516,
300],
[218, 667, 767, 610, 339, 531, 335, 234, 53,
735, 742, 818, 233, 26, 634, 229, 316, 436,
999, 348],
[943, 451, 142, 545, 186, 542, 934, 22, 287,
166, 63, 495, 13, 433, 739, 270, 535, 305, 272,
254],
[322, 892, 751, 856, 280, 706, 632, 796, 507,
633, 52, 86, 116, 753, 489, 294, 869, 135, 565,
102],
[691, 412, 615, 389, 973, 462, 624, 172, 170,
56, 744, 558, 339, 871, 878, 495, 810, 454,
349, 261],
                               [545, 378, 844, 494, 172, 465,
# Repository: AbhigyaShridhar/cc-licenses (10-100 GitHub stars)
# Standard library
from unittest import mock
# Third-party
import polib
from django.conf import settings
from django.test import TestCase, override_settings
from django.utils.translation import override
# First-party/Local
from legal_tools.models import (
FREEDOM_LEVEL_MAX,
FREEDOM_LEVEL_MID,
FREEDOM_LEVEL_MIN,
LegalCode,
Tool,
)
from legal_tools.tests.factories import (
LegalCodeFactory,
ToolFactory,
TranslationBranchFactory,
)
class LegalCodeQuerySetTest(TestCase):
def test_translated(self):
bylicense30ported = ToolFactory(
unit="by-nc", version="3.0", jurisdiction_code="ar"
)
bylicense30unported = ToolFactory(
unit="by-nc", version="3.0", jurisdiction_code=""
)
bylicense40 = ToolFactory(
unit="by-nc", version="4.0", jurisdiction_code=""
)
zerov1declaration = ToolFactory(
unit="zero", version="1.0", jurisdiction_code=""
)
should_be_translated = [
LegalCodeFactory(tool=bylicense40),
LegalCodeFactory(tool=zerov1declaration),
]
should_not_be_translated = [
LegalCodeFactory(tool=bylicense30ported),
LegalCodeFactory(tool=bylicense30unported),
]
self.assertCountEqual(
should_be_translated, list(LegalCode.objects.translated())
)
self.assertCountEqual(
should_not_be_translated,
set(LegalCode.objects.all()) - set(LegalCode.objects.translated()),
)
def test_valid(self):
bylicense30ported = ToolFactory(
unit="by-nc", version="3.0", jurisdiction_code="ar"
)
bylicense30unported = ToolFactory(
unit="by-nc", version="3.0", jurisdiction_code=""
)
nonbylicense30ported = ToolFactory(
unit="xyz", version="3.0", jurisdiction_code="ar"
)
nonbylicense30unported = ToolFactory(
unit="xyz", version="3.0", jurisdiction_code=""
)
bylicense40 = ToolFactory(
unit="by-nc", version="4.0", jurisdiction_code=""
)
nonbylicense40 = ToolFactory(
unit="xyz", version="4.0", jurisdiction_code=""
)
zerov1declaration = ToolFactory(
unit="zero", version="1.0", jurisdiction_code=""
)
nonzerov1declaration = ToolFactory(
unit="xyz", version="1.0", jurisdiction_code=""
)
# Test valid()
should_be_valid = [
LegalCodeFactory(tool=bylicense30ported),
LegalCodeFactory(tool=bylicense30unported),
LegalCodeFactory(tool=bylicense40),
LegalCodeFactory(tool=zerov1declaration),
]
should_not_be_valid = [
LegalCodeFactory(tool=nonbylicense30ported),
LegalCodeFactory(tool=nonbylicense30unported),
LegalCodeFactory(tool=nonbylicense40),
LegalCodeFactory(tool=nonzerov1declaration),
]
self.assertCountEqual(should_be_valid, list(LegalCode.objects.valid()))
self.assertCountEqual(
should_not_be_valid,
set(LegalCode.objects.all()) - set(LegalCode.objects.valid()),
)
# Test validgroups()
self.assertCountEqual(
should_be_valid,
list(LegalCode.objects.validgroups()["Licenses 4.0"])
+ list(LegalCode.objects.validgroups()["Licenses 3.0"])
+ list(LegalCode.objects.validgroups()["Public Domain all"]),
)
self.assertCountEqual(
should_not_be_valid,
set(LegalCode.objects.all())
- set(
list(LegalCode.objects.validgroups()["Licenses 4.0"])
+ list(LegalCode.objects.validgroups()["Licenses 3.0"])
+ list(LegalCode.objects.validgroups()["Public Domain all"])
),
)
class LegalCodeModelTest(TestCase):
def test_str(self):
LegalCodeFactory()
legal_code = LegalCode.objects.first()
self.assertEqual(
str(legal_code),
f"LegalCode<{legal_code.language_code},"
f" {str(legal_code.tool)}>",
)
def test_translation_domain(self):
data = [
# (expected, unit, version, jurisdiction, language)
("by-sa_30", "by-sa", "3.0", "", "fr"),
("by-sa_30_xx", "by-sa", "3.0", "xx", "fr"),
]
for expected, unit, version, jurisdiction, language in data:
with self.subTest(expected):
legal_code = LegalCodeFactory(
tool__unit=unit,
tool__version=version,
tool__jurisdiction_code=jurisdiction,
language_code=language,
)
self.assertEqual(expected, legal_code.translation_domain)
@override_settings(DATA_REPOSITORY_DIR="/foo")
def test_translation_filename(self):
data = [
# (expected, unit, version, jurisdiction, language)
(
"/foo/legalcode/de/LC_MESSAGES/by-sa_03.po",
"by-sa",
"0.3",
"",
"de",
),
(
"/foo/legalcode/de/LC_MESSAGES/by-sa_03_xx.po",
"by-sa",
"0.3",
"xx",
"de",
),
]
for expected, unit, version, jurisdiction, language in data:
with self.subTest(expected):
tool = ToolFactory(
unit=unit,
version=version,
jurisdiction_code=jurisdiction,
)
self.assertEqual(
expected,
LegalCodeFactory(
tool=tool, language_code=language
).translation_filename(),
)
# NOTE: plaintext functionality disabled
# def test_plain_text_url(self):
# lc0 = LegalCodeFactory(
# tool__unit="by",
# tool__version="4.0",
# tool__jurisdiction_code="",
# language_code="en",
# )
# lc1 = LegalCodeFactory(
# tool__unit="by",
# tool__version="4.0",
# tool__jurisdiction_code="",
# language_code="fr",
# )
# lc2 = LegalCodeFactory(
# tool__unit="by",
# tool__version="4.0",
# tool__jurisdiction_code="",
# language_code="ar",
# )
# self.assertEqual(
# lc0.plain_text_url,
# f"{lc0.legal_code_url.replace('legalcode.en', 'legalcode.txt')}",
# )
# self.assertEqual(lc1.plain_text_url, "")
# self.assertEqual(lc2.plain_text_url, "")
def test_get_pofile(self):
legal_code = LegalCodeFactory()
test_pofile = polib.POFile()
test_translation_filename = "/dev/null"
with mock.patch.object(LegalCode, "translation_filename") as mock_tf:
mock_tf.return_value = test_translation_filename
with mock.patch.object(polib, "pofile") as mock_pofile:
mock_pofile.return_value = test_pofile
result = legal_code.get_pofile()
mock_pofile.assert_called_with("", encoding="utf-8")
self.assertEqual(test_pofile, result)
@override_settings(DATA_REPOSITORY_DIR="/some/dir")
def test_get_english_pofile_path(self):
legal_code = LegalCodeFactory(
tool__version="4.0",
tool__unit="by-sa",
language_code="de",
)
legal_code_en = LegalCodeFactory(
tool=legal_code.tool, language_code=settings.LANGUAGE_CODE
)
expected_path = "/some/dir/legalcode/en/LC_MESSAGES/by-sa_40.po"
with mock.patch.object(
Tool, "get_legal_code_for_language_code"
) as mock_glfl:
mock_glfl.return_value = legal_code_en
self.assertEqual(
expected_path, legal_code.get_english_pofile_path()
)
self.assertEqual(
expected_path, legal_code_en.get_english_pofile_path()
)
mock_glfl.assert_called_with(settings.LANGUAGE_CODE)
@override_settings(DATA_REPOSITORY_DIR="/some/dir")
def test_get_translation_object(self):
# get_translation_object on the model calls the
# i18n.utils.get_translation_object.
legal_code = LegalCodeFactory(
tool__version="4.0",
tool__unit="by-sa",
language_code="de",
)
with mock.patch(
"legal_tools.models.get_translation_object"
) as mock_djt:
legal_code.get_translation_object()
mock_djt.assert_called_with(
django_language_code="de", domain="by-sa_40"
)
def test_branch_name(self):
legal_code = LegalCodeFactory(
tool__version="4.0",
tool__unit="by-sa",
language_code="de",
)
self.assertEqual("cc4-de", legal_code.branch_name())
legal_code = LegalCodeFactory(
tool__version="3.5",
tool__unit="other",
language_code="de",
)
self.assertEqual("other-35-de", legal_code.branch_name())
legal_code = LegalCodeFactory(
tool__version="3.5",
tool__unit="other",
language_code="de",
tool__jurisdiction_code="xyz",
)
self.assertEqual("other-35-de-xyz", legal_code.branch_name())
def test_has_english(self):
tool = ToolFactory()
lc_fr = LegalCodeFactory(tool=tool, language_code="fr")
self.assertFalse(lc_fr.has_english())
lc_en = LegalCodeFactory(tool=tool, language_code="en")
self.assertTrue(lc_fr.has_english())
self.assertTrue(lc_en.has_english())
# get_publish_files BY-NC-ND 4.0 #########################################
# BY-NC-ND 4.0 is an international license with multiple languages
def test_get_publish_files_by_nc_nd4_deed_en(self):
legal_code = LegalCodeFactory(
tool__category="licenses",
tool__unit="by-nc-nd",
tool__version="4.0",
language_code="en",
)
returned_list = legal_code.get_publish_files("deed")
self.assertEqual(
[
# relpath
"licenses/by-nc-nd/4.0/deed.en.html",
# symlinks
["deed.html", "index.html"],
# redirects_data
[
{
"destination": "deed.en",
"language_code": "en",
"redirect_file": (
"licenses/by-nc-nd/4.0/deed.en-us.html"
),
"title": "",
},
{
"destination": "deed.en",
"language_code": "en",
"redirect_file": (
"licenses/by-nc-nd/4.0/deed.en_us.html"
),
"title": "",
},
],
],
returned_list,
)
def test_get_publish_files_by_nc_nd4_legal_code_en(self):
legal_code = LegalCodeFactory(
tool__category="licenses",
tool__unit="by-nc-nd",
tool__version="4.0",
language_code="en",
)
returned_list = legal_code.get_publish_files("legalcode")
self.assertEqual(
[
# relpath
"licenses/by-nc-nd/4.0/legalcode.en.html",
# symlinks
["legalcode.html"],
# redirects_data
[
{
"destination": "legalcode.en",
"language_code": "en",
"redirect_file": (
"licenses/by-nc-nd/4.0/legalcode.en-us.html"
),
"title": "",
},
{
"destination": "legalcode.en",
"language_code": "en",
"redirect_file": (
"licenses/by-nc-nd/4.0/legalcode.en_us.html"
),
"title": "",
},
],
],
returned_list,
)
def test_get_publish_files_by_nc_nd_4_deed_zh_hant(self):
legal_code = LegalCodeFactory(
tool__category="licenses",
tool__unit="by-nc-nd",
tool__version="4.0",
language_code="zh-hant",
)
returned_list = legal_code.get_publish_files("deed")
self.assertEqual(
[
# relpath
"licenses/by-nc-nd/4.0/deed.zh-hant.html",
# symlinks
[],
# redirects_data
[
{
"destination": "deed.zh-hant",
"language_code": "zh-hant",
"redirect_file": (
"licenses/by-nc-nd/4.0/deed.zh-tw.html"
),
"title": "",
},
{
"destination": "deed.zh-hant",
"language_code": "zh-hant",
"redirect_file": (
"licenses/by-nc-nd/4.0/deed.zh_tw.html"
),
"title": "",
},
],
],
returned_list,
)
def test_get_publish_files_by_nc_nd_4_legal_code_zh_hant(self):
legal_code = LegalCodeFactory(
tool__category="licenses",
tool__unit="by-nc-nd",
tool__version="4.0",
language_code="zh-hant",
)
returned_list = legal_code.get_publish_files("legalcode")
self.assertEqual(
[
# relpath
"licenses/by-nc-nd/4.0/legalcode.zh-hant.html",
# symlinks
[],
# redirects_data
[
{
"destination": "legalcode.zh-hant",
"language_code": "zh-hant",
"redirect_file": (
"licenses/by-nc-nd/4.0/legalcode.zh-tw.html"
),
"title": "",
},
{
"destination": "legalcode.zh-hant",
"language_code": "zh-hant",
"redirect_file": (
"licenses/by-nc-nd/4.0/legalcode.zh_tw.html"
),
"title": "",
},
],
],
returned_list,
)
# get_publish_files BY-NC 3.0 CA #########################################
# BY-NC 3.0 CA is a ported license with multiple languages
def test_get_publish_files_by_nc3_deed_ca_en(self):
legal_code = LegalCodeFactory(
tool__category="licenses",
tool__jurisdiction_code="ca",
tool__unit="by-nc",
tool__version="3.0",
language_code="en",
)
returned_list = legal_code.get_publish_files("deed")
self.assertEqual(
[
# relpath
"licenses/by-nc/3.0/ca/deed.en.html",
# symlinks
["deed.html", "index.html"],
# redirects_data
[
{
"destination": "deed.en",
"language_code": "en",
"redirect_file": (
"licenses/by-nc/3.0/ca/deed.en-us.html"
),
"title": "",
},
{
"destination": "deed.en",
"language_code": "en",
"redirect_file": (
"licenses/by-nc/3.0/ca/deed.en_us.html"
),
"title": "",
},
],
],
returned_list,
)
def test_get_publish_files_by_nc3_legal_code_ca_en(self):
legal_code = LegalCodeFactory(
tool__category="licenses",
tool__jurisdiction_code="ca",
tool__unit="by-nc",
tool__version="3.0",
language_code="en",
)
returned_list = legal_code.get_publish_files("legalcode")
self.assertEqual(
[
# relpath
"licenses/by-nc/3.0/ca/legalcode.en.html",
# symlinks
["legalcode.html"],
# redirects_data
[
{
"destination": "legalcode.en",
"language_code": "en",
"redirect_file": (
"licenses/by-nc/3.0/ca/legalcode.en-us.html"
),
"title": "",
},
{
"destination": "legalcode.en",
"language_code": "en",
"redirect_file": (
"licenses/by-nc/3.0/ca/legalcode.en_us.html"
),
"title": "",
},
],
],
returned_list,
)
# get_publish_files BY-SA 3.0 AM #########################################
# BY-SA 3.0 AM is a ported license with a single language
def test_get_publish_files_by_sa3_deed_am_hy(self):
legal_code = LegalCodeFactory(
tool__category="licenses",
tool__jurisdiction_code="am",
tool__unit="by-sa",
tool__version="3.0",
language_code="hy",
)
returned_list = legal_code.get_publish_files("deed")
self.assertEqual(
[
# relpath
"licenses/by-sa/3.0/am/deed.hy.html",
# symlinks
["deed.html", "index.html"],
# redirects_data
[],
],
returned_list,
)
def test_get_publish_files_by_sa3_legal_code_am_hy(self):
legal_code = LegalCodeFactory(
tool__category="licenses",
tool__jurisdiction_code="am",
tool__unit="by-sa",
tool__version="3.0",
language_code="hy",
)
returned_list = legal_code.get_publish_files("legalcode")
self.assertEqual(
[
# relpath
"licenses/by-sa/3.0/am/legalcode.hy.html",
# symlinks
["legalcode.html"],
# redirects_data
[],
],
returned_list,
)
# get_publish_files CC0 1.0 ##############################################
# CC0 1.0 is an unported declaration with multiple languages
def test_get_publish_files_zero_deed_en(self):
legal_code = LegalCodeFactory(
tool__category="publicdomain",
tool__unit="zero",
tool__version="1.0",
language_code="en",
)
returned_list = legal_code.get_publish_files("deed")
self.assertEqual(
[
# relpath
"publicdomain/zero/1.0/deed.en.html",
# symlinks
["deed.html", "index.html"],
# redirects_data
[
{
"destination": "deed.en",
"language_code": "en",
"redirect_file": (
"publicdomain/zero/1.0/deed.en-us.html"
),
"title": "",
},
{
"destination": "deed.en",
"language_code": "en",
"redirect_file": (
"publicdomain/zero/1.0/deed.en_us.html"
),
"title": "",
},
],
],
returned_list,
)
def test_get_publish_files_zero_legal_code_en(self):
legal_code = LegalCodeFactory(
tool__category="publicdomain",
tool__unit="zero",
tool__version="1.0",
language_code="en",
)
returned_list = legal_code.get_publish_files("legalcode")
self.assertEqual(
[
# relpath
"publicdomain/zero/1.0/legalcode.en.html",
# symlinks
["legalcode.html"],
# redirects_data
[
{
"destination": "legalcode.en",
"language_code": "en",
"redirect_file": (
"publicdomain/zero/1.0/legalcode.en-us.html"
),
"title": "",
},
{
"destination": "legalcode.en",
"language_code": "en",
"redirect_file": (
"publicdomain/zero/1.0/legalcode.en_us.html"
),
"title": "",
},
],
],
returned_list,
)
def test_get_publish_files_zero_deed_nl(self):
legal_code = LegalCodeFactory(
tool__category="publicdomain",
tool__unit="zero",
tool__version="1.0",
language_code="nl",
)
returned_list = legal_code.get_publish_files("deed")
self.assertEqual(
[
# relpath
"publicdomain/zero/1.0/deed.nl.html",
# symlinks
[],
# redirects_data
[],
],
returned_list,
)
# get_publish_files Mark 1.0 #############################################
# Mark 1.0 is an unported deed-only declaration
def test_get_publish_files_mark_deed(self):
legal_code = LegalCodeFactory(
tool__category="publicdomain",
tool__deed_only=True,
tool__unit="mark",
tool__version="1.0",
language_code="en",
)
returned_list = legal_code.get_publish_files("deed")
self.assertEqual(
[
# relpath
"publicdomain/mark/1.0/deed.en.html",
# symlinks
["deed.html", "index.html"],
# redirects_data
[
{
"destination": "deed.en",
"language_code": "en",
"redirect_file": (
"publicdomain/mark/1.0/deed.en-us.html"
),
"title": "",
},
{
"destination": "deed.en",
"language_code": "en",
"redirect_file": (
"publicdomain/mark/1.0/deed.en_us.html"
),
"title": "",
},
],
],
returned_list,
)
def test_get_publish_files_mark_legal_code(self):
legal_code = LegalCodeFactory(
tool__category="publicdomain",
tool__deed_only=True,
tool__unit="mark",
tool__version="1.0",
language_code="en",
)
returned_list = legal_code.get_publish_files("legalcode")
self.assertEqual(
[
# relpath
None,
# symlinks
[],
# redirects_data
[
{
"destination": "deed.en",
"language_code": "en",
"redirect_file": (
"publicdomain/mark/1.0/legalcode.en-us.html"
),
"title": "",
},
{
"destination": "deed.en",
"language_code": "en",
"redirect_file": (
"publicdomain/mark/1.0/legalcode.en_us.html"
),
"title": "",
},
{
"destination": "deed.en",
"language_code": "en",
"redirect_file": (
"publicdomain/mark/1.0/legalcode.en.html"
),
"title": "",
},
{
"destination": "deed.en",
"language_code": "en",
"redirect_file": (
"publicdomain/mark/1.0/legalcode.html"
),
"title": "",
},
],
],
returned_list,
)
# get_redirect_pairs #####################################################
def test_get_redirect_pairs_4(self):
tool = ToolFactory(category="license", unit="by", version="4.0")
legal_code = LegalCodeFactory(tool=tool, language_code="nl")
redirect_pairs = legal_code.get_redirect_pairs("deed")
self.assertEqual(
[["license/by/4.0/deed.NL", "license/by/4.0/deed.nl"]],
redirect_pairs,
)
class ToolModelTest(TestCase):
def test_nc(self):
self.assertFalse(ToolFactory(unit="xyz").nc)
self.assertTrue(ToolFactory(unit="by-nc-xyz").nc)
def test_nd(self):
self.assertFalse(ToolFactory(unit="xyz").nd)
self.assertTrue(ToolFactory(unit="by-nd-xyz").nd)
def test_sa(self):
self.assertFalse(ToolFactory(unit="xyz").sa)
self.assertTrue(ToolFactory(unit="xyz-sa").sa)
def test_get_metadata(self):
# Ported
tool = ToolFactory(
**{
"canonical_url": (
local_var_params['status'])) # noqa: E501
collection_formats['status'] = 'csv' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = ['petstore_auth'] # noqa: E501
return self.api_client.call_api(
'/pet/findByStatus', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Pet]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def find_pets_by_tags(self, tags, **kwargs): # noqa: E501
"""Finds Pets by tags # noqa: E501
Multiple tags can be provided with comma separated strings. Use tag1, tag2, tag3 for testing. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.find_pets_by_tags(tags, async_req=True)
>>> result = thread.get()
Args:
tags (list[str]): Tags to filter by
Keyword Args:
async_req (bool): execute request asynchronously
param _preload_content (bool): if False, the urllib3.HTTPResponse
object will be returned without reading/decoding response data.
Default is True.
param _request_timeout (float/tuple): timeout setting for this
request. If one number provided, it will be total request
timeout. It can also be a pair (tuple) of (connection, read)
timeouts.
Returns:
list[Pet]:
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.find_pets_by_tags_with_http_info(tags, **kwargs) # noqa: E501
else:
(data) = self.find_pets_by_tags_with_http_info(tags, **kwargs) # noqa: E501
return data
def find_pets_by_tags_with_http_info(self, tags, **kwargs): # noqa: E501
"""Finds Pets by tags # noqa: E501
Multiple tags can be provided with comma separated strings. Use tag1, tag2, tag3 for testing. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.find_pets_by_tags_with_http_info(tags, async_req=True)
>>> result = thread.get()
Args:
tags (list[str]): Tags to filter by
Keyword Args:
async_req (bool): execute request asynchronously
param _preload_content (bool): if False, the urllib3.HTTPResponse
object will be returned without reading/decoding response data.
Default is True.
param _request_timeout (float/tuple): timeout setting for this
request. If one number provided, it will be total request
timeout. It can also be a pair (tuple) of (connection, read)
timeouts.
Returns:
list[Pet]:
"""
local_var_params = locals()
all_params = ['tags'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method find_pets_by_tags" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'tags' is set
if ('tags' not in local_var_params or
local_var_params['tags'] is None):
raise ApiValueError("Missing the required parameter `tags` when calling `find_pets_by_tags`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'tags' in local_var_params:
query_params.append(('tags', local_var_params['tags'])) # noqa: E501
collection_formats['tags'] = 'csv' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = ['petstore_auth'] # noqa: E501
return self.api_client.call_api(
'/pet/findByTags', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Pet]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
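    # Illustrative note (not part of the generated client code): with
    # collection_formats['tags'] = 'csv', the api_client is expected to join
    # list values with commas, so tags=['tag1', 'tag2'] would typically be sent
    # as the query string "tags=tag1,tag2".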
def get_pet_by_id(self, pet_id, **kwargs): # noqa: E501
"""Find pet by ID # noqa: E501
Returns a single pet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_pet_by_id(pet_id, async_req=True)
>>> result = thread.get()
Args:
pet_id (int): ID of pet to return
Keyword Args:
async_req (bool): execute request asynchronously
param _preload_content (bool): if False, the urllib3.HTTPResponse
object will be returned without reading/decoding response data.
Default is True.
param _request_timeout (float/tuple): timeout setting for this
request. If one number provided, it will be total request
timeout. It can also be a pair (tuple) of (connection, read)
timeouts.
Returns:
Pet:
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_pet_by_id_with_http_info(pet_id, **kwargs) # noqa: E501
else:
(data) = self.get_pet_by_id_with_http_info(pet_id, **kwargs) # noqa: E501
return data
def get_pet_by_id_with_http_info(self, pet_id, **kwargs): # noqa: E501
"""Find pet by ID # noqa: E501
Returns a single pet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_pet_by_id_with_http_info(pet_id, async_req=True)
>>> result = thread.get()
Args:
pet_id (int): ID of pet to return
Keyword Args:
async_req (bool): execute request asynchronously
param _preload_content (bool): if False, the urllib3.HTTPResponse
object will be returned without reading/decoding response data.
Default is True.
param _request_timeout (float/tuple): timeout setting for this
request. If one number provided, it will be total request
timeout. It can also be a pair (tuple) of (connection, read)
timeouts.
Returns:
Pet:
"""
local_var_params = locals()
all_params = ['pet_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_pet_by_id" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'pet_id' is set
if ('pet_id' not in local_var_params or
local_var_params['pet_id'] is None):
raise ApiValueError("Missing the required parameter `pet_id` when calling `get_pet_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'pet_id' in local_var_params:
path_params['petId'] = local_var_params['pet_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/pet/{petId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Pet', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def update_pet(self, body, **kwargs): # noqa: E501
"""Update an existing pet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_pet(body, async_req=True)
>>> result = thread.get()
Args:
body (Pet): Pet object that needs to be added to the store
Keyword Args:
async_req (bool): execute request asynchronously
param _preload_content (bool): if False, the urllib3.HTTPResponse
object will be returned without reading/decoding response data.
Default is True.
param _request_timeout (float/tuple): timeout setting for this
request. If one number provided, it will be total request
timeout. It can also be a pair (tuple) of (connection, read)
timeouts.
Returns:
None:
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_pet_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.update_pet_with_http_info(body, **kwargs) # noqa: E501
return data
def update_pet_with_http_info(self, body, **kwargs): # noqa: E501
"""Update an existing pet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_pet_with_http_info(body, async_req=True)
>>> result = thread.get()
Args:
body (Pet): Pet object that needs to be added to the store
Keyword Args:
async_req (bool): execute request asynchronously
param _preload_content (bool): if False, the urllib3.HTTPResponse
object will be returned without reading/decoding response data.
Default is True.
param _request_timeout (float/tuple): timeout setting for this
request. If one number provided, it will be total request
timeout. It can also be a pair (tuple) of (connection, read)
timeouts.
Returns:
None:
"""
local_var_params = locals()
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_pet" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in local_var_params or
local_var_params['body'] is None):
raise ApiValueError("Missing the required parameter `body` when calling `update_pet`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['petstore_auth'] # noqa: E501
return self.api_client.call_api(
'/pet', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def update_pet_with_form(self, pet_id, **kwargs): # noqa: E501
# Repository: Rakshit2214/virtual-assistant-Python- (1-10 GitHub stars)
from __future__ import print_function
import numpy as np
from pydub import AudioSegment
import random
import sys
import os
from scipy.io import wavfile
print("by Logical Spot")
import tensorflow
import argparse
parser = argparse.ArgumentParser(description='Dir for dataset, e.g. neg and pos')
parser.add_argument('--input', action='store', type=str, required=True)
parser.add_argument('--epochs', action='store', type=int, required=True)
parser.add_argument('--tx', action='store', type=int, default=5511)
parser.add_argument('--nf', action='store', type=int, default=101)
parser.add_argument('--ty', action='store', type=int, default=1375)
args = parser.parse_args()
pathD = args.input
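# Illustrative invocation (script name assumed; only --input and --epochs are
# required, the rest fall back to the defaults declared above):
#   python train_trigger_word.py --input ./dataset --epochs 20 --tx 5511 --nf 101 --ty 1375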
import matplotlib.pyplot as plt
from scipy.io import wavfile
import os
from pydub import AudioSegment
# Calculate and plot spectrogram for a wav audio file
def graph_spectrogram(wav_file):
rate, data = get_wav_info(wav_file)
nfft = 200 # Length of each window segment
    fs = 8000 # Sampling frequency
noverlap = 120 # Overlap between windows
nchannels = data.ndim
if nchannels == 1:
pxx, freqs, bins, im = plt.specgram(data, nfft, fs, noverlap = noverlap)
elif nchannels == 2:
pxx, freqs, bins, im = plt.specgram(data[:,0], nfft, fs, noverlap = noverlap)
return pxx
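# Rough sanity check (assuming 10-second clips sampled at 44.1 kHz, as the
# pydub exports below typically produce): with nfft=200 and noverlap=120 the
# spectrogram has nfft/2 + 1 = 101 frequency rows and roughly
# (441000 - 200)/80 + 1 = 5511 time columns, matching the --nf and --tx defaults.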
# Load a wav file
def get_wav_info(wav_file):
rate, data = wavfile.read(wav_file)
return rate, data
# Used to standardize volume of audio clip
def match_target_amplitude(sound, target_dBFS):
change_in_dBFS = target_dBFS - sound.dBFS
return sound.apply_gain(change_in_dBFS)
# Load raw audio files for speech synthesis
def load_raw_audio():
activates = []
backgrounds = []
negatives = []
for filename in os.listdir(pathD + "/positives/"):
if filename.endswith("wav"):
activate = AudioSegment.from_wav(pathD + "/positives/"+filename)
activates.append(activate)
for filename in os.listdir("./backgrounds/"):
if filename.endswith("wav"):
background = AudioSegment.from_wav("./backgrounds/"+filename)
backgrounds.append(background)
for filename in os.listdir(pathD + "/negatives/"):
if filename.endswith("wav"):
negative = AudioSegment.from_wav(pathD + "/negatives/"+filename)
negatives.append(negative)
return activates, negatives, backgrounds
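# Expected directory layout for load_raw_audio() (paths taken from the code
# above; pathD comes from --input):
#   <pathD>/positives/*.wav    -> keyword ("activate") clips
#   <pathD>/negatives/*.wav    -> non-keyword clips
#   ./backgrounds/*.wav        -> 10-second background recordings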
Tx = args.tx # The number of time steps input to the model from the spectrogram
n_freq = args.nf # Number of frequencies input to the model at each time step of the spectrogram
Ty = args.ty # The number of time steps in the output of our model
activates, negatives, backgrounds = load_raw_audio()
def get_random_time_segment(segment_ms):
"""
Gets a random time segment of duration segment_ms in a 10,000 ms audio clip.
Arguments:
segment_ms -- the duration of the audio clip in ms ("ms" stands for "milliseconds")
Returns:
segment_time -- a tuple of (segment_start, segment_end) in ms
"""
segment_start = np.random.randint(low=0, high=10000-segment_ms) # Make sure segment doesn't run past the 10sec background
segment_end = segment_start + segment_ms - 1
return (segment_start, segment_end)
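# Illustrative example: get_random_time_segment(1000) returns a pair such as
# (2340, 3339) -- segment_start is drawn from [0, 9000), so the 1000 ms
# segment always fits inside the 10,000 ms background.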
def is_overlapping(segment_time, previous_segments):
"""
Checks if the time of a segment overlaps with the times of existing segments.
Arguments:
segment_time -- a tuple of (segment_start, segment_end) for the new segment
previous_segments -- a list of tuples of (segment_start, segment_end) for the existing segments
Returns:
True if the time segment overlaps with any of the existing segments, False otherwise
"""
segment_start, segment_end = segment_time
# Step 1: Initialize overlap as a "False" flag. (≈ 1 line)
overlap = False
# Step 2: loop over the previous_segments start and end times.
# Compare start/end times and set the flag to True if there is an overlap (≈ 3 lines)
for previous_start, previous_end in previous_segments:
if segment_start <= previous_end and segment_end >= previous_start:
overlap = True
return overlap
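# Worked examples (checked against the logic above):
#   is_overlapping((950, 1430), [(2000, 2550), (260, 949)])           -> False
#   is_overlapping((900, 1000), [(100, 199), (300, 399), (900, 950)]) -> True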
def insert_audio_clip(background, audio_clip, previous_segments):
"""
Insert a new audio segment over the background noise at a random time step, ensuring that the
audio segment does not overlap with existing segments.
Arguments:
background -- a 10 second background audio recording.
audio_clip -- the audio clip to be inserted/overlaid.
previous_segments -- times where audio segments have already been placed
Returns:
    new_background -- the updated background audio
    segment_time -- the (segment_start, segment_end) at which the clip was inserted
"""
# Get the duration of the audio clip in ms
segment_ms = len(audio_clip)
# Step 1: Use one of the helper functions to pick a random time segment onto which to insert
# the new audio clip. (≈ 1 line)
segment_time = get_random_time_segment(segment_ms)
# Step 2: Check if the new segment_time overlaps with one of the previous_segments. If so, keep
# picking new segment_time at random until it doesn't overlap. (≈ 2 lines)
while is_overlapping(segment_time, previous_segments):
segment_time = get_random_time_segment(segment_ms)
# Step 3: Add the new segment_time to the list of previous_segments (≈ 1 line)
previous_segments.append(segment_time)
# Step 4: Superpose audio segment and background
new_background = background.overlay(audio_clip, position = segment_time[0])
return new_background, segment_time
def insert_ones(y, segment_end_ms):
"""
Update the label vector y. The labels of the 50 output steps strictly after the end of the segment
    should be set to 1. By strictly we mean that the label of segment_end_y itself should be 0, while the
    50 following labels should be ones.
Arguments:
y -- numpy array of shape (1, Ty), the labels of the training example
segment_end_ms -- the end time of the segment in ms
Returns:
y -- updated labels
"""
# duration of the background (in terms of spectrogram time-steps)
segment_end_y = int(segment_end_ms * Ty / 10000.0)
# Add 1 to the correct index in the background label (y)
for i in range(segment_end_y + 1, segment_end_y + 51):
if i < Ty:
y[0, i] = 1
return y
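# Worked example (with the default Ty = 1375): insert_ones(y, 5000.0) maps the
# segment end to step int(5000 * 1375 / 10000.0) = 687, so the 50 steps
# strictly after it, y[0, 688:738], are set to 1.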
def create_training_example(background, activates, negatives):
"""
Creates a training example with a given background, activates, and negatives.
Arguments:
background -- a 10 second background audio recording
activates -- a list of audio segments of the word "activate"
negatives -- a list of audio segments of random words that are not "activate"
Returns:
x -- the spectrogram of the training example
y -- the label at each time step of the spectrogram
"""
# Set the random seed
# Make background quieter
background = background - 20
# Step 1: Initialize y (label vector) of zeros (≈ 1 line)
y = np.zeros((1, Ty))
# Step 2: Initialize segment times as empty list (≈ 1 line)
previous_segments = []
# Select 0-4 random "activate" audio clips from the entire list of "activates" recordings
number_of_activates = np.random.randint(0, 5)
random_indices = np.random.randint(len(activates), size=number_of_activates)
random_activates = [activates[i] for i in random_indices]
# Step 3: Loop over randomly selected "activate" clips and insert in background
for random_activate in random_activates:
# Insert the audio clip on the background
background, segment_time = insert_audio_clip(background, random_activate, previous_segments)
# Retrieve segment_start and segment_end from segment_time
segment_start, segment_end = segment_time
# Insert labels in "y"
y = insert_ones(y, segment_end_ms=segment_end)
# Select 0-2 random negatives audio recordings from the entire list of "negatives" recordings
number_of_negatives = np.random.randint(0, 3)
random_indices = np.random.randint(len(negatives), size=number_of_negatives)
random_negatives = [negatives[i] for i in random_indices]
# Step 4: Loop over randomly selected negative clips and insert in background
for random_negative in random_negatives:
# Insert the audio clip on the background
background, _ = insert_audio_clip(background, random_negative, previous_segments)
# Standardize the volume of the audio clip
background = match_target_amplitude(background, -20.0)
# Export new training example
file_handle = background.export("train" + ".wav", format="wav")
print("File (train.wav) was saved in your directory.")
# Get and plot spectrogram of the new recording (background with superposition of positive and negatives)
x = graph_spectrogram("train.wav")
print(y)
return x, y
X=[]
Y=[]
for i in range(0,13):
x, y = create_training_example(backgrounds[0], activates, negatives)
X.append(x)
Y.append(y)
for i in range(13,26):
x, y = create_training_example(backgrounds[1], activates, negatives)
X.append(x)
Y.append(y)
X=np.array([X])
X=X[0]
Y=np.array([Y])
Y=Y[0]
print(X.shape)
print(Y.shape)
X=np.transpose(X,(0,2,1))
Y=np.transpose(Y,(0,2,1))
print(X.shape)
print(Y.shape)
print(X)
print(Y)
for i in Y[0]:
print(i)
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size=0.2,random_state=37)
#X_train.shape
from keras.models import Model, load_model, Sequential
from keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D
from keras.layers import GRU, Bidirectional, BatchNormalization, Reshape
from keras.optimizers import Adam
def model(input_shape):
"""
Function creating the model's graph in Keras.
Argument:
input_shape -- shape of the model's input data (using Keras conventions)
Returns:
model -- Keras model instance
with the help of <NAME> and <NAME>
"""
X_input = Input(shape = input_shape)
# Step 1: CONV layer (≈4 lines)
X = Conv1D(196, kernel_size=15, strides=4)(X_input) # CONV1D
X = BatchNormalization()(X) # Batch normalization
X = Activation('relu')(X) # ReLu activation
X = Dropout(0.8)(X) # dropout (use 0.8)
# Step 2: First GRU Layer (≈4 lines)
    X = GRU(units =
# Repository: NDevTK/chromium-infra
# File: appengine/findit/model/code_coverage.py
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import hashlib
from google.appengine.api import datastore_errors
from google.appengine.ext import ndb
class DependencyRepository(ndb.Model):
# The source absolute path of the checkout into the root repository.
# Example: "//third_party/pdfium/" for pdfium in a chromium/src checkout.
path = ndb.StringProperty(indexed=False, required=True)
# The Gitiles hostname, e.g. "pdfium.googlesource.com".
server_host = ndb.StringProperty(indexed=False, required=True)
# The Gitiles project name, e.g. "pdfium.git".
project = ndb.StringProperty(indexed=False, required=True)
# The commit hash of the revision.
revision = ndb.StringProperty(indexed=False, required=True)
@property
def project_url(self):
return 'https://%s/%s' % (self.server_host, self.project)
class GitilesCommit(ndb.Model):
"""Represents a Gitiles commit."""
# The Gitiles hostname, e.g. "chromium.googlesource.com".
server_host = ndb.StringProperty(indexed=True, required=True)
# The Gitiles project name, e.g. "chromium/src".
project = ndb.StringProperty(indexed=True, required=True)
  # The Gitiles ref, e.g. "refs/heads/master".
# NOT a branch name: if specified, must start with "refs/".
ref = ndb.StringProperty(indexed=True, required=True)
# The commit hash of the revision.
revision = ndb.StringProperty(indexed=True, required=True)
class PostsubmitReport(ndb.Model):
"""Represents a postsubmit code coverage report."""
# The Gitiles commit.
gitiles_commit = ndb.StructuredProperty(
GitilesCommit, indexed=True, required=True)
  # An optional, increasing numeric identifier assigned to each commit.
commit_position = ndb.IntegerProperty(indexed=True, required=False)
# Timestamp when the commit was committed.
commit_timestamp = ndb.DateTimeProperty(indexed=True, required=True)
# TODO(crbug.com/939443): Make it required once data are backfilled.
# Name of the luci builder that generates the data.
bucket = ndb.StringProperty(indexed=True, required=False)
builder = ndb.StringProperty(indexed=True, required=False)
# Manifest of all the code checkouts when the coverage report is generated.
# In descending order by the length of the relative path in the root checkout.
manifest = ndb.LocalStructuredProperty(
DependencyRepository, repeated=True, indexed=False)
# The top level coverage metric of the report.
# For Clang based languages, the format is a list of 3 dictionaries
  # corresponding to 'line', 'function' and 'region' respectively, and each dict
# has format: {'covered': 9526650, 'total': 12699841, 'name': u'|name|'}
summary_metrics = ndb.JsonProperty(indexed=False, required=True)
# The build id that uniquely identifies the build.
build_id = ndb.IntegerProperty(indexed=False, required=True)
# Used to control if a report is visible to the users, and the main use case
  # is to quarantine a 'bad' report. All the reports are visible to admins.
visible = ndb.BooleanProperty(indexed=True, default=False, required=True)
# TODO(crbug.com/1237114): Mark as required once data are backfilled.
# Id of the associated coverage report modifier, 0 otherwise
# For e.g. a usual full codebase PostSubmitReport would
# have modifier_id as 0.
modifier_id = ndb.IntegerProperty(indexed=True, required=False)
@classmethod
def _CreateKey(cls, server_host, project, ref, revision, bucket, builder,
modifier_id):
return ndb.Key(
cls, '%s$%s$%s$%s$%s$%s$%s' % (server_host, project, ref, revision,
bucket, builder, str(modifier_id)))
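  # Illustrative key (all argument values invented for the example):
  #   _CreateKey("chromium.googlesource.com", "chromium/src", "refs/heads/main",
  #              "abc123", "ci", "linux-code-coverage", 0)
  # produces the string id
  # "chromium.googlesource.com$chromium/src$refs/heads/main$abc123$ci$linux-code-coverage$0".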
@classmethod
def Create(cls,
server_host,
project,
ref,
revision,
bucket,
builder,
commit_timestamp,
manifest,
summary_metrics,
build_id,
visible,
commit_position=None,
modifier_id=0):
key = cls._CreateKey(server_host, project, ref, revision, bucket, builder,
modifier_id)
gitiles_commit = GitilesCommit(
server_host=server_host, project=project, ref=ref, revision=revision)
return cls(
key=key,
gitiles_commit=gitiles_commit,
bucket=bucket,
builder=builder,
commit_position=commit_position,
commit_timestamp=commit_timestamp,
manifest=manifest,
summary_metrics=summary_metrics,
build_id=build_id,
visible=visible,
modifier_id=modifier_id)
@classmethod
def Get(cls,
server_host,
project,
ref,
revision,
bucket,
builder,
modifier_id=0):
entity_v3 = cls._CreateKey(server_host, project, ref, revision, bucket,
builder, modifier_id).get()
if entity_v3:
return entity_v3
# TODO(crbug.com/1237114): Remove following code once data are backfilled.
entity_v2 = ndb.Key(
cls, '%s$%s$%s$%s$%s$%s' %
(server_host, project, ref, revision, bucket, builder)).get()
if entity_v2:
return entity_v2
# TODO(crbug.com/939443): Remove following code once data are backfilled.
legacy_key_v1 = ndb.Key(
cls, '%s$%s$%s$%s' % (server_host, project, ref, revision))
return legacy_key_v1.get()
class CoverageReportModifier(ndb.Model):
"""Represents a filter setting used to generate custom coverage reports."""
# The Gitiles hostname, e.g. "chromium.googlesource.com".
server_host = ndb.StringProperty(
indexed=True, default='chromium.googlesource.com', required=True)
# The Gitiles project name, e.g. "chromium/src.git".
project = ndb.StringProperty(
indexed=True, default='chromium/src', required=True)
# Controls whether custom reports are to be generated for this modifier or not
is_active = ndb.BooleanProperty(indexed=True, default=True, required=True)
# Gerrit hashtag to uniquely identify a feature.
gerrit_hashtag = ndb.StringProperty(indexed=True)
# Reference commit to generate coverage reports past a checkpoint.
reference_commit = ndb.StringProperty(indexed=True)
# Timestamp this modifier got created.
insert_timestamp = ndb.DateTimeProperty(auto_now_add=True)
# Timestamp this modifier was last updated.
update_timestamp = ndb.DateTimeProperty(auto_now=True)
@classmethod
def Get(cls, modifier_id):
return ndb.Key(cls, int(modifier_id)).get()
class CLPatchset(ndb.Model):
"""Represents a CL patchset."""
# The Gerrit hostname, e.g. "chromium-review.googlesource.com".
server_host = ndb.StringProperty(indexed=True, required=True)
# The Gerrit project name, e.g. "chromium/src".
# Note that project is optional because the other three already uniquely
# identifies a CL patchset.
project = ndb.StringProperty(indexed=True, required=False)
  # The Gerrit change number, e.g. "138000".
change = ndb.IntegerProperty(indexed=True, required=True)
# The Gerrit patchset number, e.g. "2".
patchset = ndb.IntegerProperty(indexed=True, required=True)
def PercentageValidator(_, value):
"""Validates that the total number of lines is greater than 0."""
if value <= 0:
raise datastore_errors.BadValueError(
'total_lines is expected to be greater than 0.')
return value
class CoveragePercentage(ndb.Model):
"""Represents code coverage percentage metric for a file.
It is stored as a part of PresubmitCoverageData.
"""
# The source absolute path of the file. E.g. //base/test.cc.
path = ndb.StringProperty(indexed=False, required=True)
# Total number of lines.
total_lines = ndb.IntegerProperty(
indexed=False, required=True, validator=PercentageValidator)
# Number of covered lines.
covered_lines = ndb.IntegerProperty(indexed=False, required=True)
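# Note (behaviour follows from PercentageValidator above): constructing a
# CoveragePercentage with a non-positive total, e.g.
#   CoveragePercentage(path='//base/test.cc', total_lines=0, covered_lines=0)
# is expected to raise datastore_errors.BadValueError when the property is
# validated.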
class PresubmitCoverageData(ndb.Model):
"""Represents the code coverage data of a change during presubmit."""
# The CL patchset.
cl_patchset = ndb.StructuredProperty(CLPatchset, indexed=True, required=True)
  # A list of file level coverage data for all the source files modified by
  # this CL.
data = ndb.JsonProperty(indexed=False, compressed=True, required=False)
# A list of file level coverage data (unit tests only) for all the source
# files modified by this CL.
data_unit = ndb.JsonProperty(indexed=False, compressed=True, required=False)
# Coverage percentages(overall) of all executable lines of the files.
absolute_percentages = ndb.LocalStructuredProperty(
CoveragePercentage, indexed=False, repeated=True)
# Coverage percentages(overall) of *newly added* and executable lines
# of the files.
incremental_percentages = ndb.LocalStructuredProperty(
CoveragePercentage, indexed=False, repeated=True)
# Coverage percentages(unit) of all executable lines of the files.
absolute_percentages_unit = ndb.LocalStructuredProperty(
CoveragePercentage, indexed=False, repeated=True)
# Coverage percentages(unit) of *newly added* and executable lines
# of the files.
incremental_percentages_unit = ndb.LocalStructuredProperty(
CoveragePercentage, indexed=False, repeated=True)
# If assigned, represents the patchset number from which this coverage data is
# generated, and it specifically refers to the scenario where coverage data
# are shared between equivalent patchsets, such as trivial-rebase.
based_on = ndb.IntegerProperty(indexed=True)
# Timestamp this coverage report got created.
insert_timestamp = ndb.DateTimeProperty(auto_now_add=True)
# Timestamp this coverage report was last updated.
update_timestamp = ndb.DateTimeProperty(auto_now=True)
@classmethod
def _CreateKey(cls, server_host, change, patchset):
return ndb.Key(cls, '%s$%s$%s' % (server_host, change, patchset))
@classmethod
def Create(cls,
server_host,
change,
patchset,
data=None,
data_unit=None,
project=None):
assert data or data_unit, "Atleast one of data/data_unit must be specified."
key = cls._CreateKey(server_host, change, patchset)
cl_patchset = CLPatchset(
server_host=server_host,
project=project,
change=change,
patchset=patchset)
return cls(key=key, cl_patchset=cl_patchset, data=data, data_unit=data_unit)
@classmethod
def Get(cls, server_host, change, patchset):
return cls.GetAsync(server_host, change, patchset).get_result()
@classmethod
def GetAsync(cls, server_host, change, patchset):
return cls._CreateKey(server_host, change, patchset).get_async()
class FileCoverageData(ndb.Model):
"""Represents the code coverage data of a single file.
File can be from a dependency checkout, and it can be a generated file instead
of a source file checked into the repo.
"""
# The Gitiles commit.
gitiles_commit = ndb.StructuredProperty(
GitilesCommit, indexed=True, required=True)
  # Source absolute file path.
path = ndb.StringProperty(indexed=True, required=True)
# TODO(crbug.com/939443): Make it required once data are backfilled.
# Name of the luci builder that generates the data.
bucket = ndb.StringProperty(indexed=True, required=False)
builder = ndb.StringProperty(indexed=True, required=False)
# Coverage data for a single file.
# Json structure corresponds to File proto at
# https://chromium.googlesource.com/infra/infra/+/refs/heads/main/appengine/findit/model/proto/code_coverage.proto
data = ndb.JsonProperty(indexed=False, compressed=True, required=True)
# TODO(crbug.com/1237114): Mark as required once data are backfilled.
# Id of the associated coverage report modifier, 0 otherwise
# For e.g. FileCoverageData corresponding to default PostSubmitReport would
# have modifier_id as 0.
modifier_id = ndb.IntegerProperty(indexed=True, required=False)
@classmethod
def _CreateKey(cls, server_host, project, ref, revision, path, bucket,
builder, modifier_id):
return ndb.Key(
cls,
'%s$%s$%s$%s$%s$%s$%s$%s' % (server_host, project, ref, revision, path,
bucket, builder, str(modifier_id)))
@classmethod
def Create(cls,
server_host,
project,
ref,
revision,
path,
bucket,
builder,
data,
modifier_id=0):
assert path.startswith('//'), 'File path must start with "//"'
key = cls._CreateKey(server_host, project, ref, revision, path, bucket,
builder, modifier_id)
gitiles_commit = GitilesCommit(
server_host=server_host, project=project, ref=ref, revision=revision)
return cls(
key=key,
gitiles_commit=gitiles_commit,
path=path,
bucket=bucket,
builder=builder,
modifier_id=modifier_id,
data=data)
@classmethod
def Get(cls,
server_host,
project,
ref,
revision,
path,
bucket,
builder,
modifier_id=0):
entity_v3 = cls._CreateKey(server_host, project, ref, revision, path,
bucket, builder, modifier_id).get()
    if
good job of exploiting novelty in examples without
getting too obsessed with any particular novel factor.
Roughly speaking what we want to do is give each distinct coverage target
equal amounts of time. However some coverage targets may be harder to fuzz
than others, or may only appear in a very small minority of examples, so we
don't want to let those dominate the testing.
Targets are selected according to the following rules:
1. We ideally want valid examples as our starting point. We ignore
interesting examples entirely, and other than that we restrict ourselves
to the best example status we've seen so far. If we've only seen
OVERRUN examples we use those. If we've seen INVALID but not VALID
examples we use those. Otherwise we use VALID examples.
2. Among the examples we've seen with the right status, when asked to
select a target, we select a coverage target and return that along with
an example exhibiting that target uniformly at random.
Coverage target selection proceeds as follows:
1. Whenever we return an example from select, we update the usage count of
each of its tags.
2. Whenever we see an example, we add it to the list of examples for all of
its tags.
3. When selecting a tag, we select one with a minimal usage count. Among
those of minimal usage count we select one with the fewest examples.
Among those, we select one uniformly at random.
This has the following desirable properties:
1. When two coverage targets are intrinsically linked (e.g. when you have
multiple lines in a conditional so that either all or none of them will
be covered in a conditional) they are naturally deduplicated.
2. Popular coverage targets will largely be ignored for considering what
test to run - if every example exhibits a coverage target, picking an
example because of that target is rather pointless.
3. When we discover new coverage targets we immediately exploit them until
we get to the point where we've spent about as much time on them as the
existing targets.
4. Among the interesting deduplicated coverage targets we essentially
round-robin between them, but with a more consistent distribution than
uniformly at random, which is important particularly for short runs.
"""
def __init__(self, random):
self.random = random
self.best_status = Status.OVERRUN
self.reset()
def reset(self):
self.examples_by_tags = defaultdict(list)
self.tag_usage_counts = Counter()
self.tags_by_score = defaultdict(SampleSet)
self.scores_by_tag = {}
self.scores = []
self.mutation_counts = 0
self.example_counts = 0
self.non_universal_tags = set()
self.universal_tags = None
def add(self, data):
if data.status == Status.INTERESTING:
return
if data.status < self.best_status:
return
if data.status > self.best_status:
self.best_status = data.status
self.reset()
if self.universal_tags is None:
self.universal_tags = set(data.tags)
else:
not_actually_universal = self.universal_tags - data.tags
for t in not_actually_universal:
self.universal_tags.remove(t)
self.non_universal_tags.add(t)
self.examples_by_tags[t] = list(
self.examples_by_tags[universal]
)
new_tags = data.tags - self.non_universal_tags
for t in new_tags:
self.non_universal_tags.add(t)
self.examples_by_tags[negated(t)] = list(
self.examples_by_tags[universal]
)
self.example_counts += 1
for t in self.tags_for(data):
self.examples_by_tags[t].append(data)
self.rescore(t)
def has_tag(self, tag, data):
if tag is universal:
return True
if isinstance(tag, Negated):
return tag.tag not in data.tags
return tag in data.tags
def tags_for(self, data):
yield universal
for t in data.tags:
yield t
for t in self.non_universal_tags:
if t not in data.tags:
yield negated(t)
def rescore(self, tag):
new_score = (
self.tag_usage_counts[tag], len(self.examples_by_tags[tag]))
try:
old_score = self.scores_by_tag[tag]
except KeyError:
pass
else:
self.tags_by_score[old_score].remove(tag)
self.scores_by_tag[tag] = new_score
sample = self.tags_by_score[new_score]
if len(sample) == 0:
heapq.heappush(self.scores, new_score)
sample.add(tag)
def select_tag(self):
while True:
peek = self.scores[0]
sample = self.tags_by_score[peek]
if len(sample) == 0:
heapq.heappop(self.scores)
else:
return sample.choice(self.random)
def select_example_for_tag(self, t):
return self.random.choice(self.examples_by_tags[t])
def select(self):
t = self.select_tag()
self.mutation_counts += 1
result = self.select_example_for_tag(t)
assert self.has_tag(t, result)
for s in self.tags_for(result):
self.tag_usage_counts[s] += 1
self.rescore(s)
return t, result
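# Illustrative sketch (not part of the original engine): the selection rule
# above orders tags by the tuple (usage count, number of examples) kept in a
# heap, so the least-used tag wins and ties are broken by having fewer
# examples. A minimal standalone model of that ordering, with made-up scores:
def _demo_tag_score_ordering():
    import heapq
    scores = []
    heapq.heappush(scores, (0, 3))  # never used, 3 examples
    heapq.heappush(scores, (2, 1))  # used twice, 1 example
    heapq.heappush(scores, (0, 1))  # never used, 1 example
    # The never-used tag with the fewest examples sorts first.
    assert heapq.heappop(scores) == (0, 1)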
def block_program(description):
"""Mini-DSL for block rewriting. A sequence of commands that will be run
over all contiguous sequences of blocks of the description length in order.
Commands are:
* ".", keep this block unchanged
* "-", subtract one from this block.
* "0", replace this block with zero
* "X", delete this block
If a command does not apply (currently only because it's - on a zero
block) the block will be silently skipped over. As a side effect of
running a block program its score will be updated.
"""
def run(self):
n = len(description)
i = 0
while i + n <= len(self.shrink_target.blocks):
attempt = bytearray(self.shrink_target.buffer)
failed = False
for k, d in reversed(list(enumerate(description))):
j = i + k
u, v = self.blocks[j]
if d == '-':
value = int_from_bytes(attempt[u:v])
if value == 0:
failed = True
break
else:
attempt[u:v] = int_to_bytes(value - 1, v - u)
elif d == 'X':
del attempt[u:v]
else: # pragma: no cover
assert False, 'Unrecognised command %r' % (d,)
if failed or not self.incorporate_new_buffer(attempt):
i += 1
run.command = description
run.__name__ = 'block_program(%r)' % (description,)
return run
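# Illustrative sketch (not part of the shrinker): what a description such as
# '-XX' means when applied to a window of blocks, modelled here on a plain
# list of integers rather than real buffer blocks.
def _demo_block_program_semantics():
    blocks = [7, 3, 9, 4]
    description = '-XX'
    result = []
    for value, command in zip(blocks, description):
        if command == '-':
            result.append(value - 1)  # subtract one from the block
        elif command == 'X':
            pass                      # delete the block
        elif command == '0':
            result.append(0)          # replace the block with zero
        else:
            result.append(value)      # '.' keeps the block unchanged
    # The untouched tail of the block list is carried over unchanged.
    assert result + blocks[len(description):] == [6, 4]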
class PassClassification(Enum):
CANDIDATE = 0
HOPEFUL = 1
DUBIOUS = 2
AVOID = 3
SPECIAL = 4
@total_ordering
@attr.s(slots=True, cmp=False)
class ShrinkPass(object):
pass_function = attr.ib()
index = attr.ib()
classification = attr.ib(default=PassClassification.CANDIDATE)
successes = attr.ib(default=0)
runs = attr.ib(default=0)
calls = attr.ib(default=0)
shrinks = attr.ib(default=0)
deletions = attr.ib(default=0)
@property
def failures(self):
return self.runs - self.successes
@property
def name(self):
return self.pass_function.__name__
def __eq__(self, other):
return self.index == other.index
def __hash__(self):
return hash(self.index)
def __lt__(self, other):
return self.key() < other.key()
def key(self):
# Smaller is better.
return (
self.runs,
self.failures,
self.calls,
self.index
)
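# Illustrative note on the ordering above: key() produces plain tuples, so
# passes compare as (runs, failures, calls, index) and "smaller is better";
# e.g. (1, 0, 10, 3) < (1, 1, 2, 0) < (2, 0, 0, 1), meaning a pass that has
# run less often is tried first, with failures, calls and definition order
# as tie-breakers.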
class Shrinker(object):
"""A shrinker is a child object of a ConjectureRunner which is designed to
manage the associated state of a particular shrink problem.
Currently the only shrink problem we care about is "interesting and with a
particular interesting_origin", but this is abstracted into a general
purpose predicate for more flexibility later - e.g. we are likely to want
to shrink with respect to a particular coverage target later.
Data with a status < VALID may be assumed not to satisfy the predicate.
The expected usage pattern is that this is only ever called from within the
engine.
"""
DEFAULT_PASSES = [
'pass_to_descendant',
'zero_examples',
'adaptive_example_deletion',
'reorder_examples',
'minimize_duplicated_blocks',
'minimize_individual_blocks',
]
EMERGENCY_PASSES = [
block_program('-XX'),
block_program('XX'),
'example_deletion_with_block_lowering',
'shrink_offset_pairs',
'minimize_block_pairs_retaining_sum',
]
def __init__(self, engine, initial, predicate):
"""Create a shrinker for a particular engine, with a given starting
point and predicate. When shrink() is called it will attempt to find an
example for which predicate is True and which is strictly smaller than
initial.
Note that initial is a ConjectureData object, and predicate
takes ConjectureData objects.
"""
self.__engine = engine
self.__predicate = predicate
self.discarding_failed = False
self.__shrinking_prefixes = set()
self.initial_size = len(initial.buffer)
# We add a second level of caching local to the shrinker. This is a bit
# of a hack. Ideally we'd be able to rely on the engine's functionality
# for this. Doing it this way has two benefits: Firstly, the engine
# does not currently cache overruns (and probably shouldn't, but could
# recreate them on demand if necessary), and secondly Python dicts are
# much faster than our pure Python tree-based lookups.
self.__test_function_cache = {}
# We keep track of the current best example on the shrink_target
# attribute.
self.shrink_target = None
self.update_shrink_target(initial)
self.shrinks = 0
self.initial_calls = self.__engine.call_count
self.current_pass_depth = 0
self.passes_by_name = {}
self.clear_passes()
for p in Shrinker.DEFAULT_PASSES:
self.add_new_pass(p)
for p in Shrinker.EMERGENCY_PASSES:
self.add_new_pass(p, classification=PassClassification.AVOID)
self.add_new_pass(
'lower_common_block_offset',
classification=PassClassification.SPECIAL
)
def clear_passes(self):
"""Reset all passes on the shrinker, leaving it in a blank state.
This is mostly useful for testing.
"""
# Note that we deliberately do not clear passes_by_name. This means
# that we can still look up and explicitly run the standard passes,
# they just won't be available by default.
self.passes = []
self.passes_awaiting_requeue = []
self.pass_queues = {c: [] for c in PassClassification}
self.known_programs = set()
def add_new_pass(self, run, classification=PassClassification.CANDIDATE):
"""Creates a shrink pass corresponding to calling ``run(self)``"""
if isinstance(run, str):
run = getattr(Shrinker, run)
p = ShrinkPass(
pass_function=run, index=len(self.passes),
classification=classification,
)
if hasattr(run, | |
#!/usr/bin/env python
# =============================================================================
# nnAvicaching_find_weights_hiddenlayer.py
# Author: <NAME> -- github: @anmolkabra
# Project: Solving the Avicaching Game Faster and Better (Summer 2017)
# -----------------------------------------------------------------------------
# Purpose of the Script:
# Refer to the Report (link) for detailed explanation. In a gist, this script
# learns the weights that highlight the change of eBird agents' behavior
# after certain rewards are applied. The model uses a **4-layered** neural
# network.
# -----------------------------------------------------------------------------
# Required Dependencies/Software:
# - Python 2.x (obviously, Anaconda environment used originally)
# - PyTorch
# - NumPy
# -----------------------------------------------------------------------------
# Required Local Files/Data/Modules:
# - ./data/*
# - ./avicaching_data.py
# =============================================================================
from __future__ import print_function
import argparse
import time
import math
import os
import sys
import numpy as np
import matplotlib
try:
os.environ["DISPLAY"]
except KeyError as e:
# working without X/GUI environment
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import avicaching_data as ad
# import torch modules
import torch, torch.nn as nn
import torch.nn.functional as torchfun
import torch.optim as optim
from torch.autograd import Variable
matplotlib.rcParams.update({'font.size': 14}) # font-size for plots
# =============================================================================
# training specs
# =============================================================================
parser = argparse.ArgumentParser(description="NN Avicaching model for finding weights")
# training parameters
parser.add_argument("--lr", type=float, default=0.001, metavar="LR",
help="inputs learning rate of the network (default=0.001)")
parser.add_argument("--no-cuda", action="store_true", default=False,
help="disables CUDA training")
parser.add_argument("--epochs", type=int, default=10, metavar="E",
help="inputs the number of epochs to train for")
# data options
parser.add_argument("--train-percent", type=float, default=0.8, metavar="T",
help="breaks the data into T percent training and rest testing (default=0.8)")
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default=1)')
parser.add_argument("--locations", type=int, default=116, metavar="J",
help="inputs the number of locations (default=116)")
parser.add_argument("--time", type=int, default=173, metavar="T",
help="inputs total time of data collection; number of weeks (default=173)")
parser.add_argument("--rand", action="store_true", default=False,
help="uses random xyr data")
# plot/log options
parser.add_argument("--no-plots", action="store_true", default=False,
help="skips generating plot maps")
parser.add_argument("--hide-loss-plot", action="store_true", default=False,
help="hides the loss plot, which is only saved")
parser.add_argument("--hide-map-plot", action="store_true", default=False,
help="hides the map plot, which is only saved")
parser.add_argument("--log-interval", type=int, default=1, metavar="I",
help="prints training information at I epoch intervals (default=1)")
# deprecated options -- not deleting if one chooses to use them
parser.add_argument("--expand-R", action="store_true", default=False,
help="[see script] expands the reward vectors into matrices with distributed rewards")
parser.add_argument("--eta", type=float, default=10.0, metavar="F",
help="[see script] inputs parameter eta in the model (default=10.0)")
parser.add_argument("--lambda-L1", type=float, default=10.0, metavar="LAM",
help="[see script] inputs the L1 regularizing coefficient")
parser.add_argument("--momentum", type=float, default=1.0, metavar="M",
help="[see script] inputs SGD momentum (default=1.0)") # if using SGD
args = parser.parse_args()
# assigning cuda check and test check to single variables
args.cuda = not args.no_cuda and torch.cuda.is_available()
args.should_test = (args.train_percent != 1.0)
# set the seeds
torch.manual_seed(args.seed)
np.random.seed(seed=args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# =============================================================================
# constants and parameters
# =============================================================================
# global values and datasets
torchten = torch.FloatTensor # change here to use diff containers
J, T, numFeatures = args.locations, args.time, 0
trainX, trainY, trainR, testX, testY, testR, F_DIST = [], [], [], [], [], [], []
u_train, u_test = np.array([]), np.array([])
num_train = int(math.floor(args.train_percent * T))
num_test = T - num_train
# random datasets locations assigned to variables
locs_in_file = 232 # change this to use a diff random file
randXYR_file = "./data/random/randXYR" + str(locs_in_file) + ".txt"
randXYR_weights_file = "./data/random/randXYR" + str(locs_in_file) + "_weights.txt"
randF_file = "./data/random/randF" + str(locs_in_file) + ".csv"
randDIST_file = "./data/random/randDIST" + str(locs_in_file) + ".txt"
# =============================================================================
# data input functions
# =============================================================================
def read_set_data():
"""
Reads Datasets X, Y, R, f, D from the files using avicaching_data
module's functions. f and D are then combined into F_DIST as preprocessed
tensor. All datasets are normalized, expanded, averaged as required,
leaving as torch tensors at the end of the function.
"""
global trainX, trainY, trainR, testX, testY, testR, F_DIST, numFeatures
global u_train, u_test
# shapes of datasets -- [] means expanded form:
# - X, Y: T x J
# - R: T x J [x 15]
# - net.w1, net.w2: J x numF x numF
# - net.w3: J x numF x 1
# - F_DIST: J x J x numF
# read f and DIST datasets from file, operate on them
if args.rand:
F = ad.read_F_file(randF_file, J)
DIST = ad.read_dist_file(randDIST_file, J)
else:
F = ad.read_F_file(
"./data/loc_feature_with_avicaching_combined.csv", J)
DIST = ad.read_dist_file(
"./data/site_distances_km_drastic_price_histlong_0327_0813_combined.txt",
J)
F = ad.normalize(F, along_dim=0, using_max=True) # normalize using max
DIST = ad.normalize(DIST, using_max=True) # normalize using max
# process data for the NN
numFeatures = len(F[0]) + 1 # compensating for the distance element
F_DIST = torchten(ad.combine_DIST_F(F, DIST, J, numFeatures))
numFeatures += 1 # for reward later
# operate on XYR data
X, Y, R = [], [], []
if args.rand:
if not os.path.isfile(randXYR_file):
# file doesn't exists, make random data, write to file
X, Y, R = make_rand_data()
ad.save_rand_XYR(randXYR_file, X, Y, R, J, T)
X, Y, R = ad.read_XYR_file(randXYR_file, J, T)
else:
X, Y, R = ad.read_XYR_file(
"./data/density_shift_histlong_as_previous_loc_classical_drastic_price_0327_0813.txt",
J, T)
u = np.sum(Y, axis=1) # u weights for calculating losses
# normalize X, Y using sum along rows
X = ad.normalize(X, along_dim=1, using_max=False)
Y = ad.normalize(Y, along_dim=1, using_max=False)
if not args.expand_R:
R = ad.normalize(R, along_dim=0, using_max=False)
# split the XYR data
if args.should_test:
# training and testing, shuffle and split the data
shuffle_order = np.random.permutation(T)
trainX, testX = ad.split_along_dim(X[shuffle_order], num_train, dim=0)
trainY, testY = ad.split_along_dim(Y[shuffle_order], num_train, dim=0)
trainR, testR = ad.split_along_dim(R[shuffle_order], num_train, dim=0)
u_train, u_test = ad.split_along_dim(u[shuffle_order], num_train, dim=0)
else:
# no testing, split the data -> test Matrices are empty
trainX, testX = ad.split_along_dim(X, num_train, dim=0)
trainY, testY = ad.split_along_dim(Y, num_train, dim=0)
trainR, testR = ad.split_along_dim(R, num_train, dim=0)
u_train, u_test = ad.split_along_dim(u, num_train, dim=0)
# change the input data into pytorch tensors and variables
trainR, testR = torchten(trainR), torchten(testR)
u_train, u_test = torchten(u_train), torchten(u_test)
trainX = Variable(torchten(trainX), requires_grad=False)
trainY = Variable(torchten(trainY), requires_grad=False)
testX = Variable(torchten(testX), requires_grad=False)
testY = Variable(torchten(testY), requires_grad=False)
if args.expand_R:
# expand R (trainR and testR)
trainR_ext = torchten(num_train, J, 15)
testR_ext = torchten(num_test, J, 15)
for t in xrange(num_train):
trainR_ext[t] = expand_R(trainR[t], R_max=15)
for t in xrange(num_test):
testR_ext[t] = expand_R(testR[t], R_max=15)
trainR, testR = trainR_ext, testR_ext
numFeatures += 14 # 1 reward already added, adding the remaining 14
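# Illustrative sketch (assumed shapes only, not the avicaching_data API): the
# shuffle-and-split above amounts to permuting the time axis and slicing off
# the first num_train rows for training.
def _demo_shuffle_split(T_demo=10, J_demo=4, train_percent=0.8):
    demo_X = np.arange(T_demo * J_demo, dtype=float).reshape(T_demo, J_demo)
    order = np.random.permutation(T_demo)
    n_train = int(math.floor(train_percent * T_demo))
    train, test = demo_X[order][:n_train], demo_X[order][n_train:]
    assert train.shape == (n_train, J_demo)
    assert test.shape == (T_demo - n_train, J_demo)
    return train, test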
def make_rand_data(X_max=100.0, R_max=100.0):
"""
This script uses the random datasets generated by
nnAvicaching_find_weights.py (use random datasets only for measuring
computation time -- results don't matter). So this function doesn't have
much use.
Creates random X and R and calculates Y based on random weights. Also
stores the weights in files before returning.
Args:
X_max -- (float) Maximum value of element in X dataset (default=100.0)
R_max -- (float) Maximum value of element in R dataset (default=100.0)
Returns:
3-tuple -- (X, Y, R) (values are not de-normalized)
"""
global F_DIST
# create random X and R and w
origX = np.floor(np.random.rand(T, J) * X_max)
origR = np.floor(np.random.rand(T, J) * R_max)
X = ad.normalize(origX, along_dim=1, using_max=False)
R = torchten(ad.normalize(origR, along_dim=0, using_max=False))
w1 = Variable(torch.randn(J, numFeatures, numFeatures).type(torchten))
w2 = Variable(torch.randn(J, numFeatures, numFeatures).type(torchten))
w3 = Variable(torch.randn(J, numFeatures, 1).type(torchten))
# convert to torch tensor and create placeholder for Y
Y = np.empty([T, J])
X = Variable(torchten(X), requires_grad=False)
Y = Variable(torchten(Y), requires_grad=False)
if args.cuda:
# transfer to GPU
X, Y, R, F_DIST = X.cuda(), Y.cuda(), R.cuda(), F_DIST.cuda()
w1, w2, w3 = w1.cuda(), w2.cuda(), w3.cuda()
# build Y
for t in xrange(T):
# build the input by appending testR[t]
inp = build_input(R[t])
if args.cuda:
inp = inp.cuda()
inp = Variable(inp)
# feed in data
inp = torchfun.relu(torch.bmm(inp, w1)) # first weights
inp = torchfun.relu(torch.bmm(inp, w2)) # second weights
inp = torch.bmm(inp, w3).view(-1, J) # third weights
# add eta to inp[u][u]
# eta_matrix = Variable(eta * torch.eye(J).type(torchten))
# if args.cuda:
# eta_matrix = eta_matrix.cuda()
# inp += eta_matrix
P = torchfun.softmax(inp).t()
# calculate Y
Y[t] = torch.mv(P, X[t])
# for verification of random data, save weights ---------------------------
w1_matrix = w1.data.cpu().numpy()
w2_matrix = w2.data.cpu().numpy()
w3_matrix = w3.data.view(-1, numFeatures).cpu().numpy()
with open(randXYR_weights_file, "w") as f:
# save w1
f.write('# w1 shape: {0}\n'.format(w1.shape))
for data_slice in w1_matrix:
f.write('# New slice\n')
np.savetxt(f, data_slice, fmt="%.15f", delimiter=" ")
# save w2
f.write('# w2 shape: {0}\n'.format(w2.shape))
for data_slice in w2_matrix:
f.write('# New slice\n')
np.savetxt(f, data_slice, fmt="%.15f", delimiter=" ")
# save w3
f.write('# w3 shape: {0}\n'.format(w3.shape))
np.savetxt(f, w3_matrix, fmt="%.15f", delimiter=" ")
# -------------------------------------------------------------------------
return (X.data.cpu().numpy(), Y.data.cpu().numpy(), R.cpu().numpy())
def test_given_data(X, Y, R, w1, w2, w3, J, T, u):
"""
Tests a given set of datasets, printing the loss value after one
forward propagation.
Args:
All arguments are self-explanatory
"""
# loss_normalizer divides the calculated loss after feed forward
# formula = || (Y - mean(Y))^T u ||^2, i.e. the sum over locations of the squared u-weighted deviation
loss_normalizer = (torch.mv(torch.t(Y \
- torch.mean(Y).expand_as(Y)).data, u)).pow(2).sum()
loss = 0
for t | |
import pandas as pd
import numpy as np
import config
class Result:
"""
A class used to represent a Result.
Attributes
----------
ticker : str
The stock ticker.
data : dataframe
The historical data associated with the ticker.
strategy : Strategy
An instance of the Strategy class.
buy_transactions: sequence
List of buy transactions.
sell_transactions: sequence
List of sell transactions.
buy_transaction_equity: sequence
List of equity values corresponding to the buy transactions.
sell_transaction_equity: sequence
List of equity values corresponding to the sell transactions.
Performance : Performance
An instance of the Performance class.
transactions : numeric
The total number of buy and sell transactions.
Methods
-------
performance_as_dict()
Returns the performance results in a dictionary.
tech_indicators()
Augments the data attribute with columns for technical indicators.
buy_and_sell_signals()
Calculate signals where they can be vectorised.
trade()
Enters and exit positions based on buy/sell signals.
calculate_returns()
Calculate returns after the trade method has been executed.
print_results()
Print the performance results to the console.
"""
def __init__(self, ticker, strategy, raw_data):
self.ticker = ticker
self.data = raw_data
self.strategy = strategy
self.tech_indicators()
self.buy_and_sell_signals()
self.buy_transactions, self.sell_transactions, self.buy_transaction_equity, self.sell_transaction_equity = self.trade()
self.Performance = self.calculate_returns()
self.transactions = len(self.buy_transactions + self.sell_transactions)
self.print_results()
def performance_as_dict(self):
"""Returns the performance results in a dictionary.
Parameters
----------
Raises
------
"""
return {'ticker': self.ticker, 'strategy': "Strategy(" + str(self.strategy.required_profit) + ", " + str(
self.strategy.required_pct_change_min) + ", " + str(self.strategy.required_pct_change_max) + ", " + str(
self.strategy.required_volume) + ")",
'annualised_return': self.Performance.annualised_return,
'annualised_return_ref': self.Performance.annualised_return_ref,
'end_date': self.Performance.end_date,
'end_price': self.Performance.end_price,
'gain': self.Performance.gain,
'gain_ref': self.Performance.gain_ref,
'start_date': self.Performance.start_date,
'start_price': self.Performance.start_price}
def tech_indicators(self):
"""Augments the data attribute with columns for technical indicators.
Parameters
----------
Raises
------
"""
self.data = self.data.assign(close_MA_50=self.data[["close"]].ewm(span=50).mean())
self.data = self.data.assign(close_MA_200=self.data[["close"]].ewm(span=200).mean())
self.data = self.data.assign(volume_MA_20=self.data[["volume"]].rolling(20).mean())
self.data = self.data.assign(
price_change_buy=self.data['close'].pct_change().between(self.strategy.required_pct_change_min,
self.strategy.required_pct_change_max))
self.data = self.data.assign(
volume_change_buy=(self.data["volume"] > self.strategy.required_volume * self.data["volume_MA_20"]))
# Money Flow Index (MFI)
typical_price = (self.data["high"] + self.data["low"] + self.data["close"]) / 3
money_flow = typical_price * self.data["volume"]
delta = money_flow - money_flow.shift(1)
delta = pd.Series([0 if np.isnan(x) else x for x in delta])
positive_money_flow = pd.Series([x if x > 0 else 0 for x in delta])
negative_money_flow = pd.Series([abs(x) if x < 0 else 0 for x in delta])
positive_money_flow_sum = positive_money_flow.rolling(window=14).sum().values
negative_money_flow_sum = negative_money_flow.rolling(window=14).sum().values
with np.errstate(divide='ignore', invalid='ignore'):
money_ratio = positive_money_flow_sum / negative_money_flow_sum
money_flow_index = 100 - 100 / (1 + money_ratio)
self.data = self.data.assign(MFI=money_flow_index)
# Relative Strength Index (RSI)
delta = self.data["close"] - self.data["close"].shift(1)
delta = pd.Series([0 if np.isnan(x) else x for x in delta])
up = pd.Series([x if x > 0 else 0 for x in delta])
down = pd.Series([abs(x) if x < 0 else 0 for x in delta])
with np.errstate(divide='ignore', invalid='ignore'):
rs = up.rolling(window=14).mean().values / down.rolling(window=14).mean().values
relative_strength_index = 100 - 100 / (1 + rs)
self.data = self.data.assign(RSI=relative_strength_index)
# Stochastic Oscillator
stochastic_oscillator = pd.Series(
(self.data["close"] - self.data["close"].rolling(window=14, center=False).min()) / (
self.data["close"].rolling(window=14, center=False).max() - self.data["close"].rolling(window=14,
center=False).min()))
stochastic_oscillator = 100 * stochastic_oscillator.rolling(window=3).mean()
self.data = self.data.assign(STO=stochastic_oscillator)
# Bollinger Bands
rolling_mean = self.data[["close"]].ewm(span=50).mean()
rolling_std = self.data[["close"]].ewm(span=50).std()
self.data = self.data.assign(BB_upper=rolling_mean + (rolling_std * 2))
self.data = self.data.assign(BB_lower=rolling_mean - (rolling_std * 2))
return
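# Worked example of the mapping shared by MFI and RSI above (illustrative
# figures only): index = 100 - 100 / (1 + ratio), so a money/strength ratio
# of 2 (inflows or gains twice the outflows or losses) gives ~66.7, a ratio
# of 1 gives exactly 50, and a ratio of 0 gives 0.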
def buy_and_sell_signals(self):
"""Calculate signals where they can be vectorised.
Generation of sell signal requires iterating through the data which is done in the trade method.
Parameters
----------
Raises
------
"""
self.data = self.data.assign(buy_signal=np.nan, sell_signal=np.nan, buy_signal_date=np.nan,
sell_signal_date=np.nan)
buy_prices = self.data["close"].iloc[np.where(self.data["volume_change_buy"] & self.data["price_change_buy"])]
buy_dates = self.data["date"].iloc[np.where(self.data["volume_change_buy"] & self.data["price_change_buy"])]
self.data = self.data.assign(buy_signal=buy_prices)
self.data = self.data.assign(buy_signal_date=buy_dates)
return
def trade(self):
"""Enters and exit positions based on buy/sell signals.
Parameters
----------
Raises
------
"""
buy_transactions, buy_transaction_equity, sell_transactions, sell_transaction_equity = ([] for i in range(4))
open_long_position, buy_and_hold, buy_and_hold_shares, shares = (0, 0, 0, 0)
buy_and_hold_position_array, open_long_position_array, strategy_equity_array, buy_and_hold_equity_array = (
np.full(len(self.data["close"].values), np.nan) for i in range(4))
# Create buy signal and buy signal dates without NaN or NaT (NaN and NaT inclusive arrays required for plots)
buy_signal_array_nonan = self.data["buy_signal"].values[~np.isnan(self.data["buy_signal"].values)]
buy_signal_array_dates_nonat = self.data["buy_signal_date"].values[
~np.isnat(self.data["buy_signal_date"].values)]
j = 0
cash = config.cash
buy_and_hold_cash = config.buy_and_hold_cash
for i in range(0, len(self.data["close"].values)):
# Handle buy
if np.isfinite(self.data["buy_signal"].values[i]):
if not open_long_position:
open_long_position = self.data["close"].values[i]
shares = (1 - config.transaction_fee) * (cash / open_long_position)
cash = 0
buy_transactions.append(pd.to_datetime(self.data["date"].values[i]).strftime("%d-%m-%Y"))
buy_transaction_equity.append(round(shares * self.data["close"].values[i] + cash, 2))
if not buy_and_hold:
buy_and_hold_shares = ((1 - config.transaction_fee) * buy_and_hold_cash) / \
self.data["close"].values[i]
buy_and_hold_cash = 0
buy_and_hold = 1
# Handle sell
elif (j < len(buy_signal_array_nonan) and
self.data["date"].values[i] > buy_signal_array_dates_nonat[j] and
self.data["close"].values[i] > self.strategy.required_profit * buy_signal_array_nonan[j]):
# Need to offset the index which is based on the original dataframe with all tickers
self.data.at[self.data.index[0] + i, "sell_signal"] = self.data["close"].values[i]
self.data.at[self.data.index[0] + i, "sell_signal_date"] = pd.to_datetime(self.data["date"].values[i])
if open_long_position:
j = j + 1
cash = (1 - config.transaction_fee) * shares * self.data["close"].values[i]
shares = 0
open_long_position = 0
sell_transactions.append(pd.to_datetime(self.data["date"].values[i]).strftime("%d-%m-%Y"))
sell_transaction_equity.append(round(shares * self.data["close"].values[i] + cash, 2))
# Record open positions
open_long_position_array[i] = self.data["close"].values[i] if open_long_position else 0
buy_and_hold_position_array[i] = self.data["close"].values[i] if buy_and_hold else 0
# Record equity
buy_and_hold_equity_array[i] = buy_and_hold_shares * buy_and_hold_position_array[
i] + buy_and_hold_cash
strategy_equity_array[i] = shares * open_long_position_array[i] + cash
self.data.sell_signal_date = self.data.sell_signal_date.astype("datetime64[ns]", copy=False)
self.data = self.data.assign(strategy_equity=strategy_equity_array,
buy_and_hold_equity=buy_and_hold_equity_array,
open_long_position=open_long_position_array,
buy_and_hold_position=buy_and_hold_position_array)
return buy_transactions, sell_transactions, buy_transaction_equity, sell_transaction_equity
def calculate_returns(self):
"""Calculate returns after the trade method has been executed.
Parameters
----------
Raises
------
"""
# Calculate returns using strategies and buy and hold
date_index_long = np.isfinite(self.data["open_long_position"])
date_index_buy_and_hold = np.isfinite(self.data["buy_and_hold_position"])
# Handle case where there is no long position
if self.data["date"][date_index_long].empty:
performance = Performance(0, 0, 0, 0, 0, 0, 0, 0)
return performance
else:
start_date = self.data["date"][date_index_long].iloc[0]
start_date_ref = self.data["date"][date_index_buy_and_hold].iloc[0]
start_price = self.data["strategy_equity"][date_index_long].iloc[0]
start_price_ref = self.data["buy_and_hold_equity"][date_index_buy_and_hold].iloc[0]
end_date = self.data["date"][date_index_long].iloc[-1]
end_date_ref = self.data["date"][date_index_buy_and_hold].iloc[-1]
end_price = self.data["strategy_equity"][date_index_long].iloc[-1]
end_price_ref = self.data["buy_and_hold_equity"][date_index_buy_and_hold].iloc[-1]
# Compute annualised returns
delta = 1 + (end_date - start_date).days
delta_ref = 1 + (end_date_ref - start_date_ref).days
annualised_return = 100 * (((end_price / start_price) ** (365 / delta)) - 1)
annualised_return_ref = 100 * (((end_price_ref / start_price_ref) ** (365 / delta_ref)) - 1)
gain = end_price / start_price
gain_ref = end_price_ref / start_price_ref
performance = Performance(annualised_return, annualised_return_ref, start_price, start_date, end_price,
end_date, gain, gain_ref)
return performance
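# Worked example of the annualisation above (illustrative figures only):
# equity growing from 10000 to 12000 over 730 days gives
# 100 * ((12000.0 / 10000.0) ** (365.0 / 730) - 1) ~= 9.5% annualised,
# while the raw gain is simply 12000.0 / 10000.0 = 1.2.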
def print_results(self):
"""Print the performance results to the console.
Parameters
----------
Raises
------
"""
print(str(self.ticker) + " Strategy Annual Return: " + str(self.Performance.annualised_return) + "%" + "\n" +
str(self.ticker) + " Buy Signals: " + str(
[pd.to_datetime(i).strftime("%d-%m-%Y") for i in self.data["buy_signal_date"].tolist() if
not pd.isna(i)]) + "\n" +
str(self.ticker) + " Buy Transactions: " + str(self.buy_transactions) + "\n" +
str(self.ticker) + " Buy Transaction Equity: " + str(self.buy_transaction_equity) + "\n" +
str(self.ticker) + " Position Start Date: " + str(
pd.to_datetime(self.Performance.start_date).strftime("%d-%m-%Y")) + "\n" +
str(self.ticker) + " Position Equity Start: " + str(self.Performance.start_price) + "\n" +
str(self.ticker) + " Sell Signals: " + str(
[pd.to_datetime(i).strftime("%d-%m-%Y") for i in self.data["sell_signal_date"].tolist() if
not pd.isna(i)]) + "\n" +
str(self.ticker) + " Sell Transactions: " + str(self.sell_transactions) + "\n" +
str(self.ticker) + " Sell Transaction Equity: " + str(self.sell_transaction_equity) + "\n" +
str(self.ticker) + " Position End Date: " + str(
pd.to_datetime(self.Performance.end_date).strftime("%d-%m-%Y")) + "\n" +
str(self.ticker) + " Position Equity End: " + str(self.Performance.end_price) + "\n" +
str(self.ticker) + " Buy and Hold Annual Return: " + str(
self.Performance.annualised_return_ref) + "%" + "\n" +
str(self.ticker) + " Strategy Gain: " + str(self.Performance.gain) + "\n" +
str(self.ticker) + " Buy and Hold Gain: " + str(self.Performance.gain))
return
class Performance:
"""
A class used to hold the performance for the Result.
Attributes
----------
annualised_return : numeric
The annualised return based on equity changes following the buy and sell transactions (based on the trading
strategy) in the trade method.
annualised_return_ref : numeric
The annualised return based on equity changes following the buy and hold transactions in the trade method.
start_price : numeric
The equity at the start of the strategy.
start_date : numeric
The date at the start of the strategy.
end_price : numeric
The equity at the end of the strategy.
end_date : numeric
The date at the end of the strategy.
gain : numeric
The raw gain (i.e. not annualised) based on equity changes following the buy and sell transactions (based on
the trading strategy) | |
# --------------------------------------------------------
# mmqgis_library - mmqgis operation functions
#
# begin : 10 May 2010
# copyright : (c) 2010 by <NAME>
# email : See <EMAIL>
#
# MMQGIS is free software and is offered without guarantee
# or warranty. You can redistribute it and/or modify it
# under the terms of version 2 of the GNU General Public
# License (GPL v2) as published by the Free Software
# Foundation (www.gnu.org).
# --------------------------------------------------------
import io
import re
import csv
import sys
import time
import locale
import random
import urllib2
import os.path
import operator
import tempfile
import xml.etree.ElementTree
from qgis.core import *
from PyQt4.QtCore import *
from PyQt4.QtGui import *
# Used instead of "import math" so math functions can be used without "math." prefix
from math import *
# --------------------------------------------------------
# MMQGIS Utility Functions
# --------------------------------------------------------
# Needed to replace the useful function QgsVectorLayer::featureAtId()
# that was tantalizingly added in 1.9 but then removed
def mmqgis_feature_at_id(layer, featureid):
iterator = layer.getFeatures(QgsFeatureRequest(featureid))
feature = QgsFeature()
if iterator.nextFeature(feature):
return feature
return None
def mmqgis_find_layer(layer_name):
# print "find_layer(" + str(layer_name) + ")"
#for name, search_layer in QgsMapLayerRegistry.instance().mapLayers().iteritems():
# if search_layer.name() == layer_name:
# return search_layer
if not layer_name:
return None
layers = QgsMapLayerRegistry.instance().mapLayersByName(layer_name)
if (len(layers) >= 1):
return layers[0]
return None
def mmqgis_is_float(s):
try:
float(s)
return True
except:
return False
# Cumbersome function to give backward compatibility before python 2.7
def mmqgis_format_float(value, separator, decimals):
formatstring = ("%0." + unicode(int(decimals)) + "f")
# print str(value) + ": " + formatstring
string = formatstring % value
intend = string.find('.')
if intend < 0:
intend = len(string)
if separator and (intend > 3):
start = intend % 3
if start == 0:
start = 3
intstring = string[0:start]
for x in range(start, intend, 3):
intstring = intstring + separator + string[x:x+3]
string = intstring + string[intend:]
return string
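# For example (illustrative call): mmqgis_format_float(1234567.891, ",", 2)
# returns "1,234,567.89", while an empty separator leaves it as "1234567.89".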
def mmqgis_gridify_points(hspacing, vspacing, points):
# Align points to grid
point_count = 0
deleted_points = 0
newpoints = []
for point in points:
point_count += 1
newpoints.append(QgsPoint(round(point.x() / hspacing, 0) * hspacing, \
round(point.y() / vspacing, 0) * vspacing))
# Delete overlapping points
z = 0
while z < (len(newpoints) - 2):
if newpoints[z] == newpoints[z + 1]:
newpoints.pop(z + 1)
deleted_points += 1
else:
z += 1
# Delete line points that go out and return to the same place
z = 0
while z < (len(newpoints) - 3):
if newpoints[z] == newpoints[z + 2]:
newpoints.pop(z + 1)
newpoints.pop(z + 1)
deleted_points += 2
# Step back to catch arcs
if (z > 0):
z -= 1
else:
z += 1
# Delete overlapping start/end points
while (len(newpoints) > 1) and (newpoints[0] == newpoints[len(newpoints) - 1]):
newpoints.pop(len(newpoints) - 1)
deleted_points += 2
return newpoints, point_count, deleted_points
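# For example (illustrative values): with hspacing = vspacing = 10, the point
# (12.4, 17.8) snaps to (10.0, 20.0); consecutive points that snap to the same
# grid node are then collapsed into a single point.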
# http://stackoverflow.com/questions/3410976/how-to-round-a-number-to-significant-figures-in-python
def mmqgis_round(number, digits):
if (number == 0):
return 0
else:
return round(number, digits - int(floor(log10(abs(number)))) - 1)
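# For example (illustrative values): mmqgis_round(123456, 3) returns 123000.0
# and mmqgis_round(0.0012345, 2) returns 0.0012.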
# Use common address abbreviations to reduce naming discrepancies and improve hit ratio
def mmqgis_searchable_streetname(name):
# print "searchable_name(" + str(name) + ")"
if not name:
return ""
# name = unicode(name).strip().lower()
name = name.strip().lower()
name = name.replace(".", "")
name = name.replace(" street", " st")
name = name.replace(" avenue", " av")
name = name.replace(" plaza", " plz")
name = name.replace(" drive", " dr")
name = name.replace("saint ", "st ")
name = name.replace("fort ", "ft ")
name = name.replace(" ave", " av")
name = name.replace("east", "e")
name = name.replace("west", "w")
name = name.replace("north", "n")
name = name.replace("south", "s")
name = name.replace("1st", "1")
name = name.replace("2nd", "2")
name = name.replace("3rd", "3")
name = name.replace("4th", "4")
name = name.replace("5th", "5")
name = name.replace("6th", "6")
name = name.replace("7th", "7")
name = name.replace("8th", "8")
name = name.replace("9th", "9")
name = name.replace("0th", "0")
name = name.replace("1th", "1")
name = name.replace("2th", "2")
name = name.replace("3th", "3")
return name
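# For example (illustrative value): mmqgis_searchable_streetname("East 42nd Street")
# returns "e 42 st".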
# Parses and normalizes street addresses to make search easier.
# Returns list: [number street unit]
def mmqgis_normalize_address(address):
if not address:
return [None, None, None]
# Everything upper case
address = address.strip()
address = address.upper()
# Remove confusing punctuation
address = address.replace(".", "")
address = address.replace(",", "")
# address = address.replace("#", "")
parts = address.split()
if (len(parts) <= 1):
return [None, address, None]
# Find Unit Number (if any)
unit = None
if (len(parts) >= 3) and (parts[len(parts) - 2] == "SUITE"):
unit = parts[len(parts) - 1]
del parts[(len(parts) - 2):len(parts)]
elif (len(parts) >= 3) and (parts[len(parts) - 2] == "UNIT"):
unit = parts[len(parts) - 1]
del parts[(len(parts) - 2):len(parts)]
elif (len(parts) >= 3) and (parts[len(parts) - 2] == "STE"):
unit = parts[len(parts) - 1]
del parts[(len(parts) - 2):len(parts)]
elif (len(parts) >= 3) and (parts[len(parts) - 2] == "FLOOR"):
unit = "FLOOR " + unicode(parts[len(parts) - 1])
del parts[(len(parts) - 2):len(parts)]
elif (len(parts) > 1) and (parts[len(parts) - 1][0] == "#"):
unit = parts[len(parts) - 1].replace('#','')
del parts[len(parts) - 1]
# Find Lot Number (if any)
number = None
if (len(parts) >= 3) and (not parts[0][0].isdigit()) and \
(parts[len(parts) - 2][0].isdigit()) and (parts[len(parts) - 1][0].isdigit()):
# European street number + unit number last
number = parts[len(parts) - 2]
del parts[len(parts) - 2]
elif (not parts[0][0].isdigit() and (parts[len(parts) - 1][0].isdigit())):
# European street number last
number = parts[len(parts) - 1]
del parts[len(parts) - 1]
elif parts[0][0].isdigit():
# American street number first
number = parts[0]
del parts[0]
# Replace numeric suffixes
suffixes = {
"1ST": "1",
"2ND": "2",
"3RD": "3",
"4TH": "4",
"5TH": "5",
"6TH": "6",
"7TH": "7",
"8TH": "8",
"9TH": "9",
"0TH": "0",
"1TH": "1",
"2TH": "2",
"3TH": "3" }
# Regular expressions are much faster than loop
# https://www.safaribooksonline.com/library/view/python-cookbook-2nd/0596007973/ch01s19.html
regex = re.compile("(%s)" % "|".join(suffixes.keys()))
for index, part in enumerate(parts):
parts[index] = regex.sub(lambda x: suffixes[x.group(0)], part)
#for suffix in suffixes:
# for index, part in enumerate(parts):
# parts[index] = part.replace(suffix[0], suffix[1])
# Text versions of numbered streets
number_words = {
"FIRST": "1",
"SECOND": "2",
"THIRD": "3",
"FOURTH": "4",
"FIFTH": "5",
"SIXTH": "6",
"SEVENTH": "7",
"EIGHTH": "8",
"NINTH": "9",
"TENTH": "10",
"ELEVENTH": "11",
"TWELFTH": "12",
"THIRTEENTH": "13",
"FOURTEENTH": "14",
"FIFTEENTH": "15",
"SIXTEENTH": "16",
"SEVENTEENTH": "17",
"EIGHTEENTH": "18",
"NINTEENTH": "19",
"TWENTIETH": "20" }
parts = map(lambda x: number_words.get(x, x), parts)
# Full strings replaced with abbreviations because
# replacing abbreviations with full strings would involve uncertain inference
# while replacing full strings with abbreviations increases
# potential matches at the expense of accuracy
abbreviations = {
"STREET": "ST",
"BOULEVARD": "BLVD",
"PARKWAY": "PKWY",
"HIGHWAY": "HWY",
"CIRCLE": "CIR",
"AVENUE": "AV",
"PLACE": "PL",
"PLAZA": "PL",
"DRIVE": "DR",
"SAINT": "ST",
"NORTH": "N",
"SOUTH": "S",
"FORT": "FT",
"ROAD": "RD",
"EAST": "E",
"WEST": "W",
"AVE": "AV"}
# map() is faster than loop
parts = map(lambda x: abbreviations.get(x, x), parts)
#for abbreviation in abbreviations:
# for index, part in enumerate(parts):
# if (part == abbreviation[0]):
# parts[index] = abbreviation[1]
# Recombine into street name
street = ' '.join(parts)
return [number, street, unit]
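# For example (illustrative address): mmqgis_normalize_address("1500 North Halsted Street Suite 200")
# returns ["1500", "N HALSTED ST", "200"].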
def mmqgis_geocode_address_google(address, apikey):
if apikey:
url = "https://maps.googleapis.com/maps/api/geocode/xml?sensor=false&address=" + address + "&key=" + apikey
else:
url = "http://maps.googleapis.com/maps/api/geocode/xml?sensor=false&address=" + address
max_attempts = 5
for attempt in range(1, max_attempts + 1):
try:
xml = urllib2.urlopen(url).read()
break
except Exception, e:
message = "Failure connecting to maps.googleapis.com: " + unicode(e)
if (attempt >= max_attempts):
return message, None, None, None, None
# Wait a second and try again
time.sleep(1)
if (xml.find('OVER_QUERY_LIMIT') > 0):
return "Exceeded Daily Google Limit", None, None, None, None
if (xml.find('The provided API key is invalid.') > 0):
return "Invalid API Key", None, None, None, None
if (xml.find('REQUEST_DENIED') > 0):
return "Request Denied", None, None, None, None
#print(url)
resultstart = 0
x = []
y = []
addrtype = []
addrlocat = []
formatted_addr = []
resultstart = xml.find("<result>")
while (resultstart > 0):
resultend = xml.find("</result>", resultstart)
if (resultend < 0):
resultend = len(xml)
result = xml[resultstart:resultend]
resultstart = xml.find("<result>", resultend)
latstart = result.find("<lat>")
latend = result.find("</lat>")
if (latstart < 0) or (latend < (latstart + 5)):
continue
longstart = result.find("<lng>")
longend = result.find("</lng>")
if (longstart < 0) or (longend < (longstart + 5)):
continue
y.append(float(result[latstart + 5:latend]))
x.append(float(result[longstart + 5:longend]))
addrtypestart = result.find("<type>")
addrtypeend = result.find("</type>")
if (addrtypestart > 0) and (addrtypeend > (addrtypestart + 6)):
addrtype.append(unicode(result[(addrtypestart + 6):addrtypeend], 'utf-8').strip())
else:
addrtype.append("")
addrlocatstart = result.find("<location_type>")
addrlocatend = result.find("</location_type>")
if (addrlocatstart > 0) and (addrlocatend > (addrlocatstart + 15)):
addrlocat.append(unicode(result[(addrlocatstart + 15):addrlocatend], 'utf-8').strip())
else:
addrlocat.append("")
formstart = result.find("<formatted_address>")
formend = result.find("</formatted_address>")
if (formstart > 0) and (formend > (formstart + 19)):
formatted_addr.append(unicode(result[(formstart + 19):formend], 'utf-8').strip())
else:
formatted_addr.append(address)
return x, y, addrtype, addrlocat, formatted_addr
def mmqgis_geocode_address_osm(address):
url = "http://nominatim.openstreetmap.org/search?format=xml&q=" + address
max_attempts = 5
for attempt in range(1, max_attempts + 1):
try:
osm = urllib2.urlopen(url).read()
break
except Exception, e:
message = "Failure connecting to maps.googleapis.com: " + unicode(e)
if (attempt >= max_attempts):
return message, None, None, None, None
# Wait a second and try again
time.sleep(1)
# print(url)
# print(osm)
x = []
y = []
addrtype = []
addrlocat = []
formatted_addr = []
# Parse the XML
try:
results = xml.etree.ElementTree.fromstring(osm)
# results = tree.getroot()
except:
# print("XML Parser Failure")
return None, None, None, None, None
# Parse <place> under <searchresults>
for place in results:
try:
lat = place.attrib['lat']
lon = place.attrib['lon']
except:
lat = None
lon = None
try:
aclass = place.attrib['class']
except:
aclass = ""
try:
atype = place.attrib['type']
except:
atype = ""
try:
aname = place.attrib['display_name']
except:
aname = ""
# print(lat, lon)
if (lat != None) and (lon != None):
x.append(float(lon))
y.append(float(lat))
addrtype.append(aclass)
addrlocat.append(atype)
formatted_addr.append(aname)
return x, y, addrtype, addrlocat, formatted_addr
# Legacy code left here for reference if anything breaks in new XML parsing 1/31/2016
def mmqgis_old_geocode_address_osm(address):
url = "http://nominatim.openstreetmap.org/search?format=xml&q=" + address
try:
xml = urllib2.urlopen(url).read()
except:
# URLError as e: e.reason
return "Failure connecting to nominatim.openstreetmap.org", None, None, None, None
#print(url)
#print(xml)
x = []
y = []
addrtype = []
addrlocat = []
formatted_addr = []
placestart = xml.find("<place")
while (placestart > 0):
placeend = xml.find("/>", placestart)
if (placeend < 0):
placeend = len(xml)
place = xml[placestart:placeend]
placestart = xml.find("<place", placeend)
latstart = place.find('lat="')
latend = place.find('"', latstart + 5)
if (latstart < 0) or (latend < (latstart + 5)):
continue
longstart = place.find('lon="')
longend = place.find('"', longstart + 5)
if (longstart < 0) or (longend < (longstart + 5)):
continue
y.append(float(place[latstart + 5:latend]))
x.append(float(place[longstart + 5:longend]))
addrtypestart = place.find("class=")
addrtypeend = place.find("'", addrtypestart + 7)
if (addrtypestart > 0) and (addrtypeend > (addrtypestart + 7)):
addrtype.append(unicode(place[(addrtypestart + 7):addrtypeend], 'utf-8').strip())
else:
addrtype.append("")
addrlocatstart = place.find("type=")
addrlocatend = place.find("'", addrlocatstart + 6)
if (addrlocatstart > 0) and (addrlocatend > (addrlocatstart + 6)):
addrlocat.append(unicode(place[(addrlocatstart + 6):addrlocatend], 'utf-8').strip())
else:
addrlocat.append("")
formstart = place.find("display_name=")
formend = place.find("'", formstart + 14)
if (formstart > 0) and (formend > (formstart + 14)):
formatted_addr.append(unicode(place[(formstart + 14):formend], 'utf-8').strip())
else:
formatted_addr.append(address)
return x, y, addrtype, addrlocat, formatted_addr
def mmqgis_wkbtype_to_text(wkbtype):
if wkbtype == QGis.WKBUnknown: return "Unknown"
if wkbtype == QGis.WKBPoint: return "point"
if wkbtype == QGis.WKBLineString: return "linestring"
if wkbtype == QGis.WKBPolygon: return "polygon"
if wkbtype == QGis.WKBMultiPoint: | |
<filename>f5_cccl/service/manager.py
"""Manages the creation and deployment of desired services configuration."""
# coding=utf-8
#
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import logging
from time import time
import f5_cccl.exceptions as exc
from f5_cccl.service.config_reader import ServiceConfigReader
from f5_cccl.service.validation import ServiceConfigValidator
from f5_cccl.resource.ltm.node import ApiNode
from f5_cccl.utils.route_domain import (
encoded_normalize_address_with_route_domain)
from f5_cccl.utils.route_domain import split_ip_with_route_domain
LOGGER = logging.getLogger(__name__)
# Check for upgrade issues on first pass only
class ServiceConfigDeployer(object):
"""CCCL config deployer class."""
first_pass = True
def __init__(self, bigip_proxy):
"""Initialize the config deployer."""
self._bigip = bigip_proxy
def _get_resource_tasks(self, existing, desired):
"""Get the list of resources to create, delete, update."""
create_list = [
desired[resource] for resource in
set(desired) - set(existing)
]
update_list = set(desired) & set(existing)
update_list = [
desired[resource] for resource in update_list
if desired[resource] != existing[resource]
]
delete_list = [
existing[resource] for resource in
set(existing) - set(desired)
]
return (create_list, update_list, delete_list)
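# Illustrative example (hypothetical resource names): with
# existing = {'pool_a': 1, 'pool_b': 2} and desired = {'pool_b': 3, 'pool_c': 4},
# the set arithmetic above yields create = [4] ('pool_c' is new),
# update = [3] ('pool_b' exists but its definition differs) and
# delete = [2] ('pool_a' is no longer desired).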
def _create_resources(self, create_list):
"""Iterate over the resources and call create method."""
LOGGER.debug("Creating %d resources...", len(create_list))
retry_list = list()
for resource in create_list:
try:
start_time = time()
resource.create(self._bigip.mgmt_root())
LOGGER.debug("Created %s in %.5f seconds.",
resource.name, (time() - start_time))
except exc.F5CcclResourceConflictError:
LOGGER.warning(
"Resource /%s/%s already exists, skipping task...",
resource.partition, resource.name)
except (exc.F5CcclResourceCreateError,
exc.F5CcclError) as e:
LOGGER.error(str(e))
LOGGER.error(
"Resource /%s/%s creation error, requeuing task...",
resource.partition, resource.name)
retry_list.append(resource)
return retry_list
def _update_resources(self, update_list):
"""Iterate over the resources and call update method."""
LOGGER.debug("Updating %d resources...", len(update_list))
retry_list = list()
for resource in update_list:
try:
start_time = time()
resource.update(self._bigip.mgmt_root())
LOGGER.debug("Updated %s in %.5f seconds.",
resource.name, (time() - start_time))
except exc.F5CcclResourceNotFoundError as e:
LOGGER.warning(
"Resource /%s/%s does not exist, skipping task...",
resource.partition, resource.name)
except (exc.F5CcclResourceUpdateError,
exc.F5CcclResourceRequestError,
exc.F5CcclError) as e:
LOGGER.error(str(e))
LOGGER.error(
"Resource /%s/%s update error, requeuing task...",
resource.partition, resource.name)
retry_list.append(resource)
return retry_list
def _delete_resources(self, delete_list, retry=True):
"""Iterate over the resources and call delete method."""
LOGGER.debug("Deleting %d resources...", len(delete_list))
retry_list = list()
for resource in delete_list:
try:
start_time = time()
resource.delete(self._bigip.mgmt_root())
LOGGER.debug("Deleted %s in %.5f seconds.",
resource.name, (time() - start_time))
except exc.F5CcclResourceNotFoundError:
LOGGER.warning(
"Resource /%s/%s does not exist, skipping task...",
resource.partition, resource.name)
except (exc.F5CcclResourceDeleteError,
exc.F5CcclResourceRequestError,
exc.F5CcclError) as e:
LOGGER.error(str(e))
if retry:
LOGGER.error(
"Resource /%s/%s delete error, requeuing task...",
resource.partition, resource.name)
retry_list.append(resource)
return retry_list
def _get_monitor_tasks(self, desired_config):
"""Get CRUD tasks for all monitors."""
create_monitors = list()
delete_monitors = list()
update_monitors = list()
for hm_type in ['http', 'https', 'tcp', 'icmp', 'udp']:
existing = self._bigip.get_monitors(hm_type)
config_key = "{}_monitors".format(hm_type)
desired = desired_config.get(config_key, dict())
(create_hm, update_hm, delete_hm) = (
self._get_resource_tasks(existing, desired))
create_monitors += create_hm
update_monitors += update_hm
delete_monitors += delete_hm
return (create_monitors, update_monitors, delete_monitors)
def _get_user_tunnel_tasks(self, desired):
"""Get the update tasks for user-created fdb tunnels."""
all_tunnels = self._bigip.get_fdb_tunnels(all_tunnels=True)
# Get only the tunnels we desire
update_list = set(desired) & set(all_tunnels)
update_list = [
desired[resource] for resource in update_list
if desired[resource] != all_tunnels[resource]
]
return update_list
def _desired_nodes(self, default_route_domain):
"""Desired nodes is inferred from the active pool members."""
desired_nodes = dict()
nodes = self._bigip.get_nodes()
pools = self._bigip.get_pools(True)
for pool in pools:
for member in pools[pool].members:
pool_addr = member.name.split('%3A')[0]
pool_addr_rd = encoded_normalize_address_with_route_domain(
pool_addr, default_route_domain, True, False)
# make a copy to iterate over, then delete from 'nodes'
node_list = list(nodes.keys())
for key in node_list:
node_addr = nodes[key].data['address']
node_addr_rd = encoded_normalize_address_with_route_domain(
node_addr, default_route_domain, False, False)
if node_addr_rd == pool_addr_rd:
node = {'name': key,
'partition': nodes[key].partition,
'address': node_addr_rd,
'default_route_domain': default_route_domain,
'state': 'user-up',
'session': 'user-enabled'}
desired_node = ApiNode(**node)
desired_nodes[desired_node.name] = desired_node
return desired_nodes
# pylint: disable=too-many-locals
def _pre_deploy_legacy_ltm_cleanup(self):
"""Remove legacy named resources (pre Route Domain support)
We now create node resources with names that include the route
domain whether the end user specified them or not. This prevents
inconsistent behavior when the default route domain is changed for
the managed partition.
This function can be removed when the cccl version >= 2.0
"""
# Detect legacy names (nodes do not include the route domain)
self._bigip.refresh_ltm()
existing_nodes = self._bigip.get_nodes()
node_list = list(existing_nodes.keys())
for node_name in node_list:
route_domain = split_ip_with_route_domain(node_name)[1]
if route_domain is None:
break
else:
return
existing_iapps = self._bigip.get_app_svcs()
existing_virtuals = self._bigip.get_virtuals()
existing_policies = self._bigip.get_l7policies()
existing_irules = self._bigip.get_irules()
existing_internal_data_groups = self._bigip.get_internal_data_groups()
existing_pools = self._bigip.get_pools()
delete_iapps = self._get_resource_tasks(existing_iapps, {})[-1]
delete_virtuals = self._get_resource_tasks(existing_virtuals, {})[-1]
delete_policies = self._get_resource_tasks(existing_policies, {})[-1]
delete_irules = self._get_resource_tasks(existing_irules, {})[-1]
delete_internal_data_groups = self._get_resource_tasks(
existing_internal_data_groups, {})[-1]
delete_pools = self._get_resource_tasks(existing_pools, {})[-1]
delete_monitors = self._get_monitor_tasks({})[-1]
delete_nodes = self._get_resource_tasks(existing_nodes, {})[-1]
delete_tasks = delete_iapps + delete_virtuals + delete_policies + \
delete_irules + delete_internal_data_groups + delete_pools + \
delete_monitors + delete_nodes
taskq_len = len(delete_tasks)
finished = False
LOGGER.debug("Removing legacy resources...")
while not finished:
LOGGER.debug("Legacy cleanup service task queue length: %d",
taskq_len)
# Must remove all resources that depend on nodes (vs, pools, ???)
delete_tasks = self._delete_resources(delete_tasks)
tasks_remaining = len(delete_tasks)
# Did the task queue shrink?
if tasks_remaining >= taskq_len or tasks_remaining == 0:
# No, we have stopped making progress.
finished = True
# Reset the taskq length.
taskq_len = tasks_remaining
def _post_deploy(self, desired_config, default_route_domain):
"""Perform post-deployment service tasks/cleanup.
Remove superfluous resources that could not be inferred from the
desired config.
"""
LOGGER.debug("Perform post-deploy service tasks...")
self._bigip.refresh_ltm()
# Delete/update nodes (no creation)
LOGGER.debug("Post-process nodes.")
existing = self._bigip.get_nodes()
desired = self._desired_nodes(default_route_domain)
(update_nodes, delete_nodes) = \
self._get_resource_tasks(existing, desired)[1:3]
self._update_resources(update_nodes)
self._delete_resources(delete_nodes)
# Delete extraneous virtual addresses
LOGGER.debug("Remove superfluous virtual addresses.")
desired = desired_config.get('virtual_addresses', dict())
(referenced, unreferenced) = (
self._bigip.get_virtual_address_references()
)
delete_vaddrs = self._get_resource_tasks(unreferenced, desired)[2]
self._delete_resources(delete_vaddrs)
# Get the set of virtual addresses that are created by virtuals
# but not in the set of desired virtual addresses.
update_vaddrs = list()
auto_created = self._get_resource_tasks(referenced, desired)[2]
for vaddr in auto_created:
if vaddr.data['enabled'] == "no":
vaddr.data['enabled'] = "yes"
update_vaddrs.append(vaddr)
self._update_resources(update_vaddrs)
def deploy_ltm( # pylint: disable=too-many-locals,too-many-statements
self, desired_config, default_route_domain):
"""Deploy the managed partition with the desired LTM config.
:param desired_config: A dictionary with the configuration
to be applied to the bigip managed partition.
:returns: The number of tasks that could not be completed.
"""
# Remove legacy resources (pre RD-named resources) before deploying
# new configuration
if ServiceConfigDeployer.first_pass:
ServiceConfigDeployer.first_pass = False
self._pre_deploy_legacy_ltm_cleanup()
self._bigip.refresh_ltm()
# Get the list of virtual address tasks
LOGGER.debug("Getting virtual address tasks...")
existing = self._bigip.get_virtual_addresses()
desired = desired_config.get('virtual_addresses', dict())
(create_vaddrs, update_vaddrs) = (
self._get_resource_tasks(existing, desired))[0:2]
# Get the list of virtual server tasks
LOGGER.debug("Getting virtual server tasks...")
existing_virtuals = self._bigip.get_virtuals()
desired = desired_config.get('virtuals', dict())
(create_virtuals, update_virtuals, delete_virtuals) = (
self._get_resource_tasks(existing_virtuals, desired))
# Get the list of pool tasks
LOGGER.debug("Getting pool tasks...")
existing_pools = self._bigip.get_pools()
desired = desired_config.get('pools', dict())
(create_pools, update_pools, delete_pools) = (
self._get_resource_tasks(existing_pools, desired))
# Get the list of irule tasks
LOGGER.debug("Getting iRule tasks...")
existing = self._bigip.get_irules()
desired = desired_config.get('irules', dict())
(create_irules, update_irules, delete_irules) = (
self._get_resource_tasks(existing, desired))
# Get the list of internal data group tasks
LOGGER.debug("Getting InternalDataGroup tasks...")
existing = self._bigip.get_internal_data_groups()
desired = desired_config.get('internaldatagroups', dict())
(create_internal_data_groups, update_internal_data_groups,
delete_internal_data_groups) = (
self._get_resource_tasks(existing, desired))
# Get the list of policy tasks
LOGGER.debug("Getting policy tasks...")
existing = self._bigip.get_l7policies()
desired = desired_config.get('l7policies', dict())
(create_policies, update_policies, delete_policies) = (
self._get_resource_tasks(existing, desired))
# Get the list of iapp tasks
LOGGER.debug("Getting iApp tasks...")
existing_iapps = self._bigip.get_app_svcs()
desired = desired_config.get('iapps', dict())
(create_iapps, update_iapps, delete_iapps) = (
self._get_resource_tasks(existing_iapps, desired))
# Get the list of monitor tasks
LOGGER.debug("Getting monitor tasks...")
(create_monitors, update_monitors, delete_monitors) = (
self._get_monitor_tasks(desired_config))
LOGGER.debug("Building task lists...")
create_tasks = create_vaddrs + create_monitors + \
create_pools + create_internal_data_groups + create_irules + \
create_policies + create_virtuals + create_iapps
update_tasks = update_vaddrs + update_monitors + \
update_pools + update_internal_data_groups + update_irules + \
update_policies + update_virtuals + update_iapps
delete_tasks = delete_iapps + delete_virtuals + delete_policies + \
delete_irules + delete_internal_data_groups + delete_pools + \
delete_monitors
taskq_len = len(create_tasks) + len(update_tasks) + len(delete_tasks)
taskq_len = self._run_tasks(
taskq_len, create_tasks, update_tasks, delete_tasks)
self._post_deploy(desired_config, default_route_domain)
return taskq_len
def deploy_net(self, desired_config): # pylint: disable=too-many-locals
"""Deploy the managed partition with | |
<filename>pokemon.py
from .utils import *
from .tags import Tags
from aqt.qt import *
from aqt.utils import tooltip
# Nickname Settings
def Nickname():
deckmonlist, f = get_pokemons()
if deckmonlist is None:
return
displaylist = []
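# Each deckmonlist entry appears to be [species name, deck (or tag) id, level],
# with an optional fourth element holding a nickname (see the len(item) == 4
# branch below).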
for item in deckmonlist:
deckname = mw.col.decks.name(item[1])
if len(item) == 4:
if item[2] < 5:
displaytext = "%s - Egg from %s" % (item[3], deckname)
elif item[0].startswith("Eevee"):
displaytext = "%s - Eevee (Level %s) from %s" % (
item[3], int(item[2]), deckname)
else:
displaytext = "%s - %s (Level %s) from %s" % (
item[3], item[0], int(item[2]), deckname)
else:
if item[2] < 5:
displaytext = "Egg from %s" % (deckname)
elif item[0].startswith("Eevee"):
displaytext = "Eevee (Level %s) from %s" % (
int(item[2]), deckname)
else:
displaytext = "%s (Level %s) from %s" % (
item[0], int(item[2]), deckname)
displaylist.append(displaytext)
totallist = list(zip(deckmonlist, displaylist))
nicknamewindow = QWidget()
inp, ok = QInputDialog.getItem(
nicknamewindow, "Pokemanki", "Choose a Pokémon who you would like to give a new nickname", displaylist, 0, False)
deckmon = []
nickname = ""
if ok and inp:
for thing in totallist:
if inp in thing:
deckmon = thing[0]
displaytext = inp
if not deckmon:
return
if len(deckmon) == 4:
nickname = deckmon[3]
inp, ok = QInputDialog.getText(nicknamewindow, "Pokemanki", (
"Enter a new nickname for %s (leave blank to remove nickname)" % displaytext))
if ok:
if inp:
nickname = inp
deckmon = [deckmon[0], deckmon[1], deckmon[2], nickname]
newnickname = QMessageBox()
newnickname.setWindowTitle("Pokemanki")
if int(deckmon[2]) < 5:
newnickname.setText(
"New nickname given to Egg - %s" % (nickname))
elif deckmon[0].startswith("Eevee"):
newnickname.setText(
"New nickname given to Eevee - %s" % (nickname))
else:
newnickname.setText(
"New nickname given to %s - %s" % (deckmon[0], nickname))
newnickname.exec_()
else:
deckmon = [deckmon[0], deckmon[1], deckmon[2]]
nicknameremoved = QMessageBox()
nicknameremoved.setWindowTitle("Pokemanki")
if int(deckmon[2]) < 5:
nicknameremoved.setText("Nickname removed from Egg")
elif deckmon[0].startswith("Eevee"):
nicknameremoved.setText("Nickname removed from Eevee")
else:
nicknameremoved.setText(
"Nickname removed from %s" % deckmon[0])
nicknameremoved.exec_()
modifieddeckmonlist = []
for item in deckmonlist:
if item[1] == deckmon[1]:
modifieddeckmonlist.append(deckmon)
else:
modifieddeckmonlist.append(item)
if f:
write_json("_tagmon.json", modifieddeckmonlist)
else:
write_json("_pokemanki.json", modifieddeckmonlist)
def Toggle():
window = QWidget()
items = ("Decks (Default)", "Tags")
by = get_json("_decksortags.json")
default = 0
if by:
default = 1
inp, ok = QInputDialog.getItem(
window, "Pokemanki", "Choose how you would like Pokemanki to assign you Pokémon.", items, default, False)
if ok and inp:
if inp == "Tags":
write_json("_decksortags.json", inp)
tags = Tags()
tags.tagMenu()
settingschanged = QMessageBox()
settingschanged.setWindowTitle("Pokemanki")
settingschanged.setText(
"Please restart Anki to see your updated Pokémon.")
settingschanged.exec_()
else:
write_json("_decksortags.json", "")
settingschanged = QMessageBox()
settingschanged.setWindowTitle("Pokemanki")
settingschanged.setText(
"Please restart Anki to see your updated Pokémon.")
settingschanged.exec_()
# Threshold Settings
def ThresholdSettings():
global thresholdlist
# Find recommended number of cards for starter Pokemon threshold (based on deck with highest number of cards).
decklist = mw.col.decks.allIds()
sumlist = []
for deck in decklist:
sumlist.append(len(mw.col.decks.cids(deck)))
recommended = .797 * max(sumlist)
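# Heuristic: the recommended starter threshold is roughly 80% (0.797) of the
# card count of the largest deck.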
# Refresh threshold settings
thresholdlist = get_json("_pokemankisettings.json")
# Make settings window (input dialog)
window = QWidget()
inp, ok = QInputDialog.getInt(window, "Pokemanki", (
"Change number of cards needed in a deck to get a starter Pokémon (recommended %d)" % recommended), value=thresholdlist[4])
if ok:
# Make sure threshold is at least 10
if inp < 10:
error = QMessageBox()
error.setWindowTitle("Pokemanki")
error.setText("Number must be at least ten")
error.exec_()
# Change settings and save them if the threshold is changed
elif inp != thresholdlist[4]:
newthresholdlist = [
int(0.1*inp), int(0.25*inp), int(0.5*inp), int(0.75*inp), int(inp)]
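# e.g. inp == 100 gives thresholds [10, 25, 50, 75, 100]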
write_json("_pokemankisettings.json", newthresholdlist)
# Message box confirming change
settingschanged = QMessageBox()
settingschanged.setWindowTitle("Pokemanki")
settingschanged.setText("Your settings have been changed")
settingschanged.exec_()
# Show the window
window.show()
# Reset Pokemon
def ResetPokemon():
# Make message box
resetwindow = QMessageBox()
resetwindow.setWindowTitle("Pokemanki")
resetwindow.setText("\n".join((
"Are you sure you want to reset your Pokémon?",
"This will reset everything including everstone, settings stored in collection.media, etc.",
"All your pokemons will be lost - both in deck and tag mode."
)))
resetwindow.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
resetwindow.setDefaultButton(QMessageBox.No)
resetresult = resetwindow.exec_()
# Clear pokemanki.json if Yes
if resetresult == QMessageBox.Yes:
reset_files = [
"_pokemanki.json", "_tagmon.json",
"_alolanlist.json", "_everstonelist.json", "_everstonepokemonlist.json", "_megastonelist.json",
"_pokemankisettings.json", "_tagmonsettings.json", "_prestigelist.json", "_tagmon.json", "_tags.json", "_trades.json"
]
for fname in reset_files:
write_json(fname, {})
# TODO reset everstone? and other stuff?
# Message box confirming reset
resetdone = QMessageBox()
resetdone.setWindowTitle("Pokemanki")
resetdone.setText("Pokemon reset")
resetdone.exec_()
def MovetoBottom():
settingschanged = QMessageBox()
settingschanged.setWindowTitle("Pokemanki")
settingschanged.setText(
"Please restart Anki to see your updated settings.")
settingschanged.exec_()
def MovetoTop():
settingschanged = QMessageBox()
settingschanged.setWindowTitle("Pokemanki")
settingschanged.setText(
"Please restart Anki to see your updated settings.")
settingschanged.exec_()
def giveEverstone():
pokemon, f = get_pokemons()
if pokemon is None:
return
everstonelist = get_json("_everstonelist.json", default=[])
everstonepokemonlist = get_json("_everstonepokemonlist.json", default=[])
everstoneables = []
for item in pokemon:
if f:
cb = ("%s (Level %s) from %s" % (item[0], item[2], item[1]))
else:
cb = ("%s (Level %s) from %s" %
(item[0], item[2], mw.col.decks.name(item[1])))
if item[1] in everstonelist:
continue
elif cb in everstoneables:
continue
else:
everstoneables.append(cb)
if not everstoneables:
tooltip("You don't have any pokemons that can get an everstone")
return
window = QWidget()
inp, ok = QInputDialog.getItem(
window, "Pokemanki", "Select a Pokemon you would like to give an everstone to.", sorted(everstoneables), 0, False)
if inp and ok:
textlist = inp.split(" from ")
item = textlist[1]
everstone_pokemon_name = inp.split(" (Level ")[0]
if f:
everstonelist.append(item)
everstonepokemonlist.append(everstone_pokemon_name)
else:
everstonelist.append(mw.col.decks.id(item))
everstonepokemonlist.append(everstone_pokemon_name)
settingschanged = QMessageBox()
settingschanged.setWindowTitle("Pokemanki")
settingschanged.setText("Please restart Anki to see changes.")
settingschanged.exec_()
write_json("_everstonelist.json", everstonelist)
write_json("_everstonepokemonlist.json", everstonepokemonlist)
def takeEverstone():
pokemon, f = get_pokemons()
if pokemon is None:
return
everstonelist = get_json("_everstonelist.json", [])
everstonepokemonlist = get_json("_everstonepokemonlist.json", [])
if not everstonelist:
noeverstone = QMessageBox()
noeverstone.setWindowTitle("Pokemanki")
noeverstone.setText("None of your Pokémon are holding everstones.")
noeverstone.exec_()
return
possibleuneverstones = []
for thing in everstonelist:
for item in pokemon:
if item[1] == thing:
if f:
cb = ("%s from %s" % (item[0], item[1]))
else:
cb = ("%s from %s" % (item[0], mw.col.decks.name(item[1])))
if cb in possibleuneverstones:
continue
else:
possibleuneverstones.append(cb)
else:
continue
window = QWidget()
inp, ok = QInputDialog.getItem(
window, "Pokemanki", "Select a Pokemon whose everstone you would like to take.", sorted(possibleuneverstones), 0, False)
if inp and ok:
textlist = inp.split(" from ")
item = textlist[1]
if f:
everstonelist.remove(item)
everstonepokemonlist.remove(textlist[0])
else:
everstonelist.remove(mw.col.decks.id(item))
everstonepokemonlist.remove(textlist[0])
settingschanged = QMessageBox()
settingschanged.setWindowTitle("Pokemanki")
settingschanged.setText("Please restart Anki to see your changes.")
settingschanged.exec_()
write_json("_everstonelist.json", everstonelist)
def giveMegastone():
pokemon, f = get_pokemons()
if pokemon is None:
return
megastonelist = get_json("_megastonelist.json", [])
megastoneables = []
for item in pokemon:
if item[2] >= 70:
if f:
cb = ("%s (Level %s) from %s" % (item[0], item[2], item[1]))
else:
cb = ("%s (Level %s) from %s" %
(item[0], item[2], mw.col.decks.name(item[1])))
if item[1] in megastonelist:
continue
elif cb in megastoneables:
continue
else:
megastoneables.append(cb)
else:
continue
if not megastoneables:
tooltip("You don't have any pokemons that can get a mega stone")
return
window = QWidget()
inp, ok = QInputDialog.getItem(
window, "Pokemanki", "Select a Pokemon you would like to give a mega stone to", sorted(megastoneables), 0, False)
if inp and ok:
textlist = inp.split(" from ")
item = textlist[1]
if f:
megastonelist.append(item)
else:
megastonelist.append(mw.col.decks.id(item))
settingschanged = QMessageBox()
settingschanged.setWindowTitle("Pokemanki")
settingschanged.setText("Please restart Anki to see your changes.")
settingschanged.exec_()
write_json("_megastonelist.json", megastonelist)
def takeMegastone():
pokemon, f = get_pokemons()
if pokemon is None:
return
megastonelist = get_json("_megastonelist.json", [])
if not megastonelist:
nomegastone = QMessageBox()
nomegastone.setWindowTitle("Pokemanki")
nomegastone.setText("None of your Pokémon are holding mega stones.")
nomegastone.exec_()
return
possibleunmegastones = []
for thing in megastonelist:
for item in pokemon:
if item[1] == thing:
if f:
cb = ("%s from %s" % (item[0], item[1]))
else:
cb = ("%s from %s" % (item[0], mw.col.decks.name(item[1])))
if cb in possibleunmegastones:
continue
else:
possibleunmegastones.append(cb)
else:
continue
window = QWidget()
inp, ok = QInputDialog.getItem(
window, "Pokemanki", "Select a Pokemon whose mega stone you would like to take", sorted(possibleunmegastones), 0, False)
if inp and ok:
textlist = inp.split(" from ")
item = textlist[1]
if f:
megastonelist.remove(item)
else:
megastonelist.remove(mw.col.decks.id(item))
settingschanged = QMessageBox()
settingschanged.setWindowTitle("Pokemanki")
settingschanged.setText("Please restart Anki to see your changes.")
settingschanged.exec_()
write_json("_megastonelist.json", megastonelist)
def giveAlolanPassport():
pokemon, f = get_pokemons()
if pokemon is None:
return
alolanlist = get_json("_alolanlist.json", [])
alolanables = []
for item in pokemon:
if f:
cb = ("%s (Level %s) from %s" % (item[0], item[2], item[1]))
else:
cb = ("%s (Level %s) from %s" %
(item[0], item[2], mw.col.decks.name(item[1])))
if item[1] in alolanlist:
continue
elif cb in alolanables:
continue
else:
alolanables.append(cb)
if not alolanables:
tooltip("You don't have any pokemons | |
<filename>cli/src/klio_cli/commands/job/gke.py
# Copyright 2021 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
import glom
import yaml
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
from klio_cli import __version__ as klio_cli_version
from klio_cli.commands import base
from klio_cli.utils import docker_utils
# Regex according to https://kubernetes.io/docs/concepts/overview/
# working-with-objects/labels/#syntax-and-character-set
K8S_LABEL_KEY_PREFIX_REGEX = re.compile(
r"^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])"
r"(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$"
)
K8S_LABEL_KEY_NAME_REGEX = re.compile(
r"^[a-zA-Z0-9]$|^[a-zA-Z0-9]([a-zA-Z0-9\._\-]){,61}[a-zA-Z0-9]$"
)
K8S_LABEL_VALUE_REGEX = re.compile(
r"^[a-zA-Z0-9]{0,1}$|^[a-zA-Z0-9]([a-zA-Z0-9\._\-]){,61}[a-zA-Z0-9]$"
)
K8S_RESERVED_KEY_PREFIXES = ("kubernetes.io", "k8s.io")
class GKECommandMixin(object):
GKE_UI_LINK_FORMAT = (
"https://console.cloud.google.com/kubernetes"
"/deployment/{region}/{cluster}/{namespace}/{app}"
"/overview?project={gke_project}"
)
# NOTE : This command requires a job_dir attribute
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._deployment_config = None
self._kubernetes_client = None
self._kubernetes_active_context = None
@property
def kubernetes_client(self):
if not self._kubernetes_client:
# TODO: This grabs configs from '~/.kube/config'. @shireenk
# We should add a check that this file exists
# If it does not exist then we should create configurations.
# See link:
# https://github.com/kubernetes-client/python-base/blob/master/config/kube_config.py#L825
k8s_config.load_kube_config()
self._kubernetes_client = k8s_client.AppsV1Api()
return self._kubernetes_client
@property
def kubernetes_active_context(self):
if not self._kubernetes_active_context:
_, active_context = k8s_config.list_kube_config_contexts()
self._kubernetes_active_context = active_context
return self._kubernetes_active_context
@property
def deployment_config(self):
if not self._deployment_config:
path_to_deployment_config = os.path.join(
self.job_dir, "kubernetes", "deployment.yaml"
)
with open(path_to_deployment_config) as f:
self._deployment_config = yaml.safe_load(f)
return self._deployment_config
def _deployment_exists(self):
"""Check to see if a deployment already exists
Returns:
bool: Whether a deployment for the given name-namespace
combination exists
"""
dep = self.deployment_config
namespace = glom.glom(dep, "metadata.namespace")
deployment_name = glom.glom(dep, "metadata.name")
resp = self.kubernetes_client.list_namespaced_deployment(
namespace=namespace,
)
for i in resp.items:
if i.metadata.name == deployment_name:
return True
return False
def _update_deployment(self, replica_count=None, image_tag=None):
"""This will update a deployment with a provided
replica count or image tag
Args:
replica_count (int): Number of replicas the
deployment will be updated with.
If not provided then this will not be changed
image_tag (str): The image tag that will be applied
to the updated deployment.
If not provided then this will not be updated.
"""
deployment_name = glom.glom(self.deployment_config, "metadata.name")
namespace = glom.glom(self.deployment_config, "metadata.namespace")
log_messages = []
if replica_count is not None:
glom.assign(
self._deployment_config, "spec.replicas", replica_count
)
log_messages.append(f"Scaled deployment to {replica_count}")
if image_tag:
image_path = "spec.template.spec.containers.0.image"
image_base = glom.glom(self._deployment_config, image_path)
# Strip off existing image tag if present
image_base = re.split(":", image_base)[0]
full_image = image_base + f":{image_tag}"
glom.assign(self._deployment_config, image_path, full_image)
log_messages.append(
f"Update deployment with image tag {image_tag}"
)
resp = self.kubernetes_client.patch_namespaced_deployment(
name=deployment_name,
namespace=namespace,
body=self.deployment_config,
)
log_messages.append(f"Update deployment with {resp.metadata.name}")
for message in log_messages:
logging.info(message)
ui_link = self._build_ui_link_from_current_context()
logging.info(f"Deployment details: {ui_link}")
def _build_ui_link_from_current_context(self):
context_name = self.kubernetes_active_context["name"]
# context seems to follow the format
# gke_{project}_{region}_{clustername}
_, gke_project, region, current_cluster = context_name.split("_")
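# e.g. a (hypothetical) context name "gke_my-project_us-central1_my-cluster"
# would yield gke_project="my-project", region="us-central1",
# current_cluster="my-cluster".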
dep = self.deployment_config
namespace = glom.glom(dep, "metadata.namespace")
deployment_name = glom.glom(dep, "metadata.name")
link = GKECommandMixin.GKE_UI_LINK_FORMAT.format(
region=region,
cluster=current_cluster,
namespace=namespace,
app=deployment_name,
gke_project=gke_project,
)
return link
class RunPipelineGKE(GKECommandMixin, base.BaseDockerizedPipeline):
def __init__(
self, job_dir, klio_config, docker_runtime_config, run_job_config
):
super().__init__(job_dir, klio_config, docker_runtime_config)
self.run_job_config = run_job_config
def _apply_image_to_deployment_config(self):
image_tag = self.docker_runtime_config.image_tag
pipeline_options = self.klio_config.pipeline_options
if image_tag:
dep = self.deployment_config
image_path = "spec.template.spec.containers.0.image"
# TODO: If more than one image deployed,
# we need to search for correct container
image_base = glom.glom(dep, image_path)
# Strip off existing image tag if any
image_base = re.split(":", image_base)[0]
full_image = f"{image_base}:{image_tag}"
glom.assign(self._deployment_config, image_path, full_image)
# Check to see if the kubernetes image to be deployed is the same
# image that is built
k8s_image = glom.glom(self.deployment_config, image_path)
built_image_base = pipeline_options.worker_harness_container_image
built_image = f"{built_image_base}:{image_tag}"
if built_image != k8s_image:
logging.warning(
f"Image deployed by kubernetes {k8s_image} does not match "
f"the built image {built_image}. "
"This may result in an `ImagePullBackoff` for the deployment. "
"If this is not intended, please change "
"`pipeline_options.worker_harness_container_image` "
"and rebuild or change the container image"
"set in kubernetes/deployment.yaml file."
)
@staticmethod
def _validate_labels(label_path, label_dict):
help_url = (
"https://kubernetes.io/docs/concepts/overview/working-with-objects"
"/labels/#syntax-and-character-set"
)
for key, value in label_dict.items():
# Both key and value must be strings
if not isinstance(key, str):
raise ValueError(f"Key '{label_path}.{key}' must be a string.")
if not isinstance(value, str):
raise ValueError(
f"Value '{value}' for key '{label_path}.{key}' must be a "
"string"
)
# Handle any prefixes in keys
if "/" in key:
# validate that there's at most one forward slash
prefix, *name = key.split("/")
if len(name) > 1:
raise ValueError(
f"Unsupported key name in {label_path}: '{key}' "
f"contains more than one forward slash. See {help_url} "
"for valid label keys."
)
# validate prefix
prefix_match = K8S_LABEL_KEY_PREFIX_REGEX.match(prefix)
if (
prefix_match is None
or prefix in K8S_RESERVED_KEY_PREFIXES
or len(prefix) > 253
):
raise ValueError(
f"Unsupported prefix key name in {label_path}: "
f"'{prefix}'. See {help_url} for valid label key "
"prefixes."
)
key = name[0]
# Validate the key
key_match = K8S_LABEL_KEY_NAME_REGEX.match(key)
if key_match is None:
raise ValueError(
f"Unsupported key name in {label_path}: '{key}'. "
f"See {help_url} for valid label keys."
)
# Validate the value
value_match = K8S_LABEL_VALUE_REGEX.match(value)
if not value_match:
raise ValueError(
f"Unsupported value '{value}' for '{label_path}.{key}'. "
f"See {help_url} for valid values."
)
def _apply_labels_to_deployment_config(self):
# `metadata.labels` are a best practices thing, but not required
# (these would be "deployment labels"). At least one label defined in
# `spec.template.metadata.labels` is required for k8s deployments
# ("pod labels").
# There also must be at least one "selector label"
# (`spec.selector.matchLabels`) which connects the deployment to pod.
# More info: https://stackoverflow.com/a/54854179
# TODO: add environment labels if/when we support dev/test/prod envs
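# A minimal (hypothetical) deployment.yaml fragment illustrating the three
# label locations this method populates (plus the klio/* pod labels added
# further below):
#
#   metadata:
#     labels:
#       app: my-klio-job          # "deployment labels"
#   spec:
#     selector:
#       matchLabels:
#         app: my-klio-job        # "selector labels"
#         role: mykliojob
#     template:
#       metadata:
#         labels:
#           app: my-klio-job      # "pod labels"
#           role: mykliojob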
metadata_labels, pod_labels, selector_labels = {}, {}, {}
# standard practice labels ("app" and "role")
existing_metadata_labels = glom.glom(
self.deployment_config, "metadata.labels", default={}
)
metadata_app = glom.glom(existing_metadata_labels, "app", default=None)
if not metadata_app:
job_name = self.klio_config.job_name
metadata_labels["app"] = job_name
metadata_labels.update(existing_metadata_labels)
existing_pod_labels = glom.glom(
self.deployment_config, "spec.template.metadata.labels", default={}
)
pod_app = glom.glom(existing_pod_labels, "app", default=None)
pod_role = glom.glom(existing_pod_labels, "role", default=None)
if not pod_app:
pod_app = metadata_labels["app"]
if not pod_role:
# just drop hyphens from `app` value
pod_role = "".join(pod_app.split("-"))
pod_labels["app"] = pod_app
pod_labels["role"] = pod_role
pod_labels.update(existing_pod_labels)
existing_selector_labels = glom.glom(
self.deployment_config, "spec.selector.matchLabels", default={}
)
selector_app = glom.glom(existing_selector_labels, "app", default=None)
selector_role = glom.glom(
existing_selector_labels, "role", default=None
)
if not selector_app:
selector_labels["app"] = pod_labels["app"]
if not selector_role:
selector_labels["role"] = pod_labels["role"]
selector_labels.update(existing_selector_labels)
# klio-specific labels
pod_labels["klio/klio_cli_version"] = klio_cli_version
# deployment labels
deploy_user = os.environ.get("USER", "unknown")
if os.environ.get("CI", "").lower() == "true":
deploy_user = "ci"
pod_labels["klio/deployed_by"] = deploy_user
# any user labels from klio_config.pipeline_options
# note: if pipeline_options.label (singular) is defined in
# klio-job.yaml, klio-core appends it to pipeline_options.labels
# (plural) automatically
user_labels_list = self.klio_config.pipeline_options.labels
# user labels in beam/klio config are lists of strings, where the
# strings are key=value pairs, e.g. "keyfoo=valuebar"
user_labels = {}
for user_label in user_labels_list:
if "=" not in user_label:
# skip - not a valid label; this should technically be
# caught when validating configuration (not yet implemented)
continue
# theoretically user_label could be key=value1=value2, so
# we just take the first one, but value1=value2 is not
# valid and will be caught during validation below.
key, value = user_label.split("=", 1)
user_labels[key] = value
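# e.g. ["team=infra", "env=prod=west"] becomes
# {"team": "infra", "env": "prod=west"}; the second value would then fail
# the value regex check in _validate_labels.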
pod_labels.update(user_labels)
path_to_labels = (
("metadata.labels", metadata_labels),
("spec.selector.matchLabels", selector_labels),
("spec.template.metadata.labels", pod_labels),
)
for label_path, label_dict in path_to_labels:
# raises if not valid
RunPipelineGKE._validate_labels(label_path, label_dict)
glom.assign(
self.deployment_config, label_path, label_dict, missing=dict
)
def _apply_deployment(self):
"""Create a namespaced deploy if the deployment does not already exist.
If the namespaced deployment already exists then
`self.run_job_config.update` will determine if the
deployment will be updated or not.
"""
dep = self.deployment_config
namespace = glom.glom(dep, "metadata.namespace")
deployment_name = glom.glom(dep, "metadata.name")
if not self._deployment_exists():
resp = self.kubernetes_client.create_namespaced_deployment(
body=dep, namespace=namespace
)
deployment_name = resp.metadata.name
current_cluster = self.kubernetes_active_context["name"]
ui_link = self._build_ui_link_from_current_context()
logging.info(
f"Deployment created for {deployment_name} "
f"in cluster {current_cluster}. "
f"Deployment details: {ui_link}"
)
else:
if self.run_job_config.update:
self._update_deployment()
else:
logging.warning(
f"Cannot apply deployment for {deployment_name}. "
"To update an existing deployment, run "
"`klio job run --update`, or set `pipeline_options.update`"
" to `True` in the job's`klio-job.yaml` file. "
"Run `klio job stop` to | |
Test to verify :func: `validate_token` and then authenticate
"""
core, root = core_and_root([make_example_internal_api(self)])
# Authenticate the impersonator (admin user 1)
(response0, json_body0) = authenticate_with_token(
self, root,
tenant_id="111111",
token_id="123456a")
self.assertEqual(200, response0.code)
impersonator_token1 = json_body0["access"]["token"]["id"]
# Authenticate the impersonator (admin user 2)
(response1, json_body1) = authenticate_with_token(
self, root,
tenant_id="222222",
token_id="123456b")
self.assertEqual(200, response1.code)
impersonator_token2 = json_body1["access"]["token"]["id"]
# Authenticate the impersonatee using the username so we know the
# tenant_id to make the validate token id call with 'belongsTo'
(response2, json_body2) = authenticate_with_username_password(
self, root,
username="test1",
tenant_id="12345")
self.assertEqual(200, response2.code)
# Impersonate user test1 using admin user1's token
(response3, json_body3) = impersonate_user(
self, root,
username="test1",
impersonator_token=impersonator_token1)
self.assertEqual(200, response3.code)
impersonated_token1 = json_body3["access"]["token"]["id"]
# Impersonate user test1 using admin user2's token
(response4, json_body4) = impersonate_user(
self, root,
username="test1",
impersonator_token=impersonator_token2)
self.assertEqual(200, response4.code)
impersonated_token2 = json_body4["access"]["token"]["id"]
# validate the impersonated_token1
(response5, json_body5) = self.successResultOf(json_request(
self, root, b"GET",
"http://mybase/identity/v2.0/tokens/{0}?belongsTo=12345".format(
impersonated_token1)
))
self.assertEqual(200, response5.code)
self.assertTrue(json_body5["access"]["RAX-AUTH:impersonator"])
self.assertEqual(json_body5["access"]["RAX-AUTH:impersonator"]["name"],
json_body0["access"]["user"]["name"])
# validate the impersonated_token2
(response6, json_body6) = self.successResultOf(json_request(
self, root, b"GET",
"http://mybase/identity/v2.0/tokens/{0}?belongsTo=12345".format(
impersonated_token2)
))
self.assertEqual(200, response6.code)
self.assertTrue(json_body6["access"]["RAX-AUTH:impersonator"])
self.assertEqual(json_body6["access"]["RAX-AUTH:impersonator"]["name"],
json_body1["access"]["user"]["name"])
def test_response_for_validate_token_with_maas_admin_role(self):
"""
Test to verify :func: `validate_token` when the token_id provided
is of a maas admin user specified in `mimic_presets`.
"""
core, root = core_and_root([make_example_internal_api(self)])
(response, json_body) = self.successResultOf(json_request(
self, root, b"GET",
"http://mybase/identity/v2.0/tokens/this_is_an_impersonator_token"
))
self.assertEqual(200, response.code)
self.assertEqual(json_body["access"]["RAX-AUTH:impersonator"]["roles"][0]["name"],
"monitoring:service-admin")
def test_response_for_validate_token_with_racker_role(self):
"""
Test to verify :func: `validate_token` when the token_id provided
is of a racker specified in `mimic_presets`.
"""
core, root = core_and_root([make_example_internal_api(self)])
(response, json_body) = self.successResultOf(json_request(
self, root, b"GET",
"http://mybase/identity/v2.0/tokens/this_is_a_racker_token"
))
self.assertEqual(200, response.code)
self.assertEqual(json_body["access"]["RAX-AUTH:impersonator"]["roles"][0]["name"],
"Racker")
def test_response_for_validate_token_when_invalid(self):
"""
Test to verify :func: `validate_token` when the token_id provided
is invalid, as specified in `mimic_presets`.
"""
core, root = core_and_root([make_example_internal_api(self)])
token = get_presets["identity"]["token_fail_to_auth"][0]
(response, json_body) = self.successResultOf(json_request(
self, root, b"GET",
"http://mybase/identity/v2.0/tokens/{0}".format(token)
))
self.assertEqual(401, response.code)
def test_response_for_validate_token_with_observer_role(self):
"""
Test to verify :func: `validate_token` when the tenant_id provided
is of an observer role, as specified in `mimic_presets`.
"""
core, root = core_and_root([make_example_internal_api(self)])
token = get_presets["identity"]["observer_role"][0]
(response, json_body) = self.successResultOf(json_request(
self, root, b"GET",
"http://mybase/identity/v2.0/tokens/any_token?belongsTo={0}".format(token)
))
self.assertEqual(200, response.code)
self.assertEqual(json_body["access"]["user"]["roles"][0]["name"],
"observer")
self.assertEqual(json_body["access"]["user"]["roles"][0]["description"],
"Global Observer Role.")
def test_response_for_validate_token_with_creator_role(self):
"""
Test to verify :func: `validate_token` when the tenant_id provided
is of a creator role, as specified in `mimic_presets`.
"""
core, root = core_and_root([make_example_internal_api(self)])
token = get_presets["identity"]["creator_role"][0]
(response, json_body) = self.successResultOf(json_request(
self, root, b"GET",
"http://mybase/identity/v2.0/tokens/any_token?belongsTo={0}".format(token)
))
self.assertEqual(200, response.code)
self.assertEqual(json_body["access"]["user"]["roles"][0]["name"],
"creator")
self.assertEqual(json_body["access"]["user"]["roles"][0]["description"],
"Global Creator Role.")
def test_response_for_validate_token_with_admin_and_observer_role(self):
"""
Test to verify :func: `validate_token` when the tenant_id provided
is of an admin role, as specified in `mimic_presets`.
"""
core, root = core_and_root([make_example_internal_api(self)])
token = get_presets["identity"]["admin_role"][0]
(response, json_body) = self.successResultOf(json_request(
self, root, b"GET",
"http://mybase/identity/v2.0/tokens/any_token?belongsTo={0}".format(token)
))
self.assertEqual(200, response.code)
self.assertEqual(json_body["access"]["user"]["roles"][0]["name"],
"admin")
self.assertEqual(json_body["access"]["user"]["roles"][0]["description"],
"Global Admin Role.")
self.assertEqual(json_body["access"]["user"]["roles"][1]["name"],
"observer")
self.assertEqual(json_body["access"]["user"]["roles"][1]["description"],
"Global Observer Role.")
def test_response_for_list_users(self):
"""
Test to verify :func: `get_users_details`.
"""
core, root = core_and_root([make_example_internal_api(self)])
(response, json_body) = self.successResultOf(json_request(
self, root, b"GET",
"http://mybase/identity/v2.0/users?name=random_user"
))
self.assertEqual(200, response.code)
self.assertTrue(json_body['user']['RAX-AUTH:domainId'])
def test_response_for_list_users_after_authentication(self):
"""
Test to verify :func: `get_users_details`.
"""
core, root = core_and_root([make_example_internal_api(self)])
(response, json_body) = authenticate_with_token(
self, root, tenant_id="12345")
self.assertEqual(response.code, 200)
username = json_body["access"]["user"]["name"]
user_id = json_body["access"]["user"]["id"]
(response, json_body) = self.successResultOf(json_request(
self, root, b"GET",
"http://mybase/identity/v2.0/users?name={0}".format(username)
))
self.assertEqual(200, response.code)
self.assertEqual(json_body['user']['RAX-AUTH:domainId'], "12345")
self.assertEqual(json_body['user']['username'], username)
self.assertEqual(json_body['user']['id'], user_id)
class AuthIntegrationTests(SynchronousTestCase):
"""
Tests that combine multiple auth calls together and assure that they
return consistent data.
"""
def test_user_for_tenant_then_impersonation(self):
"""
After authenticating once as a particular tenant, get the user for that
tenant, then attempt to impersonate that user. The tenant IDs should
be the same. This is an autoscale regression test.
"""
core, root = core_and_root([make_example_internal_api(self)])
tenant_id = "111111"
# authenticate as that user - this is not strictly necessary, since
# getting a user for a tenant should work regardless of whether a user
# was previously in the system, but this will ensure that we can check
# the username
response, json_body = authenticate_with_username_password(
self, root, username="my_user", tenant_id=tenant_id)
self.assertEqual(200, response.code)
self.assertEqual(tenant_id,
json_body['access']['token']['tenant']['id'])
# get user for tenant
response, json_body = self.successResultOf(json_request(
self, root, b"GET", "/identity/v1.1/mosso/111111"))
self.assertEqual(301, response.code)
user = json_body['user']['id']
self.assertEqual("my_user", user)
# impersonate this user
response, json_body = impersonate_user(self, root, username=user)
self.assertEqual(200, response.code)
token = json_body["access"]['token']["id"]
# get endpoints for this token, see what the tenant is
response, json_body = self.successResultOf(json_request(
self, root, b"GET",
"/identity/v2.0/tokens/{0}/endpoints".format(token)))
self.assertEqual(200, response.code)
self.assertEqual(tenant_id,
json_body["endpoints"][0]["tenantId"])
# authenticate with this token and see what the tenant is
response, json_body = authenticate_with_token(
self, root, token_id=token, tenant_id=tenant_id)
self.assertEqual(tenant_id,
json_body['access']['token']['tenant']['id'])
def test_api_key_then_other_token_same_tenant(self):
"""
After authenticating as a particular tenant with an API key,
authenticate as the same tenant with a token that is different
from the one returned by the API key response. Both tokens
should be accessing the same session.
"""
core, root = core_and_root([make_example_internal_api(self)])
tenant_id = "123456"
response, json_body = authenticate_with_api_key(
self, root, tenant_id=tenant_id)
self.assertEqual(200, response.code)
username_from_api_key = json_body["access"]["user"]["name"]
response, json_body = authenticate_with_token(
self, root, token_id="fake_<PASSWORD>", tenant_id=tenant_id)
self.assertEqual(200, response.code)
username_from_token = json_body["access"]["user"]["name"]
# Since usernames are generated if not specified, and token
# authentication does not specify a username, it is sufficient
# to check that the usernames are equal. If the sessions are
# distinct, then the token would have generated a UUID for its
# username.
self.assertEqual(username_from_api_key, username_from_token)
auth_behavior_endpoint = (
"http://mybase/mimic/v1.1/IdentityControlAPI/behaviors/auth")
@behavior_tests_helper_class
class IdentityAuthBehaviorControlPlane(object):
"""
Helper object used to generate tests for Identity auth behavior
CRUD operations.
"""
criteria = [{"username": "failme"}]
names_and_params = (
("fail",
{"message": "Auth failure", "code": 500, "type": "identityFault"}),
("fail",
{"message": "Invalid creds", "code": 403})
)
def __init__(self, test_case):
"""
Set up the criteria, api mock, etc.
"""
self.test_case = test_case
_, self.root = core_and_root([])
self.behavior_api_endpoint = auth_behavior_endpoint
def trigger_event(self):
"""
Authenticate with the username "failme".
"""
return authenticate_with_username_password(
self.test_case, self.root, username="failme")
def validate_injected_behavior(self, name_and_params, response, body):
"""
Given the behavior that is expected, validate the response and body.
"""
name, params = name_and_params
self.test_case.assertEquals(response.code, params['code'])
if params['code'] == 500:
expected = {"identityFault": {"message": "Auth failure",
"code": 500}}
else:
expected = {"unauthorized": {"message": "Invalid creds",
"code": 403}}
self.test_case.assertEquals(body, expected)
def validate_default_behavior(self, response, body):
"""
Validate the response and body of a successful authentication.
"""
self.test_case.assertEquals(response.code, 200)
self.test_case.assertIn('access', body)
class IdentityBehaviorInjectionTests(SynchronousTestCase):
"""
Tests for specific failures and/or criteria.
"""
def test_username_criteria_works_on_all_auth_methods_with_username(self):
"""
Failure injection based on the username criteria will work on
username/password, username/api-key, and impersonation. But not
token ID, even if it's with the same tenant.
"""
core, root = core_and_root([])
fail_params = {"message": "Invalid creds", "code": 403}
# make sure a user exists in mimic with the given username and tenant
# associated
response, body = authenticate_with_username_password(
self, root, username="failme", tenant_id="123456")
self.assertEqual(response.code, 200)
# username auths fail
register_behavior(self, root, auth_behavior_endpoint,
behavior_name="fail",
criteria=[{"username": "failme"}],
parameters=fail_params)
for auth_func in (authenticate_with_username_password,
authenticate_with_api_key,
impersonate_user):
response, body = auth_func(self, root, username="failme")
self.assertEqual(response.code, 403)
self.assertEqual(body, {"unauthorized": fail_params})
# token auth with that tenant ID succeeds
response, body = authenticate_with_token(
self, root, tenant_id="123456")
self.assertEqual(response.code, 200)
def test_tenant_id_criteria_works_on_all_auth_methods_with_tenant(self):
"""
Failure injection based on the tenant ID criteria will work on
username/password, username/api-key, and token.
But not impersonation.
"""
core, root = core_and_root([])
fail_params = {"message": "Invalid creds", "code": 403}
# make sure a user exists in mimic with the given username and tenant
# associated
response, body = authenticate_with_username_password(
self, root, username="failme", tenant_id="123456")
self.assertEqual(response.code, 200)
# tenant auths fail
register_behavior(self, root, auth_behavior_endpoint,
behavior_name="fail",
criteria=[{"tenant_id": "123456"}],
parameters=fail_params)
for auth_func in (authenticate_with_username_password,
authenticate_with_api_key,
authenticate_with_token):
response, body = auth_func(self, root, tenant_id="123456")
self.assertEqual(response.code, 403)
self.assertEqual(body, {"unauthorized": fail_params})
# impersonation with that username succeeds
response, body = impersonate_user(self, root, username="failme")
self.assertEqual(response.code, 200)
def test_string_errors_as_well_as_json_errors(self):
"""
Failure injection will return a string error response as well as a
json response.
"""
core, root = core_and_root([])
fail_params = {"message": "Failure of JSON", "code": 500,
"type": "string"}
register_behavior(self, root, auth_behavior_endpoint,
behavior_name="fail",
criteria=[{"username": "failme"}],
parameters=fail_params)
response, body = authenticate_with_username_password(
self, root, username="failme", request_func=request_with_content)
self.assertEqual(response.code, 500)
self.assertEqual(body, b"Failure of JSON")
class IdentityNondedicatedFixtureTests(SynchronousTestCase):
def test_non_dedicated_tokens(self):
"""
Obtain Identity entries when presented tokens issued to non-dedicated users
"""
url = "/identity/v2.0/tokens"
core, root = core_and_root([])
(response, content) = self.successResultOf(
json_request(self, root, b"GET", url + "/OneTwo"))
self.assertEqual(200, response.code)
self.assertEqual(content["access"]["token"]["tenant"]["id"], "135790")
(response, content) = self.successResultOf(
json_request(self, root, b"GET", url + "/ThreeFour"))
self.assertEqual(200, response.code)
(response, content) = self.successResultOf(
json_request(self, root, b"GET", url | |
<filename>astroquery/esa/hsa/core.py<gh_stars>0
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import cgi
import os
import re
import shutil
from pathlib import Path
from astropy import units as u
from astroquery.utils import commons
from astroquery import log
from astroquery.exceptions import LoginError
from astroquery.query import BaseQuery
from astroquery.utils.tap.core import Tap
from . import conf
__all__ = ['HSA', 'HSAClass']
class HSAClass(BaseQuery):
data_url = conf.DATA_ACTION
metadata_url = conf.METADATA_ACTION
timeout = conf.TIMEOUT
def __init__(self, tap_handler=None):
super().__init__()
if tap_handler is None:
self._tap = Tap(url=self.metadata_url)
else:
self._tap = tap_handler
def download_data(self, *, retrieval_type="OBSERVATION", observation_id=None,
instrument_name=None, filename=None, observation_oid=None,
instrument_oid=None, product_level=None, verbose=False,
download_dir="", cache=True, **kwargs):
"""
Download data from Herschel
Parameters
----------
observation_id : string, optional
id of the observation to be downloaded
The identifier of the observation we want to retrieve, 10 digits
example: 1342195355
retrieval_type : string, optional, default 'OBSERVATION'
The type of product that we want to retrieve
values: OBSERVATION, PRODUCT, POSTCARD, POSTCARDFITS, REQUESTFILE_XML, STANDALONE, UPDP, HPDP
instrument_name : string, optional, default 'PACS'
values: PACS, SPIRE, HIFI
The instrument name, by default 'PACS' if the retrieval_type is 'OBSERVATION'
filename : string, optional, default None
file name to be used to store the file
If the filename is not set it will use the observation_id as filename
verbose : bool, optional, default False
flag to display information about the process
observation_oid : string, optional
Observation internal identifier. This is the database identifier
instrument_oid : string, optional
The database identifier of the instrument
values: 1, 2, 3
product_level : string, optional
level to download
values: ALL, AUXILIARY, CALIBRATION, LEVEL0, LEVEL0_5, LEVEL1, LEVEL2, LEVEL2_5, LEVEL3, ALL-LEVEL3
download_dir : string, optional
The directory in which the file will be downloaded
Returns
-------
File name of downloaded data
"""
if filename is not None:
filename = os.path.splitext(filename)[0]
params = {'retrieval_type': retrieval_type}
if observation_id is not None:
params['observation_id'] = observation_id
if retrieval_type == "OBSERVATION" and instrument_name is None:
instrument_name = "PACS"
if instrument_name is not None:
params['instrument_name'] = instrument_name
if observation_oid is not None:
params['observation_oid'] = observation_oid
if instrument_oid is not None:
params['instrument_oid'] = instrument_oid
if product_level is not None:
params['product_level'] = product_level
link = self.data_url + "".join(f"&{key}={val}" for key, val in params.items())
link += "".join(f"&{key}={val}" for key, val in kwargs.items())
if verbose:
log.info(link)
response = self._request('HEAD', link, save=False, cache=cache)
if response.status_code == 401:
error = "Data protected by proprietary rights. Please check your credentials"
raise LoginError(error)
response.raise_for_status()
if filename is None:
if observation_id is not None:
filename = observation_id
else:
error = "Please set either 'obervation_id' or 'filename' for the output"
raise ValueError(error)
_, res_params = cgi.parse_header(response.headers['Content-Disposition'])
r_filename = res_params["filename"]
suffixes = Path(r_filename).suffixes
if len(suffixes) > 1 and suffixes[-1] == ".jpg":
filename += suffixes[-1]
else:
filename += "".join(suffixes)
filename = os.path.join(download_dir, filename)
self._download_file(link, filename, head_safe=True, cache=cache)
if verbose:
log.info(f"Wrote {link} to {filename}")
return filename
def get_observation(self, observation_id, instrument_name, *, filename=None,
observation_oid=None, instrument_oid=None, product_level=None,
verbose=False, download_dir="", cache=True, **kwargs):
"""
Download observation from Herschel.
This consists of a .tar file containing:
- The auxiliary directory: contains all Herschel non-science spacecraft data
- The calibration directory: contains the uplink and downlink calibration products
- <obs_id> directory: contains the science data distributed in sub-directories called level0/0.5/1/2/2.5/3.
More information can be found here:
https://www.cosmos.esa.int/web/herschel/data-products-overview
Parameters
----------
observation_id : string
id of the observation to be downloaded
The identifier of the observation we want to retrieve, 10 digits
example: 1342195355
instrument_name : string
The instrument name
values: PACS, SPIRE, HIFI
filename : string, optional, default None
file name to be used to store the file
If the filename is not set it will use the observation_id as filename
verbose : bool, optional, default 'False'
flag to display information about the process
observation_oid : string, optional
Observation internal identifier. This is the database identifier
instrument_oid : string, optional
The database identifies of the instrument
values: 1, 2, 3
product_level : string, optional
level to download
values: ALL, AUXILIARY, CALIBRATION, LEVEL0, LEVEL0_5, LEVEL1, LEVEL2, LEVEL2_5, LEVEL3, ALL-LEVEL3
download_dir : string, optional
The directory in which the file will be downloaded
Returns
-------
File name of downloaded data
"""
if filename is not None:
filename = os.path.splitext(filename)[0]
params = {'retrieval_type': "OBSERVATION",
'observation_id': observation_id,
'instrument_name': instrument_name}
if observation_oid is not None:
params['observation_oid'] = observation_oid
if instrument_oid is not None:
params['instrument_oid'] = instrument_oid
if product_level is not None:
params['product_level'] = product_level
link = self.data_url + "".join(f"&{key}={val}" for key, val in params.items())
link += "".join(f"&{key}={val}" for key, val in kwargs.items())
if verbose:
log.info(link)
response = self._request('HEAD', link, save=False, cache=cache)
if response.status_code == 401:
error = "Data protected by proprietary rights. Please check your credentials"
raise LoginError(error)
response.raise_for_status()
_, res_params = cgi.parse_header(response.headers['Content-Disposition'])
r_filename = res_params["filename"]
suffixes = Path(r_filename).suffixes
if filename is None:
filename = observation_id
filename += "".join(suffixes)
filename = os.path.join(download_dir, filename)
self._download_file(link, filename, head_safe=True, cache=cache)
if verbose:
log.info(f"Wrote {link} to {filename}")
return filename
def get_postcard(self, observation_id, instrument_name, *, filename=None,
verbose=False, download_dir="", cache=True, **kwargs):
"""
Download postcard from Herschel
Parameters
----------
observation_id : string
id of the observation to be downloaded
The identifier of the observation we want to retrieve, 10 digits
example: 1342195355
instrument_name : string
The instrument name
values: PACS, SPIRE, HIFI
filename : string, optional, default None
file name to be used to store the file
If the filename is not set it will use the observation_id as filename
verbose : bool, optional, default False
flag to display information about the process
observation_oid : string, optional
Observation internal identifier. This is the database identifier
instrument_oid : string, optional
The database identifies of the instrument
values: 1, 2, 3
product_level : string, optional
level to download
values: ALL, AUXILIARY, CALIBRATION, LEVEL0, LEVEL0_5, LEVEL1, LEVEL2, LEVEL2_5, LEVEL3, ALL-LEVEL3
postcard_single : string, optional
'true' to retrieve one single postcard (main one)
values: true, false
download_dir : string, optional
The directory in which the file will be downloaded
Returns
-------
File name of downloaded data
"""
if filename is not None:
filename = os.path.splitext(filename)[0]
params = {'retrieval_type': "POSTCARD",
'observation_id': observation_id,
'instrument_name': instrument_name}
link = self.data_url + "".join(f"&{key}={val}" for key, val in params.items())
link += "".join(f"&{key}={val}" for key, val in kwargs.items())
if verbose:
log.info(link)
response = self._request('HEAD', link, save=False, cache=cache)
response.raise_for_status()
local_filepath = self._request('GET', link, cache=True, save=True)
original_filename = re.findall('filename="(.+)"',
response.headers["Content-Disposition"])[0]
_, ext = os.path.splitext(original_filename)
if filename is None:
filename = observation_id
filename += ext
filename = os.path.join(download_dir, filename)
shutil.move(local_filepath, filename)
if verbose:
log.info(f"Wrote {link} to {filename}")
return filename
def query_hsa_tap(self, query, *, output_file=None,
output_format="votable", verbose=False):
"""
Launches a synchronous job to query HSA Tabular Access Protocol (TAP) Service
Parameters
----------
query : string
query (adql) to be executed
output_file : string, optional, default None
file name where the results are saved if dumpToFile is True.
If this parameter is not provided, the jobid is used instead
output_format : string, optional, default 'votable'
values 'votable' or 'csv'
verbose : bool, optional, default 'False'
flag to display information about the process
Returns
-------
A table object
"""
job = self._tap.launch_job(query=query, output_file=output_file,
output_format=output_format,
verbose=verbose,
dump_to_file=output_file is not None)
table = job.get_results()
return table
def get_tables(self, *, only_names=True, verbose=False):
"""
Get the available table in HSA TAP service
Parameters
----------
only_names : bool, optional, default True
True to load table names only
verbose : bool, optional, default False
flag to display information about the process
Returns
-------
A list of tables
"""
tables = self._tap.load_tables(verbose=verbose)
if only_names:
return [t.name for t in tables]
else:
return tables
def get_columns(self, table_name, *, only_names=True, verbose=False):
"""
Get the available columns for a table in HSA TAP service
Parameters
----------
table_name : string
table name of which, columns will be returned
only_names : bool, optional, default True
True to load column names only
verbose : bool, optional, default False
flag to display information about the process
Returns
-------
A list of columns
"""
tables = self._tap.load_tables(verbose=verbose)
columns = None
for t in tables:
if str(t.name) == str(table_name):
columns = t.columns
break
if columns is None:
raise ValueError("table name specified was not found in "
"HSA TAP | |
<reponame>shui02/appyter-catalog
# Basic libraries
import pandas as pd
import os
import urllib3
import requests, json
import sys
import math
from collections import OrderedDict
import random
from time import sleep
import time
import operator
import numpy as np
import warnings
import shutil
from datetime import datetime
# Visualization
import seaborn as sns
import scipy.stats as ss
import plotly
from plotly import tools
import plotly.express as px
import plotly.graph_objs as go
import matplotlib.pyplot as plt; plt.rcdefaults()
from matplotlib import rcParams
from matplotlib.lines import Line2D
from matplotlib_venn import venn2, venn3
import IPython
from IPython.display import HTML, display, Markdown, IFrame, FileLink
from itertools import combinations
import base64
from scipy import stats
import chart_studio
import chart_studio.plotly as py
# Data analysis
from sklearn.decomposition import PCA
from sklearn.preprocessing import quantile_transform
from sklearn import cluster
from sklearn.metrics import silhouette_score
from sklearn.manifold import TSNE
import umap
from rpy2 import robjects
from rpy2.robjects import r, pandas2ri
from magic import MAGIC
import scanpy as sc
import anndata
import DigitalCellSorter
from maayanlab_bioinformatics.dge.characteristic_direction import characteristic_direction
def create_download_link(df, title = "Download CSV file: {}", filename = "data.csv"):
df.to_csv(filename)
html = "<a href=\"./{}\" target='_blank'>{}</a>".format(filename, title.format(filename))
return HTML(html)
def display_link(url, title=None):
if title is None:
title = url
raw_html = '<a href="%s" target="_blank">%s</a>' % (url, title)
return display(HTML(raw_html))
def display_object(counter, caption, df=None, istable=True):
if df is not None:
display(df)
if istable == True:
display(Markdown("*Table {}. {}*".format(counter, caption)))
else:
display(Markdown("*Figure {}. {}*".format(counter, caption)))
counter += 1
return counter
def autoselect_color_by(sample_metadata):
'''Automatically select a column in the sample_metadata df for coloring.
'''
color_by = None
color_type = 'categorical'
meta_col_nuniques = sample_metadata.nunique()
# pick a column with the cardinality between 2 and 10
meta_col_nuniques = meta_col_nuniques.loc[meta_col_nuniques.between(1, 30)]
if len(meta_col_nuniques) > 0:
color_by = meta_col_nuniques.index[0]
else: # pick a numeric column
is_number = np.vectorize(lambda x: np.issubdtype(x, np.number))
meta_col_dtypes = sample_metadata.dtypes
try:
meta_col_is_number = is_number(meta_col_dtypes)
if meta_col_is_number.sum() > 0:
color_by = meta_col_dtypes.loc[meta_col_is_number].index[0]
color_type = 'continuous'
except:
pass
return color_by, color_type
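# For instance, a metadata frame with columns "condition" (2 unique values)
# and "age" (float) would return ("condition", "categorical"); if only the
# numeric column were present it would fall through to ("age", "continuous").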
def run_dimension_reduction(dim_reduction_method, dataset, meta_class_column_name, magic_normalization=False, nr_genes=500, color_by='auto', color_type='categorical', plot_type='interactive'):
# top_genes = dataset.raw.to_adata().to_df().T.var(axis=1).sort_values(ascending=False)
if magic_normalization == False:
expression_dataframe = dataset.to_df()
else:
expression_dataframe = dataset.uns["magic"]
top_genes = expression_dataframe.T.var(axis=1).sort_values(ascending=False)
# Filter rows
expression_dataframe = expression_dataframe.loc[:, top_genes.index[:nr_genes]].T
# expression_dataframe is gene x sample
# Run PCA
if dim_reduction_method == "PCA":
dim_red=PCA(n_components=3)
dim_red.fit(expression_dataframe)
# Get Variance
var_explained = ['PC'+str((i+1))+'('+str(round(e*100, 1))+'% var. explained)' for i, e in enumerate(dim_red.explained_variance_ratio_)]
elif dim_reduction_method == "t-SNE":
dim_red = TSNE(n_components=3)
dim_red.fit(expression_dataframe)
var_explained = ['t-SNE 1', 't-SNE 2', 't-SNE 3']
elif dim_reduction_method == "UMAP":
dim_red = umap.UMAP(n_components=3)
dim_red.fit(expression_dataframe)
var_explained = ['UMAP 1', 'UMAP 2', 'UMAP 3']
sample_metadata = pd.DataFrame(dataset.obs.loc[:, meta_class_column_name])
# Estimate colors
if color_by == 'auto':
color_by, color_type = autoselect_color_by(sample_metadata)
# Return
dimension_reduction_results = {'result': dim_red, 'var_explained': var_explained, 'dim_reduction_method': dim_reduction_method,
'sample_metadata': sample_metadata,
'color_by': color_by, 'color_type': color_type, 'nr_genes': nr_genes,
'plot_type': plot_type}
return dimension_reduction_results
#############################################
########## 2. Plot
#############################################
def plot_dimension_reduction(dimension_reduction_results, return_data=False):
# Get results
dimension_reduction = dimension_reduction_results['result']
var_explained = dimension_reduction_results['var_explained']
sample_metadata = dimension_reduction_results['sample_metadata']
color_by = dimension_reduction_results.get('color_by')
color_type = dimension_reduction_results.get('color_type')
color_column = dimension_reduction_results['sample_metadata'][color_by] if color_by else None
if color_by:
colors = sns.color_palette(n_colors=len(color_column.unique())).as_hex()
dim_reduction_method = dimension_reduction_results["dim_reduction_method"]
sample_titles = ['<b>{}</b><br>'.format(index)+'<br>'.join('<i>{key}</i>: {value}'.format(**locals()) for key, value in rowData.items()) for index, rowData in sample_metadata.iterrows()]
if color_by and color_type == 'continuous':
marker = dict(size=5, color=color_column, colorscale='Viridis', showscale=True)
if dim_reduction_method == "PCA":
trace = go.Scatter3d(x=dimension_reduction.components_[0],
y=dimension_reduction.components_[1],
z=dimension_reduction.components_[2],
mode='markers',
hoverinfo='text',
text=sample_titles,
marker=marker)
elif dim_reduction_method == "t-SNE" or dim_reduction_method == "UMAP":
trace = go.Scatter3d(x=dimension_reduction.embedding_[:,0],
y=dimension_reduction.embedding_[:,1],
z=dimension_reduction.embedding_[:,2],
mode='markers',
hoverinfo='text',
text=sample_titles,
marker=marker)
data = [trace]
elif color_by and color_type == 'categorical' and len(color_column.unique()) <= len(colors):
# Get unique categories
unique_categories = color_column.unique()
# Define empty list
data = []
# Loop through the unique categories
for i, category in enumerate(unique_categories):
# Get the color corresponding to the category
category_color = colors[i]
# Get the indices of the samples corresponding to the category
category_indices = [i for i, sample_category in enumerate(color_column) if sample_category == category]
# Create new trace
if dim_reduction_method == "PCA":
trace = go.Scatter3d(x=dimension_reduction.components_[0][category_indices],
y=dimension_reduction.components_[1][category_indices],
z=dimension_reduction.components_[2][category_indices],
mode='markers',
hoverinfo='text',
text=[sample_titles[x] for x in category_indices],
name = category,
marker=dict(size=5, color=category_color))
elif dim_reduction_method == "t-SNE" or dim_reduction_method == "UMAP":
trace = go.Scatter3d(x=dimension_reduction.embedding_[category_indices, 0],
y=dimension_reduction.embedding_[category_indices, 1],
z=dimension_reduction.embedding_[category_indices, 2],
mode='markers',
hoverinfo='text',
text=[sample_titles[x] for x in category_indices],
name = category,
marker=dict(size=5, color=category_color))
# Append trace to data list
data.append(trace)
else:
marker = dict(size=5)
if dim_reduction_method == "PCA":
trace = go.Scatter3d(x=dimension_reduction.components_[0],
y=dimension_reduction.components_[1],
z=dimension_reduction.components_[2],
mode='markers',
hoverinfo='text',
text=sample_titles,
marker=marker)
elif dim_reduction_method == "t-SNE" or dim_reduction_method =="UMAP":
trace = go.Scatter3d(x=dimension_reduction.embedding_[:,0],
y=dimension_reduction.embedding_[:,1],
z=dimension_reduction.embedding_[:,2],
mode='markers',
hoverinfo='text',
text=sample_titles,
marker=marker)
data = [trace]
colored = '' if str(color_by) == 'None' else 'Colored by {}'.format(color_by)
layout = go.Layout(title='<b>{} Analysis | Scatter Plot</b><br><i>{}</i>'.format(dimension_reduction_results["dim_reduction_method"], colored),
hovermode='closest', margin=go.Margin(l=0,r=0,b=0,t=50), width=900,
scene=dict(xaxis=dict(title=var_explained[0]), yaxis=dict(title=var_explained[1]),zaxis=dict(title=var_explained[2])))
if return_data==True:
return data, layout
else:
fig = go.Figure(data=data, layout=layout)
if dimension_reduction_results['plot_type'] == 'interactive':
plotly.offline.iplot(fig)
else:
py.image.ishow(fig)
def normalize_magic(dataset, k=10, a=15, t='auto', n_pca=100, knn_dist='euclidean'):
magic_op = MAGIC(k=k, a=a, t=t, n_pca=n_pca, knn_dist=knn_dist)
data_magic = magic_op.fit_transform(dataset)
return data_magic.transpose()
def run_magic(dataset, dim_reduction_method, meta_class_column_name, plot_type='interactive'):
# Run imputation
dataset.uns['magic'] = normalize_magic(dataset.to_df()).T
return dataset
def run_clustergrammer(dataset, meta_class_column_name, magic_normalization=False, nr_genes=800, metadata_cols=None, filter_samples=True,gene_list=None):
# Subset the expression DataFrame using top 800 genes with largest variance
if magic_normalization == True:
data = dataset.uns["magic"]
else:
data = dataset.to_df().T
meta_df = dataset.obs
variances = np.var(data, axis=1)
srt_idx = variances.argsort()[::-1]
if gene_list is None or len(gene_list) == 0:
expr_df_sub = data.iloc[srt_idx].iloc[:nr_genes]
else:
gene_list = gene_list.split("\n")
common_gene_list = list(set(gene_list).intersection(set(data.index)))
expr_df_sub = data.loc[common_gene_list, :]
assert len(expr_df_sub.index) > 0
# prettify sample names
sample_names = ['::'.join([y, x]) for x,y in
zip(meta_df[meta_class_column_name], expr_df_sub.columns)]
expr_df_sub.columns = sample_names
expr_df_sub.index = ["Gene: "+str(x) for x in expr_df_sub.index]
sample_name = ["Sample: "+x for x in sample_names]
expr_df_sub.columns = sample_name
treatment_type = ["Class: "+ x.split("::")[1] for x in sample_names]
new_series = pd.DataFrame(treatment_type).T
new_series.columns = expr_df_sub.columns
expr_df_sub = pd.concat([new_series, expr_df_sub], axis=0)
index_list = list(expr_df_sub.index)
index_list = ["" if "Gene" not in str(x) else x for x in index_list]
expr_df_sub.index = index_list
#subset of expr_df_sub
if len(expr_df_sub.columns) > 50:
print("Input data is too large. Random sampling (n=50) is performed.")
expr_df_sub = expr_df_sub.sample(50, axis=1)
expr_df_sub_file = "expr_df_sub_file.txt"
expr_df_sub.to_csv("expr_df_sub_file.txt", sep='\t')
# POST the expression matrix to Clustergrammer and get the URL
clustergrammer_url = 'https://amp.pharm.mssm.edu/clustergrammer/matrix_upload/'
r = requests.post(clustergrammer_url, files={'file': open(expr_df_sub_file, 'rb')}).text
return r
#############################################
########## 2. Plot
#############################################
def plot_clustergrammar(clustergrammer_url):
clustergrammer_url = clustergrammer_url.replace("http:", "https:")
display_link(clustergrammer_url, clustergrammer_url)
# Embed
display(IPython.display.IFrame(clustergrammer_url, width="1000", height="1000"))
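# Minimal sketch of how the two Clustergrammer helpers fit together, assuming
# `adata` is an AnnData object with a 'Group' column in .obs (names are
# illustrative): run_clustergrammer() POSTs the filtered matrix and returns the
# widget URL as text, which plot_clustergrammar() then links and embeds.
#   clustergrammer_url = run_clustergrammer(adata, meta_class_column_name='Group')
#   plot_clustergrammar(clustergrammer_url)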
def get_signatures(classes, dataset, method, meta_class_column_name, cluster=True, filter_genes=True):
robjects.r('''limma <- function(rawcount_dataframe, design_dataframe, filter_genes=FALSE, adjust="BH") {
# Load packages
suppressMessages(require(limma))
suppressMessages(require(edgeR))
# Convert design matrix
design <- as.matrix(design_dataframe)
# Create DGEList object
dge <- DGEList(counts=rawcount_dataframe)
# Filter genes
if (filter_genes) {
keep <- filterByExpr(dge, design)
dge <- dge[keep,]
}
# Calculate normalization factors
dge <- calcNormFactors(dge)
# Run VOOM
v <- voom(dge, plot=FALSE)
# Fit linear model
fit <- lmFit(v, design)
# Make contrast matrix
cont.matrix <- makeContrasts(de=B-A, levels=design)
# Fit
fit2 <- contrasts.fit(fit, cont.matrix)
# Run DE
fit2 <- eBayes(fit2)
# Get results
limma_dataframe <- topTable(fit2, adjust=adjust, number=nrow(rawcount_dataframe))
limma_dataframe$gene_symbol <- rownames(limma_dataframe)
# Return
return(limma_dataframe)
}
''')
expr_df = dataset.to_df().T
raw_expr_df = dataset.raw.to_adata().to_df().T
meta_df = dataset.obs
signatures = dict()
if cluster == True:
# One-vs-rest: compare each cluster against all other samples. Note the variable names are swapped below: cls1_sample_ids holds the 'rest' samples (design column A) and non_cls1_sample_ids holds the cluster samples (column B), so the B-A contrast in the limma call is cluster minus rest.
for cls1 in classes:
signature_label = " vs. ".join(["Cluster {}".format(cls1), "rest"])
cls1_sample_ids = meta_df.loc[meta_df[meta_class_column_name]!=cls1].index.tolist()
non_cls1_sample_ids = meta_df.loc[meta_df[meta_class_column_name]==cls1].index.tolist()
if method == "limma":
design_dataframe = pd.DataFrame([{'index': x, 'A': int(x in cls1_sample_ids), 'B': int(x in non_cls1_sample_ids)} for x in expr_df.columns]).set_index('index')
# limma takes raw data
processed_data = {"expression": raw_expr_df, 'design': design_dataframe}
limma = robjects.r['limma']
signature = pandas2ri.conversion.rpy2py(limma(pandas2ri.conversion.py2rpy(processed_data['expression']), pandas2ri.conversion.py2rpy(processed_data['design']), filter_genes))
signature = signature.sort_values("t", ascending=False)
elif method == "characteristic_direction":
signature = characteristic_direction(expr_df.loc[:, cls1_sample_ids], expr_df.loc[:, non_cls1_sample_ids], calculate_sig=True)
signatures[signature_label] = signature
else:
for cls1, cls2 in combinations(classes, 2):
signature_label = " vs. ".join([cls1, cls2])
cls1_sample_ids = meta_df.loc[meta_df[meta_class_column_name]==cls1].index.tolist()
cls2_sample_ids = meta_df.loc[meta_df[meta_class_column_name]==cls2].index.tolist()
if method == "limma":
design_dataframe = pd.DataFrame([{'index': x, 'A': int(x in cls1_sample_ids), 'B': int(x in cls2_sample_ids)} for x in expr_df.columns]).set_index('index')
# limma takes raw data
processed_data = {"expression": raw_expr_df, 'design': design_dataframe}
limma = robjects.r['limma']
signature = pandas2ri.conversion.rpy2py(limma(pandas2ri.conversion.py2rpy(processed_data['expression']), pandas2ri.conversion.py2rpy(processed_data['design']), filter_genes))
signature = signature.sort_values("t", ascending=False)
elif method == "characteristic_direction":
signature = characteristic_direction(expr_df.loc[:, cls1_sample_ids], expr_df.loc[:, cls2_sample_ids], calculate_sig=True)
signatures[signature_label] = signature
return signatures
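# Sketch of the design dataframe handed to the R limma function above: one row
# per sample with 0/1 indicator columns A and B, so the contrast de=B-A compares
# group B against group A. Sample names below are hypothetical.
#
#              A  B
#   index
#   sample_1   1  0
#   sample_2   1  0
#   sample_3   0  1
#   sample_4   0  1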
def submit_enrichr_geneset(geneset, label):
ENRICHR_URL = 'https://amp.pharm.mssm.edu/Enrichr/addList'
genes_str = '\n'.join(geneset)
payload = {
'list': (None, genes_str),
'description': (None, label)
}
response = requests.post(ENRICHR_URL, files=payload)
if not response.ok:
raise Exception('Error analyzing gene list')
time.sleep(0.5)
data = json.loads(response.text)
return data
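# Hedged usage sketch for the Enrichr submission helper. The response fields are
# assumptions based on the public addList endpoint (commonly 'userListId' and
# 'shortId') rather than guarantees from this code.
#   result = submit_enrichr_geneset(['STAT3', 'TP53', 'MYC'], label='example up genes')
#   print(result.get('userListId'))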
def run_enrichr(signature, signature_label, geneset_size=500, sort_genes_by='t'):
# Sort signature
signature = signature.sort_values(sort_genes_by, ascending=False)
# Get genesets
# Handle error elements
elif child.tag == dbgp.ELEMENT_ERROR or child.tag == dbgp.ELEMENT_PATH_ERROR:
message = 'error'
for step_child in child:
if (step_child.tag == dbgp.ELEMENT_MESSAGE or step_child.tag == dbgp.ELEMENT_PATH_MESSAGE) and step_child.text:
message = step_child.text
break
if default_key:
properties[default_key] = {'name': None, 'type': message, 'value': None, 'numchildren': None, 'children': None}
return properties
def has_debug_view(name=None):
"""
Determine if active window has any or specific debug view(s).
Keyword arguments:
name -- Name of debug view to search for in active window.
"""
for view in sublime.active_window().views():
if is_debug_view(view):
if name is not None:
if view.name() == name:
return True
else:
return True
return False
def is_debug_view(view):
"""
Check if view name matches debug name/title.
Keyword arguments:
view -- View reference which to check if name matches debug name/title.
"""
return view.name() == TITLE_WINDOW_BREAKPOINT or view.name() == TITLE_WINDOW_CONTEXT or view.name() == TITLE_WINDOW_STACK or view.name() == TITLE_WINDOW_WATCH
def set_layout(layout):
"""
Toggle between debug and default window layouts.
"""
# Get active window and set reference to active view
window = sublime.active_window()
previous_active = window.active_view()
# Do not set layout when disabled
if get_value(S.KEY_DISABLE_LAYOUT):
S.RESTORE_LAYOUT = window.get_layout()
set_window_value('restore_layout', S.RESTORE_LAYOUT)
S.RESTORE_INDEX = H.new_dictionary()
set_window_value('restore_index', S.RESTORE_INDEX)
return
# Show debug layout
if layout == 'debug':
debug_layout = get_value(S.KEY_DEBUG_LAYOUT, S.LAYOUT_DEBUG)
if window.get_layout() != debug_layout:
# Save current layout
S.RESTORE_LAYOUT = window.get_layout()
set_window_value('restore_layout', S.RESTORE_LAYOUT)
# Remember view indexes
S.RESTORE_INDEX = H.new_dictionary()
for view in window.views():
view_id = '%d' % view.id()
group, index = window.get_view_index(view)
S.RESTORE_INDEX[view_id] = {'group': group, 'index': index}
set_window_value('restore_index', S.RESTORE_INDEX)
# Set debug layout
window.set_layout(S.LAYOUT_NORMAL)
window.set_layout(debug_layout)
# Show previous (single) layout
else:
# Get previous layout configuration
if S.RESTORE_LAYOUT is None:
S.RESTORE_LAYOUT = get_window_value('restore_layout', S.LAYOUT_NORMAL)
if S.RESTORE_INDEX is None:
S.RESTORE_INDEX = get_window_value('restore_index', {})
# Restore layout
window.set_layout(S.LAYOUT_NORMAL)
window.set_layout(S.RESTORE_LAYOUT)
for view in window.views():
view_id = '%d' % view.id()
# Set view indexes
if view_id in H.dictionary_keys(S.RESTORE_INDEX):
v = S.RESTORE_INDEX[view_id]
window.set_view_index(view, v['group'], v['index'])
# Restore focus to previous active view
if previous_active is not None:
window.focus_view(previous_active)
def show_content(data, content=None):
"""
Show content for specific data type in assigned window view.
Note: When the view does not exist, it will be created.
"""
# Handle data type
if data == DATA_BREAKPOINT:
title = TITLE_WINDOW_BREAKPOINT
content = generate_breakpoint_output()
elif data == DATA_CONTEXT:
title = TITLE_WINDOW_CONTEXT
elif data == DATA_STACK:
title = TITLE_WINDOW_STACK
elif data == DATA_WATCH:
title = TITLE_WINDOW_WATCH
content = generate_watch_output()
else:
return
# Get list of group/index for all debug views
debug_index = get_debug_index()
# Find group/index of debug view for current data type
try:
key = [debug[2] for debug in debug_index].index(title)
except ValueError:
return
# Set group and index position
group, index, _ = debug_index[key]
# Get active window and set reference to active view
window = sublime.active_window()
previous_active = window.active_view_in_group(window.active_group())
# Loop through views in active window
found = False
view = None
previous_key = -1
active_debug = None
for v in window.views():
# Search for view assigned to data type
if v.name() == title:
found = True
view = v
continue
# Adjust group/index of debug view depending on other debug view(s)
if is_debug_view(v):
try:
current_key = [debug[2] for debug in debug_index].index(v.name())
except ValueError:
continue
# Get current position of view
view_group, view_index = window.get_view_index(v)
# Recalculate group/index for debug view
current_group, current_index, _ = debug_index[current_key]
if group == current_group:
if key > previous_key and key < current_key:
index = view_index
if key > current_key:
index = view_index + 1
# Remember debug view for setting focus
if v == window.active_view_in_group(group):
active_debug = v
previous_key = current_key
# Make sure index position is not out of boundary
index_limit = len(window.views_in_group(group))
if index > index_limit:
index = index_limit
# Create new view if it does not exist
if not found:
view = window.new_file()
view.set_scratch(True)
view.set_read_only(True)
view.set_name(title)
window.set_view_index(view, group, index)
# Set focus back to active debug view
if active_debug is not None:
window.focus_view(active_debug)
# Strip .sublime-package of package name for syntax file
package_extension = '.sublime-package'
package = S.PACKAGE_FOLDER
if package.endswith(package_extension):
package = package[:-len(package_extension)]
# Configure view settings
view.settings().set('word_wrap', False)
view.settings().set('syntax', 'Packages/' + package + '/Xdebug.tmLanguage')
# Set content for view and fold all indentation blocks
view.run_command('xdebug_view_update', {'data': content, 'readonly': True})
if data == DATA_CONTEXT or data == DATA_WATCH:
view.run_command('fold_all')
# Restore focus to previous active view/group
if previous_active is not None:
window.focus_view(previous_active)
else:
window.focus_group(0)
def show_context_output(view):
"""
Show selected variable in an output panel when clicked in context window.
Keyword arguments:
view -- View reference which holds the context window.
"""
# Check if there is a debug session and context data
if S.SESSION and S.SESSION.connected and S.CONTEXT_DATA:
try:
# Get selected point in view
point = view.sel()[0]
# Check if selected point uses variable scope
if point.size() == 0 and sublime.score_selector(view.scope_name(point.a), 'variable'):
# Find variable in line which contains the point
line = view.substr(view.line(point))
pattern = re.compile(r'^(?=\$|\s)\s*(.*?)\s+=')
match = pattern.match(line)
if match:
# Get variable details from context data
variable_name = match.group(1)
variable = get_context_variable(S.CONTEXT_DATA, variable_name)
if variable:
# Convert details to text output
variables = H.new_dictionary()
variables[variable_name] = variable
data = generate_context_output(variables)
# Show context variables and children in output panel
window = sublime.active_window()
panel = window.get_output_panel('xdebug')
panel.run_command('xdebug_view_update', {'data': data})
panel.run_command('set_setting', {'setting': 'word_wrap', 'value': True})
window.run_command('show_panel', {'panel': 'output.xdebug'})
except:
pass
def show_file(filename, row=None):
"""
Open or focus file in window, which is currently being debugged.
Keyword arguments:
filename -- Absolute path of file on local device.
"""
# Check if file exists if being referred to file system
if os.path.exists(filename):
# Get active window
window = sublime.active_window()
window.focus_group(0)
# Check if file is already open
found = False
view = window.find_open_file(filename)
if view is not None:
found = True
window.focus_view(view)
# Set focus to row (line number)
show_at_row(view, row)
# Open file if not open
if not found:
view = window.open_file(filename)
window.focus_view(view)
# Set focus to row (line number) when file is loaded
S.SHOW_ROW_ONLOAD[filename] = row
def show_panel_content(content):
# Show response data in output panel
try:
window = sublime.active_window()
panel = window.get_output_panel('xdebug')
panel.run_command('xdebug_view_update', {'data': content})
panel.run_command('set_setting', {'setting': 'word_wrap', 'value': True})
window.run_command('show_panel', {'panel': 'output.xdebug'})
except:
print(content)
def show_at_row(view, row=None):
"""
Scroll the view to center on the given row (line number).
Keyword arguments:
- view -- Which view to scroll to center on row.
- row -- Row where to center the view.
"""
if row is not None:
try:
# Convert row (line number) to region
row_region = rows_to_region(row)[0].a
# Scroll the view to row
view.show_at_center(row_region)
# Highlight row by selection
selection = view.sel()
selection.clear()
selection.add(sublime.Region(row_region, row_region))
except IndexError:
# In case rows_to_region returns empty list
pass
def rows_to_region(rows):
"""
Convert rows (line numbers) to a region (selection/cursor position).
Keyword arguments:
- rows -- Row number(s) to convert to region(s).
"""
# Get current active view
view = sublime.active_window().active_view()
# Unable to convert rows to regions when no view available
if view is None:
return
# List for containing regions to return
region = []
# Create list if it is a singleton
if not isinstance(rows, list):
rows = [rows]
for row in rows:
# Check if row is a digit
if isinstance(row, int) or H.is_digit(row):
# Convert from 1 based to a 0 based row (line) number
row_number = int(row) - 1
# Calculate offset point for row
offset_point = view.text_point(row_number, 0)
# Get region for row by offset point
region_row = view.line(offset_point)
# Add to list for result
region.append(region_row)
return region
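# Hedged example: assuming a view is active, rows_to_region([10, 12]) would
# return two sublime.Region objects covering lines 10 and 12 (rows are 1-based
# in the argument and converted to 0-based internally via text_point).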
def region_to_rows(region=None, filter_empty=False):
"""
Convert a region (selection/cursor position) to rows (line numbers).
Keyword arguments:
- region -- sublime.Selection/sublime.RegionSet or sublime.Region to convert to row number(s).
- filter_empty -- Filter empty rows (line numbers).
"""
# Get current active view
view = sublime.active_window().active_view()
# Unable to convert regions to rows when no view available
if view is None:
return
# Use current selection/cursor position if no region defined
if region is None:
region = view.sel()
# List for containing rows (line numbers) to return
rows = []
# Create list if it is a singleton
if isinstance(region, sublime.Region):
    region = [region]
pipeline1 = Pipeline([node(biconcat, ["input", "input1"], "output1", name="a")])
pipeline2 = Pipeline([node(biconcat, ["input", "input1"], "output1", name="a")])
new_pipeline = pipeline1 - pipeline2
assert new_pipeline.inputs() == set()
assert new_pipeline.outputs() == set()
assert not new_pipeline.nodes
def test_invalid_remove(self):
p = Pipeline([])
pattern = r"unsupported operand type\(s\) for -: 'Pipeline' and 'str'"
with pytest.raises(TypeError, match=pattern):
p - "hello" # pylint: disable=pointless-statement
def test_combine_same_node(self):
"""Multiple (identical) pipelines are possible"""
pipeline1 = Pipeline(
[node(biconcat, ["input", "input1"], ["output"], name="a")]
)
pipeline2 = Pipeline(
[node(biconcat, ["input", "input1"], ["output"], name="a")]
)
new_pipeline = pipeline1 + pipeline2
assert new_pipeline.inputs() == {"input", "input1"}
assert new_pipeline.outputs() == {"output"}
assert {n.name for n in new_pipeline.nodes} == {"a"}
def test_intersection(self):
pipeline1 = Pipeline(
[
node(biconcat, ["input", "input1"], "output1", name="a"),
node(biconcat, ["input", "input2"], "output2", name="b"),
]
)
pipeline2 = Pipeline([node(biconcat, ["input", "input2"], "output2", name="b")])
new_pipeline = pipeline1 & pipeline2
assert new_pipeline.inputs() == {"input", "input2"}
assert new_pipeline.outputs() == {"output2"}
assert {n.name for n in new_pipeline.nodes} == {"b"}
def test_invalid_intersection(self):
p = Pipeline([])
pattern = r"unsupported operand type\(s\) for &: 'Pipeline' and 'str'"
with pytest.raises(TypeError, match=pattern):
p & "hello" # pylint: disable=pointless-statement
def test_union(self):
pipeline1 = Pipeline(
[
node(biconcat, ["input", "input1"], "output1", name="a"),
node(biconcat, ["input", "input2"], "output2", name="b"),
]
)
pipeline2 = Pipeline([node(biconcat, ["input", "input2"], "output2", name="b")])
new_pipeline = pipeline1 | pipeline2
assert new_pipeline.inputs() == {"input", "input1", "input2"}
assert new_pipeline.outputs() == {"output1", "output2"}
assert {n.name for n in new_pipeline.nodes} == {"a", "b"}
def test_invalid_union(self):
p = Pipeline([])
pattern = r"unsupported operand type\(s\) for |: 'Pipeline' and 'str'"
with pytest.raises(TypeError, match=pattern):
p | "hello" # pylint: disable=pointless-statement
def test_empty_case(self):
"""Empty pipeline is possible"""
Pipeline([])
def test_initialized_with_tags(self):
pipeline = Pipeline(
[node(identity, "A", "B", tags=["node1", "p1"]), node(identity, "B", "C")],
tags=["p1", "p2"],
)
node1 = pipeline.grouped_nodes[0].pop()
node2 = pipeline.grouped_nodes[1].pop()
assert node1.tags == {"node1", "p1", "p2"}
assert node2.tags == {"p1", "p2"}
def test_node_unique_confirms(self):
"""Test that unique dataset confirms don't break pipeline concatenation"""
pipeline1 = Pipeline([node(identity, "input1", "output1", confirms="output1")])
pipeline2 = Pipeline([node(identity, "input2", "output2", confirms="other")])
pipeline3 = Pipeline([node(identity, "input3", "output3")])
combined = pipeline1 + pipeline2 + pipeline3
assert len(combined.nodes) == 3
def pipeline_with_circle():
return [
node(identity, "A", "B", name="node1"),
node(identity, "B", "C", name="node2"),
node(identity, "C", "A", name="node3"), # circular dependency
]
def non_unique_node_outputs():
return [
node(identity, "A", ["B", "C"], name="node1"),
node(identity, "C", ["D", "E", "F"], name="node2"),
# D, E non-unique
node(identity, "B", dict(out1="D", out2="E"), name="node3"),
node(identity, "D", ["E"], name="node4"), # E non-unique
]
class TestInvalidPipeline:
def test_circle_case(self):
pattern = "Circular dependencies"
with pytest.raises(CircularDependencyError, match=pattern):
Pipeline(pipeline_with_circle())
def test_unique_outputs(self):
with pytest.raises(OutputNotUniqueError, match=r"\['D', 'E'\]"):
Pipeline(non_unique_node_outputs())
def test_none_case(self):
with pytest.raises(ValueError, match="is None"):
Pipeline(None)
@pytest.mark.parametrize(
"target_node_names", [["node2", "node3", "node4", "NaN"], ["invalid"]]
)
def test_only_nodes_missing(self, pipeline_list_with_lists, target_node_names):
pattern = r"Pipeline does not contain nodes"
full = Pipeline(pipeline_list_with_lists["nodes"])
with pytest.raises(ValueError, match=pattern):
full.only_nodes(*target_node_names)
@pytest.mark.parametrize("namespace", ["katie", None])
def test_only_nodes_with_namespace_empty(self, namespace):
pipeline = Pipeline([node(identity, "A", "B", namespace=namespace)])
pattern = r"Pipeline does not contain nodes"
with pytest.raises(ValueError, match=pattern):
pipeline.only_nodes_with_namespace("non_existent")
def test_duplicate_free_nodes(self):
pattern = (
"Pipeline nodes must have unique names. The following node "
"names appear more than once:\n\nFree nodes:\n - same_name"
)
with pytest.raises(ValueError, match=re.escape(pattern)):
Pipeline(
[
node(identity, "in1", "out1", name="same_name"),
node(identity, "in2", "out2", name="same_name"),
]
)
pipeline = Pipeline([node(identity, "in1", "out1", name="same_name")])
another_node = node(identity, "in2", "out2", name="same_name")
with pytest.raises(ValueError, match=re.escape(pattern)):
# 'pipeline' passes the check, 'another_node' doesn't
Pipeline([pipeline, another_node])
def test_duplicate_nodes_in_pipelines(self):
pipeline = Pipeline(
[node(biconcat, ["input", "input1"], ["output", "output1"], name="node")]
)
pattern = (
r"Pipeline nodes must have unique names\. The following node "
r"names appear more than once\:\n\nPipeline\(\[.+\]\)\:\n \- node"
)
with pytest.raises(ValueError, match=pattern):
# the first 'pipeline' passes the check, the second doesn't
Pipeline([pipeline, pipeline])
another_node = node(identity, "in1", "out1", name="node")
with pytest.raises(ValueError, match=pattern):
# 'another_node' passes the check, 'pipeline' doesn't
Pipeline([another_node, pipeline])
def test_bad_combine_node(self):
"""Node cannot be combined to pipeline."""
fred = node(identity, "input", "output")
pipeline = Pipeline([fred])
with pytest.raises(TypeError):
pipeline + fred # pylint: disable=pointless-statement
def test_bad_combine_int(self):
"""int cannot be combined to pipeline, tests __radd__"""
fred = node(identity, "input", "output")
pipeline = Pipeline([fred])
with pytest.raises(TypeError):
_ = 1 + pipeline
def test_conflicting_names(self):
"""Node names must be unique."""
pipeline1 = Pipeline(
[node(biconcat, ["input", "input1"], ["output1"], name="a")]
)
new_pipeline = Pipeline(
[node(biconcat, ["input", "input1"], ["output2"], name="a")]
)
pattern = (
"Pipeline nodes must have unique names. The following node names "
"appear more than once:\n\nFree nodes:\n - a"
)
with pytest.raises(ValueError, match=re.escape(pattern)):
pipeline1 + new_pipeline # pylint: disable=pointless-statement
def test_conflicting_outputs(self):
"""Node outputs must be unique."""
pipeline1 = Pipeline(
[node(biconcat, ["input", "input1"], ["output", "output1"], name="a")]
)
new_pipeline = Pipeline(
[node(biconcat, ["input", "input2"], ["output", "output2"], name="b")]
)
with pytest.raises(OutputNotUniqueError, match=r"\['output'\]"):
pipeline1 + new_pipeline # pylint: disable=pointless-statement
def test_duplicate_node_confirms(self):
"""Test that non-unique dataset confirms break pipeline concatenation"""
pipeline1 = Pipeline([node(identity, "input1", "output1", confirms="other")])
pipeline2 = Pipeline(
[node(identity, "input2", "output2", confirms=["other", "output2"])]
)
with pytest.raises(ConfirmNotUniqueError, match=r"\['other'\]"):
pipeline1 + pipeline2 # pylint: disable=pointless-statement
@pytest.fixture
def complex_pipeline(pipeline_list_with_lists):
nodes = pipeline_list_with_lists["nodes"]
pipeline = Pipeline(nodes)
return pipeline
class TestComplexPipeline:
def test_from_inputs(self, complex_pipeline):
"""F and H are inputs of node1, node2 and node3."""
new_pipeline = complex_pipeline.from_inputs("F", "H")
nodes = {node.name for node in new_pipeline.nodes}
assert len(new_pipeline.nodes) == 3
assert nodes == {"node1", "node2", "node3"}
def test_from_inputs_unknown(self, complex_pipeline):
"""W and Z do not exist as inputs."""
with pytest.raises(ValueError, match=r"\['W', 'Z'\]"):
complex_pipeline.from_inputs("Z", "W", "E", "C")
def test_only_nodes_with_inputs(self, complex_pipeline):
"""node1 and node2 require H as an input."""
new_pipeline = complex_pipeline.only_nodes_with_inputs("H")
nodes = {node.name for node in new_pipeline.nodes}
assert len(new_pipeline.nodes) == 2
assert nodes == {"node1", "node2"}
def test_only_nodes_with_inputs_unknown(self, complex_pipeline):
with pytest.raises(ValueError, match=r"\['W', 'Z'\]"):
complex_pipeline.only_nodes_with_inputs("Z", "W", "E", "C")
def test_only_nodes_with_outputs(self, complex_pipeline):
"""node4 require F and H as outputs."""
new_pipeline = complex_pipeline.only_nodes_with_outputs("F", "H")
nodes = {node.name for node in new_pipeline.nodes}
assert len(new_pipeline.nodes) == 1
assert nodes == {"node4"}
def test_only_nodes_with_outputs_unknown(self, complex_pipeline):
with pytest.raises(ValueError, match=r"\['W', 'Z'\]"):
complex_pipeline.only_nodes_with_outputs("Z", "W", "E", "C")
def test_to_outputs(self, complex_pipeline):
"""New pipeline contain all nodes to produce F and H outputs."""
new_pipeline = complex_pipeline.to_outputs("F", "H")
nodes = {node.name for node in new_pipeline.nodes}
assert len(new_pipeline.nodes) == 4
assert nodes == {"node4", "node7", "node8", "node9"}
def test_to_outputs_unknown(self, complex_pipeline):
with pytest.raises(ValueError, match=r"\['W', 'Z'\]"):
complex_pipeline.to_outputs("Z", "W", "E", "C")
def test_from_nodes(self, complex_pipeline):
"""New pipeline contain all nodes that depend on node2 and node3."""
new_pipeline = complex_pipeline.from_nodes("node3", "node2")
nodes = {node.name for node in new_pipeline.nodes}
assert len(new_pipeline.nodes) == 3
assert nodes == {"node1", "node2", "node3"}
def test_from_node_unknown(self, complex_pipeline):
pattern = r"Pipeline does not contain nodes named \['missing_node'\]"
with pytest.raises(ValueError, match=pattern):
complex_pipeline.from_nodes("missing_node")
def test_to_nodes(self, complex_pipeline):
"""New pipeline contain all nodes required by node4 and node6."""
new_pipeline = complex_pipeline.to_nodes("node4", "node6")
nodes = {node.name for node in new_pipeline.nodes}
assert len(new_pipeline.nodes) == 5
assert nodes == {"node4", "node6", "node7", "node8", "node9"}
def test_to_nodes_unknown(self, complex_pipeline):
pattern = r"Pipeline does not contain nodes named \['missing_node'\]"
with pytest.raises(ValueError, match=pattern):
complex_pipeline.to_nodes("missing_node")
def test_connected_pipeline(self, disjoint_pipeline):
"""Connect two separate pipelines."""
nodes = disjoint_pipeline["nodes"]
subpipeline = Pipeline(nodes, tags=["subpipeline"])
assert len(subpipeline.inputs()) == 2
assert len(subpipeline.outputs()) == 2
pipeline = Pipeline(
[node(identity, "C", "D", name="connecting_node"), subpipeline], tags="main"
)
assert len(pipeline.nodes) == 1 + len(nodes)
assert len(pipeline.inputs()) == 1
assert len(pipeline.outputs()) == 1
def test_node_dependencies(self, complex_pipeline):
expected = {
"node1": {"node2", "node3", "node4"},
"node2": {"node4"},
"node3": {"node4"},
"node4": {"node7"},
"node5": {"node6"},
"node6": {"node7"},
"node7": {"node8"},
"node8": {"node9"},
"node9": set(),
}
actual = {
child.name: {parent.name for parent in parents}
for child, parents in complex_pipeline.node_dependencies.items()
}
assert actual == expected
class TestPipelineDescribe:
def test_names_only(self, str_node_inputs_list):
pipeline = Pipeline(str_node_inputs_list["nodes"])
description = pipeline.describe()
desc = description.split("\n")
test_desc = [
"#### Pipeline execution order ####",
"Inputs: input1, input2",
"",
"node1",
"node2",
"",
"Outputs: input4",
"##################################",
]
assert len(desc) == len(test_desc)
for res, example in zip(desc, test_desc):
assert res == example
def test_full(self, str_node_inputs_list):
pipeline = Pipeline(str_node_inputs_list["nodes"])
description = pipeline.describe(names_only=False)
desc = description.split("\n")
test_desc = [
"#### Pipeline execution order ####",
"Inputs: input1, input2",
"",
"node1: biconcat([input1,input2]) -> [input3]",
"node2: identity([input3]) -> [input4]",
"",
"Outputs: input4",
"##################################",
]
assert len(desc) == len(test_desc)
for res, example in zip(desc, test_desc):
assert res == example
def apply_f(func: Callable) -> Callable:
@wraps(func)
def with_f(*args, **kwargs):
return func(*(f"f({a})" for a in args), **kwargs)
return with_f
def apply_g(func: Callable) -> Callable:
@wraps(func)
def with_g(*args, **kwargs):
return func(*(f"g({a})" for a in args), **kwargs)
return with_g
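# Illustration of the decorators above (values derived from the code, not from
# the test suite): apply_f wraps every positional argument in "f(...)" before
# calling the target, and apply_g does the same with "g(...)".
#   apply_f(identity)("number")   # -> "f(number)"
#   apply_g(identity)("number")   # -> "g(number)"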
class TestPipelineDecorator:
def test_apply(self):
nodes = sorted(
[
node(identity, "number", "output1", name="identity1"),
<reponame>sisisin/pulumi-gcp
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['RouterArgs', 'Router']
@pulumi.input_type
class RouterArgs:
def __init__(__self__, *,
network: pulumi.Input[str],
bgp: Optional[pulumi.Input['RouterBgpArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
encrypted_interconnect_router: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Router resource.
:param pulumi.Input[str] network: A reference to the network to which this router belongs.
:param pulumi.Input['RouterBgpArgs'] bgp: BGP information specific to this router.
Structure is documented below.
:param pulumi.Input[str] description: User-specified description for the IP range.
:param pulumi.Input[bool] encrypted_interconnect_router: Field to indicate if a router is dedicated to use with encrypted Interconnect Attachment (IPsec-encrypted Cloud
Interconnect feature). Not currently available publicly.
:param pulumi.Input[str] name: Name of the resource. The name must be 1-63 characters long, and
comply with RFC1035. Specifically, the name must be 1-63 characters
long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all
following characters must be a dash, lowercase letter, or digit,
except the last character, which cannot be a dash.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] region: Region where the router resides.
"""
pulumi.set(__self__, "network", network)
if bgp is not None:
pulumi.set(__self__, "bgp", bgp)
if description is not None:
pulumi.set(__self__, "description", description)
if encrypted_interconnect_router is not None:
pulumi.set(__self__, "encrypted_interconnect_router", encrypted_interconnect_router)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if region is not None:
pulumi.set(__self__, "region", region)
@property
@pulumi.getter
def network(self) -> pulumi.Input[str]:
"""
A reference to the network to which this router belongs.
"""
return pulumi.get(self, "network")
@network.setter
def network(self, value: pulumi.Input[str]):
pulumi.set(self, "network", value)
@property
@pulumi.getter
def bgp(self) -> Optional[pulumi.Input['RouterBgpArgs']]:
"""
BGP information specific to this router.
Structure is documented below.
"""
return pulumi.get(self, "bgp")
@bgp.setter
def bgp(self, value: Optional[pulumi.Input['RouterBgpArgs']]):
pulumi.set(self, "bgp", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
User-specified description for the IP range.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="encryptedInterconnectRouter")
def encrypted_interconnect_router(self) -> Optional[pulumi.Input[bool]]:
"""
Field to indicate if a router is dedicated to use with encrypted Interconnect Attachment (IPsec-encrypted Cloud
Interconnect feature). Not currently available publicly.
"""
return pulumi.get(self, "encrypted_interconnect_router")
@encrypted_interconnect_router.setter
def encrypted_interconnect_router(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "encrypted_interconnect_router", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the resource. The name must be 1-63 characters long, and
comply with RFC1035. Specifically, the name must be 1-63 characters
long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all
following characters must be a dash, lowercase letter, or digit,
except the last character, which cannot be a dash.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
Region where the router resides.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@pulumi.input_type
class _RouterState:
def __init__(__self__, *,
bgp: Optional[pulumi.Input['RouterBgpArgs']] = None,
creation_timestamp: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
encrypted_interconnect_router: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
network: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
self_link: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Router resources.
:param pulumi.Input['RouterBgpArgs'] bgp: BGP information specific to this router.
Structure is documented below.
:param pulumi.Input[str] creation_timestamp: Creation timestamp in RFC3339 text format.
:param pulumi.Input[str] description: User-specified description for the IP range.
:param pulumi.Input[bool] encrypted_interconnect_router: Field to indicate if a router is dedicated to use with encrypted Interconnect Attachment (IPsec-encrypted Cloud
Interconnect feature). Not currently available publicly.
:param pulumi.Input[str] name: Name of the resource. The name must be 1-63 characters long, and
comply with RFC1035. Specifically, the name must be 1-63 characters
long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all
following characters must be a dash, lowercase letter, or digit,
except the last character, which cannot be a dash.
:param pulumi.Input[str] network: A reference to the network to which this router belongs.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] region: Region where the router resides.
:param pulumi.Input[str] self_link: The URI of the created resource.
"""
if bgp is not None:
pulumi.set(__self__, "bgp", bgp)
if creation_timestamp is not None:
pulumi.set(__self__, "creation_timestamp", creation_timestamp)
if description is not None:
pulumi.set(__self__, "description", description)
if encrypted_interconnect_router is not None:
pulumi.set(__self__, "encrypted_interconnect_router", encrypted_interconnect_router)
if name is not None:
pulumi.set(__self__, "name", name)
if network is not None:
pulumi.set(__self__, "network", network)
if project is not None:
pulumi.set(__self__, "project", project)
if region is not None:
pulumi.set(__self__, "region", region)
if self_link is not None:
pulumi.set(__self__, "self_link", self_link)
@property
@pulumi.getter
def bgp(self) -> Optional[pulumi.Input['RouterBgpArgs']]:
"""
BGP information specific to this router.
Structure is documented below.
"""
return pulumi.get(self, "bgp")
@bgp.setter
def bgp(self, value: Optional[pulumi.Input['RouterBgpArgs']]):
pulumi.set(self, "bgp", value)
@property
@pulumi.getter(name="creationTimestamp")
def creation_timestamp(self) -> Optional[pulumi.Input[str]]:
"""
Creation timestamp in RFC3339 text format.
"""
return pulumi.get(self, "creation_timestamp")
@creation_timestamp.setter
def creation_timestamp(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "creation_timestamp", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
User-specified description for the IP range.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="encryptedInterconnectRouter")
def encrypted_interconnect_router(self) -> Optional[pulumi.Input[bool]]:
"""
Field to indicate if a router is dedicated to use with encrypted Interconnect Attachment (IPsec-encrypted Cloud
Interconnect feature). Not currently available publicly.
"""
return pulumi.get(self, "encrypted_interconnect_router")
@encrypted_interconnect_router.setter
def encrypted_interconnect_router(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "encrypted_interconnect_router", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the resource. The name must be 1-63 characters long, and
comply with RFC1035. Specifically, the name must be 1-63 characters
long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all
following characters must be a dash, lowercase letter, or digit,
except the last character, which cannot be a dash.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def network(self) -> Optional[pulumi.Input[str]]:
"""
A reference to the network to which this router belongs.
"""
return pulumi.get(self, "network")
@network.setter
def network(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
Region where the router resides.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> Optional[pulumi.Input[str]]:
"""
The URI of the created resource.
"""
return pulumi.get(self, "self_link")
@self_link.setter
def self_link(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "self_link", value)
class Router(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
bgp: Optional[pulumi.Input[pulumi.InputType['RouterBgpArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
encrypted_interconnect_router: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
network: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Represents a Router resource.
To get more information about Router, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/routers)
* How-to Guides
* [Google Cloud Router](https://cloud.google.com/router/docs/)
## Example Usage
### Router Basic
```python
import pulumi
import pulumi_gcp as gcp
foobar_network = gcp.compute.Network("foobarNetwork", auto_create_subnetworks=False)
foobar_router = gcp.compute.Router("foobarRouter",
network=foobar_network.name,
bgp=gcp.compute.RouterBgpArgs(
asn=64514,
advertise_mode="CUSTOM",
advertised_groups=["ALL_SUBNETS"],
advertised_ip_ranges=[
gcp.compute.RouterBgpAdvertisedIpRangeArgs(
range="1.2.3.4",
),
gcp.compute.RouterBgpAdvertisedIpRangeArgs(
range="6.7.0.0/16",
),
],
))
```
### Compute Router Encrypted Interconnect
need to check this.'''
isotope_check = [str(a) for a in [sum([[mod_names[[b[0] for b in mod_names].index(a)][1] for a in D[2][0].split('_')].count(m) == peptide.count(m) for m in [a[1] for a in mod_names]])==len(mod_names) for peptide in [peptideL,peptideH]]]
mods_check = [0,0]
for i,line in enumerate(zip([modsL,modsH],[peptideL,peptideH])):
# line is mods, peptide. i indicates light (0) or heavy (1)
mods_ = line[0]
peptide = line[1]
if isotope_check[i] == 'False':
mods_check[i] = 'False'
else:
mod_indices = [peptide.index(a) for a in peptide if a in [b[1] for b in mod_names]]
s = sum([int(mods_[a]) for a in mod_indices])
if (s != 0) == i:
mods_check[i] = 'True'
else:
mods_check[i] = 'False'
output = []
def rev_mods(m,p):
''' reverses mods. If K or R, 0 becomes 4 or 5, and 4 or 5 becomes 0 '''
new = ""
for line in zip(m,p):
aa = line[1]
mod = line[0]
if aa not in [a[1] for a in mod_names]:
new += mod
else:
if mod == str(mod_dict[aa]):
new += '0'
else:
new += str(mod_dict[aa])
return new
if peptideL == peptideH and peptideL != "None": # this is a match, so display the fragmentation patterns and count the number of matches
output.append( [ corroborate_peptide(peptideL,ions[0],mods[0]), corroborate_peptide(peptideL,ions[1],mods[1]) ] )
elif (isotope_check[0] == 'True' and mods_check[0] == 'True') or (isotope_check[1] == 'True' and mods_check[1] == 'True'):
if (isotope_check[0] == 'True' and mods_check[0] == 'True'):# the mods seem to work, so the other peptide might actually be an incorrect assignment
new_mods = rev_mods(mods[0],peptideL)
output.append( [ corroborate_peptide(peptideL,ions[0],mods[0]),corroborate_peptide(peptideL,ions[1],new_mods) ] ) # need to fix the second mods
if (isotope_check[1] == 'True' and mods_check[1] == 'True'):
new_mods = rev_mods(mods[1],peptideH)
output.append( [ corroborate_peptide(peptideH,ions[0],new_mods),corroborate_peptide(peptideH,ions[1],mods[1]) ] ) # need to fix the first mods
return output,isotope_check,mods_check
def same_ion_size_and_intensity(ion1,ion2,max_intensity):
'''ion1 and ion2 are [m/z, intensity]; max_intensity is the max intensity of each spectrum as [max1, max2]; the thresholds min_ion_percent_intensity and max_ion_difference are globals'''
m1 = ion1[0]
m2 = ion2[0]
i1 = ion1[1]
i2 = ion2[1]
max1 = max_intensity[0]
max2 = max_intensity[1]
ppm_calc = calc_ppm(m1,m2)
'''this check ensures that the two m/z values are the same (within the ppm tolerance), that the peak heights are above the minimum (e.g. 1% of max), and that the peak height difference isn't too large '''
rel_int_1 = i1/max1
rel_int_2 = i2/max2
if (abs(ppm_calc) < ms2_ppm_tolerance) and (abs(rel_int_1 - rel_int_2) / max(rel_int_1, rel_int_2)) < max_ion_difference:
return True
else:
return False
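# Hedged numeric example (threshold values are illustrative and calc_ppm is
# assumed to return a conventional parts-per-million difference): with
# ms2_ppm_tolerance = 10 and max_ion_difference = 0.5, the pair
# ion1 = [500.001, 800.0] and ion2 = [500.003, 900.0] with max_intensity
# [1000.0, 1000.0] differs by about 4 ppm and by (0.9 - 0.8) / 0.9 ~ 0.11 in
# relative intensity, so same_ion_size_and_intensity would return True.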
def validator_3_function(pairs,Q):
'''
This function will take each pair and bin the MS2 ions as B and Y.
Formerly, this was done by looking for shifters and non-shifters.
This function was modified to account for missed cleavages, so the B and Y ions might carry internal isotopes (at least one for the Y ions)
'''
all_matches = []
for pair in pairs:
q1 = pair[0][0]
q2 = pair[0][1]
c = [[],[],[]] # non-shifter, shifters and half-shifter matches
iso = pair[1][0]
d1 = Q[q1][0]
d2 = Q[q2][0]
MS2_1 = d1.MS2_ions
MS2_2 = d2.MS2_ions
MS2_ions_1 = [a for a in MS2_1] # copy the [m/z, intensity] pairs
MS2_ions_2 = [a for a in MS2_2]
MS2_ions_1.sort(lambda a,b:cmp(a[0],b[0]))
MS2_ions_2.sort(lambda a,b:cmp(a[0],b[0]))
max_intensity_1 = max([a[1] for a in MS2_ions_1])
max_intensity_2 = max([a[1] for a in MS2_ions_2])
max_intensities = [max_intensity_1, max_intensity_2]
if screen_print:
print "MS1 cutoff", MS1_cutoff
print "MS2 cutoff", MS2_cutoff
print "QUERIES",q1,q2
print "SCANS",d1.scan_number, d2.scan_number
s = ["LIGHT","HEAVY"]
print "These are all the MS2 ions"
for x,ions in enumerate([MS2_ions_1, MS2_ions_2]):
print
print s[x]
print '%8s'%("m/z"),'%8s'%("intensity") ,'%8s'%("percent max")
for ion in ions:
print '%8.3f'%(ion[0]),'%8.2f'%(ion[1]), '%8.4f'%(ion[1]/max_intensities[x])
print
print
print
#
# CONSIDER MODIFYING THIS SECTION TO CHANGE ALGORITHM ON FILTERING OUT LOW IONS
#
'''Filter out the ions that do not make the intensity criteria
THIS IS AN IMPORTANT STEP. A lot of ions will be lost here.'''
MS2_ions_1 = [a for a in MS2_ions_1 if a[1]/max_intensity_1>min_ion_percent_intensity]
MS2_ions_2 = [a for a in MS2_ions_2 if a[1]/max_intensity_2>min_ion_percent_intensity]
MS2 = [a[0] for a in MS2_ions_2]
if screen_print:
#
# PRINTOUT MS2 ions
#
print "A filter was applied to remove those ions not greater than",min_ion_percent_intensity,"of the max intensity"
print "The max intensity for light and heavy are"
print max_intensity_1, min_ion_percent_intensity * max_intensity_1
print max_intensity_2, min_ion_percent_intensity * max_intensity_2
print "These are the ions that are left after filtering."
for x,ions in enumerate([MS2_ions_1, MS2_ions_2]):
print "These are all the MS2 ions"
print '%8s'%("m/z"),'%8s'%("intensity") ,'%8s'%("percent max")
for ion in ions:
print '%8.3f'%(ion[0]),'%8.2f'%(ion[1]), '%8.4f'%(ion[1]/max_intensities[x])
print
print
print
M = [max_intensity_1, max_intensity_2]
S = [ sum(a[1] for a in MS2_ions_1), sum(a[1] for a in MS2_ions_2)]
isotopologue = pair[2]
PME = pair[3]
''' Look for shifters and non-shifters
For internal missed cleavages, will need to consider that B ions might be shifting and Y ions might be single or double (or more) shifted '''
isotopes = iso.split('_')
if screen_print:
print "These are the isotopes",isotopes
print "And these are the masses we will be checking."
for n in range(0,len(isotopes)+1): # need to consider the scenarios when the difference between ions in 0, 1... up to the full number of isotopes -
combos = itertools.combinations(isotopes,n) # get the possible combinations from the isotopes - so if ARG_LYS_ARG, gets ARG, LYS, ARG_LYS, etc.
combos = [a for a in combos]
combos = unique(combos)
for combo in combos:
m = 0 # start with adding a mass of 0. the first combo will be the empty set anyway. This will find the b ions for the non-shifters
for isotope in combo: # sequentially add each isotope mass
m += isotope_masses[isotope]
print "Mass to check",m
print
print
''' Group the ions into shifters and non-shifters '''
for ion in MS2_ions_1: # consider each MS2 in sequence from the first group
T = len(MS2_ions_2)
for n in range(0,len(isotopes)+1): # need to consider the scenarios when the difference between ions in 0, 1... up to the full number of isotopes -
combos = itertools.combinations(isotopes,n) # get the possible combinations from the isotopes - so if ARG_LYS_ARG, gets ARG, LYS, ARG_LYS, etc.
combos = [a for a in combos]
combos = unique(combos)
for combo in combos:
# if len(combo) == 0: # non-shifter
# x = 0
# else:
# x = 1
m = 0.0 # start with adding a mass of 0. the first combo will be the empty set anyway. This will find the b ions for the non-shifters
for isotope in combo: # sequentially add each isotope mass
m += isotope_masses[isotope]
if m == 0.0: # non-shifter
X = [ [0,0.0] ]# looking for non-shifters. First number is position in c (non-shift, shift, half-shift)
else:
X = [ [1,m],[2,m/2.0] ] # this sets up to look for shifters and half shifters and add to the c array accordingly
for XX in X: # for non-shifters, this is only one item- the x will be 0 and the m will be 0. But for shifters, this will be two items - first for full shift and second for half shift
x = XX[0] # place in array (0 = non-shift, 1 = shift, 2 = half-shift)
m = XX[1] # the shifted amount to look for
pos = bisect.bisect_left(MS2,ion[0]+m) # find the point of insertion in the group of MS2 ions from the heavy
#print "XXX",x,m
if pos>0 and same_ion_size_and_intensity([ion[0]+m,ion[1]],MS2_ions_2[pos-1],M):
c[x].append([ion,MS2_ions_2[pos-1]]) # tack the match onto the end of B or Y or both
elif pos<T and same_ion_size_and_intensity([ion[0]+m,ion[1]],MS2_ions_2[pos],M):
c[x].append([ion,MS2_ions_2[pos]])
not_matched_light = [a for a in MS2_ions_1 if (a not in [b[0] for b in c[0]] and a not in [b[0] for b in c[1]]) and a[1]/max_intensity_1 > min_ion_percent_intensity]
not_matched_heavy = [a for a in MS2_ions_2 if( a not in [b[0] for b in c[0]] and a not in [b[0] for b in c[1]]) and a[1]/max_intensity_2 > min_ion_percent_intensity]
if screen_print:
print "Max Intensities"
print '%8.3f'%(M[0]), '%8.3f'%(M[1])
header = ["m/z light","m/z heavy","diff m/z","intensity L","intensity H","normalized intensity L","normalized intensity H","Diff normalized intensity","Relative Intensity difference"]
out = sys.stdout
print "Non-shifters"
d = []
for line in c[0]:
d.append(['%8.3f'%(line[0][0]), '%8.3f'%(line[1][0]), '%8.3f'%(abs(line[1][0]-line[0][0])), '%8.3f'%(line[0][1]), '%8.3f'%(line[1][1]), '%8.3f'%(line[0][1]/M[0]), '%8.3f'%(line[1][1]/M[1]), '%8.3f'%(abs(line[0][1]/M[0]-line[1][1]/M[1])),'%8.3f'%(abs(line[0][1]/M[0]-line[1][1]/M[1]) / max( [line[0][1]/M[0],line[1][1]/M[1]] ))])
table = [header] + d
pprint_table(out, table)
print
print "Shifters"
d = []
for line in c[1]:
d.append(['%8.3f'%(line[0][0]), '%8.3f'%(line[1][0]), '%8.3f'%(abs(line[1][0]-line[0][0])), '%8.3f'%(line[0][1]), '%8.3f'%(line[1][1]), '%8.3f'%(line[0][1]/M[0]), '%8.3f'%(line[1][1]/M[1]), '%8.3f'%(abs(line[0][1]/M[0]-line[1][1]/M[1])),'%8.3f'%(abs(line[0][1]/M[0]-line[1][1]/M[1]) / max( [line[0][1]/M[0],line[1][1]/M[1]] ))])
table = [header] + d
pprint_table(out, table)
print
print "Half-Shifters"
d = []
for line in c[2]:
d.append(['%8.3f'%(line[0][0]), '%8.3f'%(line[1][0]), '%8.3f'%(abs(line[1][0]-line[0][0])), '%8.3f'%(line[0][1]), '%8.3f'%(line[1][1]), '%8.3f'%(line[0][1]/M[0]), '%8.3f'%(line[1][1]/M[1]), '%8.3f'%(abs(line[0][1]/M[0]-line[1][1]/M[1])),'%8.3f'%(abs(line[0][1]/M[0]-line[1][1]/M[1]) / max( [line[0][1]/M[0],line[1][1]/M[1]] ))])
table = [header] + d
pprint_table(out, table)
print
c.append([not_matched_light,not_matched_heavy]) # c is [[non-shiftersL, non-shiftersH], [shiftersL, shiftersH], [half-shiftersL, half-shiftersH], [not-matchedL, not-matchedH], [sumions1,sumions2]]
c.append(S)
mass_1 = d1.precursor_MW
mass_2 = d2.precursor_MW
final_score = 100.0 * float(len(c[0]) + 2 * len(c[1])) / mass_1 # final score uses number of non-shifters and shifters
all_matches.append([[q1,q2],c,[iso,isotopologue,PME], final_score])
return all_matches
#@print_timing
def validator_studies():
@print_timing
def validator_3_analysis():
if screen_print:
print
print "##################################"
print "######### VALIDATOR 3 ##########"
print "##################################"
print
pairs_3 = validator_3(query_dictionary) # [['7733', '7753'], ['ARG', 10.00827], 'N', -0.12918151875875858]
if screen_print:
print
print "##################################"
print "######### VALIDATOR 3e ##########"
print "##################################"
print
matches_3 = validator_3_function(pairs_3,query_dictionary) # get scores, each line is ([[q1,q2],c,[iso,isotopologue,PME], final_score]) where c has the b ions, y ions and non-matches
matches_3.sort(lambda a,b:cmp(b[3],a[3])) # sort by final score (B + 2Y for instance)
Q = query_dictionary
peptides_matched_light_heavy = []
proteins_matched_light_heavy = []
peptides_mismatched_light_heavy = []
proteins_mismatched_light_heavy = []
proteins_matched_any = 0
# these are the column headings and column widths.
<filename>ibllib/ephys/ephysqc.py
"""
Quality control of raw Neuropixel electrophysiology data.
"""
from pathlib import Path
import logging
import numpy as np
import pandas as pd
from scipy import signal
from scipy.ndimage import gaussian_filter1d
import alf.io
from brainbox.core import Bunch
from brainbox.processing import bincount2D
from ibllib.ephys import sync_probes
from ibllib.io import spikeglx
import ibllib.dsp as dsp
import ibllib.io.extractors.ephys_fpga as fpga
from ibllib.misc import print_progress, log2session_static
from phylib.io import model
_logger = logging.getLogger('ibllib')
RMS_WIN_LENGTH_SECS = 3
WELCH_WIN_LENGTH_SAMPLES = 1024
METRICS_PARAMS = {
'presence_bin_length_secs': 20,
"isi_threshold": 0.0015,
"min_isi": 0.000166,
"num_channels_to_compare": 13,
"max_spikes_for_unit": 500,
"max_spikes_for_nn": 10000,
"n_neighbors": 4,
'n_silhouette': 10000,
"quality_metrics_output_file": "metrics.csv",
"drift_metrics_interval_s": 51,
"drift_metrics_min_spikes_per_interval": 10
}
def rmsmap(fbin):
"""
Computes RMS map in time domain and spectra for each channel of Neuropixel probe
:param fbin: binary file in spike glx format (will look for attached metadata)
:type fbin: str or pathlib.Path
:return: a dictionary with amplitudes in channeltime space, channelfrequency space, time
and frequency scales
"""
if not isinstance(fbin, spikeglx.Reader):
sglx = spikeglx.Reader(fbin)
rms_win_length_samples = 2 ** np.ceil(np.log2(sglx.fs * RMS_WIN_LENGTH_SECS))
# the window generator will generate window indices
wingen = dsp.WindowGenerator(ns=sglx.ns, nswin=rms_win_length_samples, overlap=0)
# pre-allocate output dictionary of numpy arrays
win = {'TRMS': np.zeros((wingen.nwin, sglx.nc)),
'nsamples': np.zeros((wingen.nwin,)),
'fscale': dsp.fscale(WELCH_WIN_LENGTH_SAMPLES, 1 / sglx.fs, one_sided=True),
'tscale': wingen.tscale(fs=sglx.fs)}
win['spectral_density'] = np.zeros((len(win['fscale']), sglx.nc))
# loop through the whole session
for first, last in wingen.firstlast:
D = sglx.read_samples(first_sample=first, last_sample=last)[0].transpose()
# remove low frequency noise below 1 Hz
D = dsp.hp(D, 1 / sglx.fs, [0, 1])
iw = wingen.iw
win['TRMS'][iw, :] = dsp.rms(D)
win['nsamples'][iw] = D.shape[1]
# the last window may be smaller than what is needed for welch
if last - first < WELCH_WIN_LENGTH_SAMPLES:
continue
# compute a smoothed spectrum using welch method
_, w = signal.welch(D, fs=sglx.fs, window='hann', nperseg=WELCH_WIN_LENGTH_SAMPLES,
detrend='constant', return_onesided=True, scaling='density', axis=-1)
win['spectral_density'] += w.T
# print at least every 20 windows
if (iw % min(20, max(int(np.floor(wingen.nwin / 75)), 1))) == 0:
print_progress(iw, wingen.nwin)
return win
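# Hedged usage sketch (the file path is hypothetical):
#   win = rmsmap('/data/subject/2020-01-01/001/raw_ephys_data/probe00/xxx.ap.bin')
#   win['TRMS']              # (n_windows, n_channels) RMS per 3 s window
#   win['spectral_density']  # accumulated Welch PSD per channel on win['fscale']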
def extract_rmsmap(fbin, out_folder=None, force=False):
"""
Wrapper for rmsmap that outputs _ibl_ephysRmsMap and _ibl_ephysSpectra ALF files
:param fbin: binary file in spike glx format (will look for attached metadata)
:param out_folder: folder in which to store output ALF files. Default uses the folder in which
the `fbin` file lives.
:param force: do not re-extract if all ALF files already exist
    :return: list of the paths of the output ALF files (time RMS and spectral density)
"""
_logger.info(str(fbin))
sglx = spikeglx.Reader(fbin)
# check if output ALF files exist already:
if out_folder is None:
out_folder = Path(fbin).parent
else:
out_folder = Path(out_folder)
alf_object_time = f'_iblqc_ephysTimeRms{sglx.type.upper()}'
alf_object_freq = f'_iblqc_ephysSpectralDensity{sglx.type.upper()}'
if alf.io.exists(out_folder, alf_object_time) and \
alf.io.exists(out_folder, alf_object_freq) and not force:
_logger.warning(f'{fbin.name} QC already exists, skipping. Use force option to override')
return
# crunch numbers
rms = rmsmap(fbin)
# output ALF files, single precision with the optional label as suffix before extension
if not out_folder.exists():
out_folder.mkdir()
tdict = {'rms': rms['TRMS'].astype(np.single), 'timestamps': rms['tscale'].astype(np.single)}
fdict = {'power': rms['spectral_density'].astype(np.single),
'freqs': rms['fscale'].astype(np.single)}
out_time = alf.io.save_object_npy(out_folder, object=alf_object_time, dico=tdict)
out_freq = alf.io.save_object_npy(out_folder, object=alf_object_freq, dico=fdict)
return out_time + out_freq
@log2session_static('ephys')
def raw_qc_session(session_path, dry=False, force=False):
"""
    Wrapper that executes QC from a session folder and outputs the results within the same folder
    as the original raw data.
    :param session_path: path of the session (Subject/yyyy-mm-dd/number)
:param dry: bool (False) Dry run if True
:param force: bool (False) Force means overwriting an existing QC file
:return: None
"""
efiles = spikeglx.glob_ephys_files(session_path)
for efile in efiles:
if efile.get('ap') and efile.ap.exists():
print(efile.get('ap'))
if not dry:
extract_rmsmap(efile.ap, out_folder=None, force=force)
if efile.get('lf') and efile.lf.exists():
print(efile.get('lf'))
if not dry:
extract_rmsmap(efile.lf, out_folder=None, force=force)
def validate_ttl_test(ses_path, display=False):
"""
    For a mock session on the Ephys Choice world task, check that the sync channels of all
    devices are properly connected and, if dual probes are present, perform a synchronization
    to check that all channels are recorded properly
:param ses_path: session path
:param display: show the probe synchronization plot if several probes
:return: True if tests pass, errors otherwise
"""
def _single_test(assertion, str_ok, str_ko):
if assertion:
_logger.info(str_ok)
return True
else:
_logger.error(str_ko)
return False
EXPECTED_RATES_HZ = {'left_camera': 60, 'right_camera': 150, 'body_camera': 30}
SYNC_RATE_HZ = 1
MIN_TRIALS_NB = 6
ok = True
ses_path = Path(ses_path)
if not ses_path.exists():
return False
rawsync, sync_map = fpga._get_main_probe_sync(ses_path)
last_time = rawsync['times'][-1]
# get upgoing fronts for each
sync = Bunch({})
for k in sync_map:
fronts = fpga._get_sync_fronts(rawsync, sync_map[k])
sync[k] = fronts['times'][fronts['polarities'] == 1]
wheel = fpga.extract_wheel_sync(rawsync, chmap=sync_map, save=False)
frame_rates = {'right_camera': np.round(1 / np.median(np.diff(sync.right_camera))),
'left_camera': np.round(1 / np.median(np.diff(sync.left_camera))),
'body_camera': np.round(1 / np.median(np.diff(sync.body_camera)))}
# check the camera frame rates
for lab in frame_rates:
expect = EXPECTED_RATES_HZ[lab]
ok &= _single_test(assertion=abs((1 - frame_rates[lab] / expect)) < 0.1,
str_ok=f'PASS: {lab} frame rate: {frame_rates[lab]} = {expect} Hz',
str_ko=f'FAILED: {lab} frame rate: {frame_rates[lab]} != {expect} Hz')
# check that the wheel has a minimum rate of activity on both channels
re_test = abs(1 - sync.rotary_encoder_1.size / sync.rotary_encoder_0.size) < 0.1
re_test &= len(wheel['re_pos']) / last_time > 5
ok &= _single_test(assertion=re_test,
str_ok="PASS: Rotary encoder", str_ko="FAILED: Rotary encoder")
# check that the frame 2 ttls has a minimum rate of activity
ok &= _single_test(assertion=len(sync.frame2ttl) / last_time > 0.2,
str_ok="PASS: Frame2TTL", str_ko="FAILED: Frame2TTL")
# the audio has to have at least one event per trial
ok &= _single_test(assertion=len(sync.bpod) > len(sync.audio) > MIN_TRIALS_NB,
str_ok="PASS: audio", str_ko="FAILED: audio")
# the bpod has to have at least twice the amount of min trial pulses
ok &= _single_test(assertion=len(sync.bpod) > MIN_TRIALS_NB * 2,
str_ok="PASS: Bpod", str_ko="FAILED: Bpod")
try:
# note: tried to depend as little as possible on the extraction code but for the valve...
behaviour = fpga.extract_behaviour_sync(rawsync, save=False, chmap=sync_map)
res = behaviour.valve_open.size > 1
except AssertionError:
res = False
    # check that the reward valve is actuated at least once
ok &= _single_test(assertion=res,
str_ok="PASS: Valve open", str_ko="FAILED: Valve open not detected")
_logger.info('ALL CHECKS PASSED !')
# the imec sync is for 3B Probes only
if sync.get('imec_sync') is not None:
ok &= _single_test(assertion=np.all(1 - SYNC_RATE_HZ * np.diff(sync.imec_sync) < 0.1),
str_ok="PASS: imec sync", str_ko="FAILED: imec sync")
    # second step is to test that we can make the sync. Assertions are within the sync code
if sync.get('imec_sync') is not None:
sync_result = sync_probes.version3B(ses_path, display=display)
else:
sync_result = sync_probes.version3A(ses_path, display=display)
ok &= _single_test(assertion=sync_result, str_ok="PASS: synchronisation",
str_ko="FAILED: probe synchronizations threshold exceeded")
if not ok:
raise ValueError('FAILED TTL test')
return ok
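# Hypothetical usage sketch (the session path below is illustrative, not from the
# original source): run the TTL/sync checks on a mock session and show the probe
# synchronization plot when several probes are present.
#   validate_ttl_test('/data/Subjects/ZM_0000/2019-01-01/001', display=True)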
def _spike_sorting_metrics_ks2(ks2_path, save=True):
"""
    Given a path containing kilosort 2 output, compute quality metrics and optionally save them
    to a cluster_metrics.csv file
    :param ks2_path: path of the folder containing the kilosort 2 output
    :param save: if True, write the metrics to cluster_metrics.csv in ks2_path
    :return: pandas.DataFrame of quality metrics, one row per cluster
"""
m = phy_model_from_ks2_path(ks2_path)
r = spike_sorting_metrics(m.spike_times, m.spike_clusters, m.amplitudes, params=METRICS_PARAMS)
# includes the ks2 contamination
file_contamination = ks2_path.joinpath('cluster_ContamPct.tsv')
if file_contamination.exists():
contam = pd.read_csv(file_contamination, sep='\t')
contam.rename(columns={'ContamPct': 'ks2_contamination_pct'}, inplace=True)
r = r.set_index('cluster_id', drop=False).join(contam.set_index('cluster_id'))
# includes the ks2 labeling
file_labels = ks2_path.joinpath('cluster_KSLabel.tsv')
if file_labels.exists():
ks2_labels = pd.read_csv(file_labels, sep='\t')
ks2_labels.rename(columns={'KSLabel': 'ks2_label'}, inplace=True)
r = r.set_index('cluster_id', drop=False).join(ks2_labels.set_index('cluster_id'))
if save:
# the file name contains the label of the probe (directory name in this case)
        r.to_csv(ks2_path.joinpath('cluster_metrics.csv'))
return r
def spike_sorting_metrics(spike_times, spike_clusters, spike_amplitudes,
params=METRICS_PARAMS, epochs=None):
""" Spike sorting QC metrics """
cluster_ids = np.arange(np.max(spike_clusters) + 1)
nclust = cluster_ids.size
r = Bunch({
'cluster_id': cluster_ids,
'num_spikes': np.zeros(nclust, ) + np.nan,
'firing_rate': np.zeros(nclust, ) + np.nan,
'presence_ratio': np.zeros(nclust, ) + np.nan,
'presence_ratio_std': np.zeros(nclust, ) + np.nan,
'isi_viol': np.zeros(nclust, ) + np.nan,
'amplitude_cutoff': np.zeros(nclust, ) + np.nan,
'amplitude_std': np.zeros(nclust, ) + np.nan,
# 'isolation_distance': np.zeros(nclust, ) + np.nan,
# 'l_ratio': np.zeros(nclust, ) + np.nan,
# 'd_prime': np.zeros(nclust, ) + np.nan,
# 'nn_hit_rate': np.zeros(nclust, ) + np.nan,
# 'nn_miss_rate': np.zeros(nclust, ) + np.nan,
# 'silhouette_score': np.zeros(nclust, ) + np.nan,
# 'max_drift': np.zeros(nclust, ) + np.nan,
# 'cumulative_drift': np.zeros(nclust, ) + np.nan,
'epoch_name': np.zeros(nclust, dtype='object'),
})
tmin = 0
tmax = spike_times[-1]
"""computes basic metrics such as spike rate and presence ratio"""
presence_ratio = bincount2D(spike_times, spike_clusters,
xbin=params['presence_bin_length_secs'],
ybin=cluster_ids, xlim=[tmin, tmax])[0]
r.num_spikes = np.sum(presence_ratio > 0, axis=1)
r.firing_rate = r.num_spikes / params['presence_bin_length_secs']
r.presence_ratio = np.sum(presence_ratio > 0, axis=1) / presence_ratio.shape[1]
r.presence_ratio_std = np.std(presence_ratio, axis=1)
# loop over each cluster
for ic in np.arange(nclust):
# slice the spike_times array
ispikes = spike_clusters == cluster_ids[ic]
if np.all(~ispikes):
continue
st = spike_times[ispikes]
sa = spike_amplitudes[ispikes]
# compute metrics
r.isi_viol[ic], _ = isi_violations(st, tmin, tmax,
isi_threshold=params['isi_threshold'],
min_isi=params['min_isi'])
r.amplitude_cutoff[ic] = amplitude_cutoff(amplitudes=sa)
r.amplitude_std[ic] = np.std(sa)
| |
<reponame>opoplawski/scipy<filename>scipy/sparse/linalg/_expm_multiply.py<gh_stars>0
"""Compute the action of the matrix exponential.
"""
from __future__ import division, print_function, absolute_import
import math
import numpy as np
import scipy.linalg
import scipy.sparse.linalg
from scipy.sparse.linalg import LinearOperator
__all__ = ['expm_multiply']
def _exact_inf_norm(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return max(abs(A).sum(axis=1).flat)
else:
return np.linalg.norm(A, np.inf)
def _exact_1_norm(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return max(abs(A).sum(axis=0).flat)
else:
return np.linalg.norm(A, 1)
def _trace(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return A.diagonal().sum()
else:
return np.trace(A)
def _ident_like(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return scipy.sparse.construct.eye(A.shape[0], A.shape[1],
dtype=A.dtype, format=A.format)
else:
return np.eye(A.shape[0], A.shape[1], dtype=A.dtype)
def expm_multiply(A, B, start=None, stop=None, num=None, endpoint=None):
"""
Compute the action of the matrix exponential of A on B.
Parameters
----------
A : transposable linear operator
The operator whose exponential is of interest.
B : ndarray
The matrix or vector to be multiplied by the matrix exponential of A.
start : scalar, optional
The starting time point of the sequence.
stop : scalar, optional
The end time point of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced time points, so that `stop` is excluded.
Note that the step size changes when `endpoint` is False.
num : int, optional
Number of time points to use.
endpoint : bool, optional
If True, `stop` is the last time point. Otherwise, it is not included.
Returns
-------
expm_A_B : ndarray
The result of the action :math:`e^{t_k A} B`.
Notes
-----
The optional arguments defining the sequence of evenly spaced time points
are compatible with the arguments of `numpy.linspace`.
The output ndarray shape is somewhat complicated so I explain it here.
The ndim of the output could be either 1, 2, or 3.
It would be 1 if you are computing the expm action on a single vector
at a single time point.
It would be 2 if you are computing the expm action on a vector
at multiple time points, or if you are computing the expm action
on a matrix at a single time point.
It would be 3 if you want the action on a matrix with multiple
columns at multiple time points.
If multiple time points are requested, expm_A_B[0] will always
be the action of the expm at the first time point,
regardless of whether the action is on a vector or a matrix.
References
----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2011)
"Computing the Action of the Matrix Exponential,
with an Application to Exponential Integrators."
SIAM Journal on Scientific Computing,
33 (2). pp. 488-511. ISSN 1064-8275
http://eprints.ma.man.ac.uk/1591/
    .. [2] Nicholas J. Higham and Awad H. Al-Mohy (2010)
"Computing Matrix Functions."
Acta Numerica,
19. 159-208. ISSN 0962-4929
http://eprints.ma.man.ac.uk/1451/
"""
if all(arg is None for arg in (start, stop, num, endpoint)):
X = _expm_multiply_simple(A, B)
else:
X, status = _expm_multiply_interval(A, B, start, stop, num, endpoint)
return X
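# Hypothetical usage sketch (not part of the original module): apply exp(t*A) to a
# vector at 5 evenly spaced time points without ever forming the dense matrix
# exponential. The returned array has shape (5, 100).
def _example_expm_multiply():
    A = scipy.sparse.random(100, 100, density=0.01, format='csr')
    b = np.ones(100)
    return expm_multiply(A, b, start=0.0, stop=1.0, num=5, endpoint=True)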
def _expm_multiply_simple(A, B, t=1.0, balance=False):
"""
Compute the action of the matrix exponential at a single time point.
Parameters
----------
A : transposable linear operator
The operator whose exponential is of interest.
B : ndarray
The matrix to be multiplied by the matrix exponential of A.
t : float
A time point.
balance : bool
Indicates whether or not to apply balancing.
Returns
-------
F : ndarray
:math:`e^{t A} B`
Notes
-----
This is algorithm (3.2) in Al-Mohy and Higham (2011).
"""
if balance:
raise NotImplementedError
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if A.shape[1] != B.shape[0]:
raise ValueError('the matrices A and B have incompatible shapes')
ident = _ident_like(A)
n = A.shape[0]
if len(B.shape) == 1:
n0 = 1
elif len(B.shape) == 2:
n0 = B.shape[1]
else:
raise ValueError('expected B to be like a matrix or a vector')
u_d = 2**-53
tol = u_d
mu = _trace(A) / float(n)
A = A - mu * ident
A_1_norm = _exact_1_norm(A)
if t*A_1_norm == 0:
m_star, s = 0, 1
else:
ell = 2
norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
return _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol, balance)
def _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol=None, balance=False):
"""
A helper function.
"""
if balance:
raise NotImplementedError
if tol is None:
u_d = 2 ** -53
tol = u_d
F = B
eta = math.exp(t*mu / float(s))
for i in range(s):
c1 = _exact_inf_norm(B)
for j in range(m_star):
coeff = t / float(s*(j+1))
B = coeff * A.dot(B)
c2 = _exact_inf_norm(B)
F = F + B
if c1 + c2 <= tol * _exact_inf_norm(F):
break
c1 = c2
F = eta * F
B = F
return F
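# Sketch of the recurrence implemented above (comment added, not in the original
# source): with the trace-shifted operator A' = A - mu*I, each of the s stages
# evaluates a truncated Taylor series
#     B_{j+1} = t / (s*(j+1)) * A' @ B_j,     F <- F + B_{j+1},
# stopping early once the new terms are negligible relative to F, and then rescales
# by eta = exp(t*mu/s) to undo the shift. Repeating the stage s times yields
# F ~= exp(t*A) @ B.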
# This table helps to compute bounds.
# They seem to have been difficult to calculate, involving symbolic
# manipulation of equations, followed by numerical root finding.
_theta = {
# The first 30 values are from table A.3 of Computing Matrix Functions.
1: 2.29e-16,
2: 2.58e-8,
3: 1.39e-5,
4: 3.40e-4,
5: 2.40e-3,
6: 9.07e-3,
7: 2.38e-2,
8: 5.00e-2,
9: 8.96e-2,
10: 1.44e-1,
# 11
11: 2.14e-1,
12: 3.00e-1,
13: 4.00e-1,
14: 5.14e-1,
15: 6.41e-1,
16: 7.81e-1,
17: 9.31e-1,
18: 1.09,
19: 1.26,
20: 1.44,
# 21
21: 1.62,
22: 1.82,
23: 2.01,
24: 2.22,
25: 2.43,
26: 2.64,
27: 2.86,
28: 3.08,
29: 3.31,
30: 3.54,
# The rest are from table 3.1 of
# Computing the Action of the Matrix Exponential.
    35: 4.7,
    40: 6.0,
    45: 7.2,
    50: 8.5,
    55: 9.9,
}
class MatrixPowerOperator(LinearOperator):
def __init__(self, A, p):
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if p < 0:
raise ValueError('expected p to be a non-negative integer')
self._A = A
self._p = p
self.ndim = A.ndim
self.shape = A.shape
def matvec(self, x):
for i in range(self._p):
x = self._A.dot(x)
return x
def rmatvec(self, x):
for i in range(self._p):
x = x.dot(self._A)
return x
def matmat(self, X):
for i in range(self._p):
            X = self._A.dot(X)
return X
@property
def T(self):
return MatrixPowerOperator(self._A.T, self._p)
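# Hypothetical usage sketch (not in the original source): the operator applies A^p
# through repeated matvecs, so onenormest can probe ||A^p||_1 without forming the
# power explicitly.
#   op = MatrixPowerOperator(np.array([[2., 0.], [0., 3.]]), 3)
#   op.matvec(np.array([1., 1.]))   # -> array([ 8., 27.])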
def _onenormest_matrix_power(A, p,
t=2, itmax=5, compute_v=False, compute_w=False):
"""
Efficiently estimate the 1-norm of A^p.
Parameters
----------
A : ndarray
Matrix whose 1-norm of a power is to be computed.
p : int
Non-negative integer power.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
#XXX Eventually turn this into an API function in the _onenormest module,
#XXX and remove its underscore,
#XXX but wait until expm_multiply goes into scipy.
return scipy.sparse.linalg.onenormest(MatrixPowerOperator(A, p))
class LazyOperatorNormInfo:
"""
Information about an operator is lazily computed.
The information includes the exact 1-norm of the operator,
in addition to estimates of 1-norms of powers of the operator.
This uses the notation of Computing the Action (2011).
This class is specialized enough to probably not be of general interest
outside of this module.
"""
def __init__(self, A, A_1_norm=None, ell=2):
"""
Provide the operator and some norm-related information.
Parameters
----------
A : linear operator
The operator of interest.
A_1_norm : float, optional
The exact 1-norm of A.
ell | |
<reponame>mikhail-dvorkin/competitions<filename>adventofcode/y2018.py
#!/usr/bin/env python3
import adventofcode
import collections
import itertools
import numpy as np
import re
assembler_instructions = {
'addr': lambda r, a, b: r[a] + r[b],
'addi': lambda r, a, b: r[a] + b,
'mulr': lambda r, a, b: r[a] * r[b],
'muli': lambda r, a, b: r[a] * b,
'banr': lambda r, a, b: r[a] & r[b],
'bani': lambda r, a, b: r[a] & b,
'borr': lambda r, a, b: r[a] | r[b],
'bori': lambda r, a, b: r[a] | b,
'setr': lambda r, a, b: r[a],
'seti': lambda r, a, b: a,
'gtir': lambda r, a, b: int(a > r[b]),
'gtri': lambda r, a, b: int(r[a] > b),
'gtrr': lambda r, a, b: int(r[a] > r[b]),
'eqir': lambda r, a, b: int(a == r[b]),
'eqri': lambda r, a, b: int(r[a] == b),
'eqrr': lambda r, a, b: int(r[a] == r[b])
}
def assembler_instruction(r, instruction, a, b, c):
r[c] = assembler_instructions[instruction](r, a, b)
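# Hypothetical sketch (not part of the original solutions): executing a single
# 'addi' instruction on a 4-register machine mutates the register list in place.
#   r = [1, 2, 3, 0]
#   assembler_instruction(r, 'addi', 0, 7, 3)   # r[3] = r[0] + 7 -> [1, 2, 3, 8]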
def day1(s):
s = list(map(int, s.split()))
yield sum(s)
seen = {0}
for x in itertools.accumulate(itertools.cycle(s)):
if x in seen:
yield x
break
seen.add(x)
def day2(s):
s = s.split()
count_lines = [sum([i in collections.Counter(line).values() for line in s]) for i in (2, 3)]
yield np.prod(count_lines)
altered = [line[:i] + '$' + line[i + 1:] for line in s for i in range(len(line))]
yield next((k for (k, v) in collections.Counter(altered).items() if v > 1)).replace('$', '')
def day3(s):
s = s.split('\n')
count = collections.defaultdict(lambda: 0)
overlap = set()
uids = set()
def f1():
count[(i, j)] += 1
def f2():
if count[(i, j)] > 1:
overlap.add(uid)
for f in f1, f2:
for line in s:
uid, x, y, width, height = map(int, re.fullmatch(r'#(\d+)\s@\s(\d+),(\d+):\s(\d+)x(\d+)', line).groups())
uids.add(uid)
for i in range(x, x + width):
for j in range(y, y + height):
f()
yield len([x for x in count.values() if x > 1])
yield (uids - overlap).pop()
def day4(s):
s = sorted(s.split('\n'))
sleep_dict = {}
for line in s:
time, event = re.fullmatch(r'\[\d+-\d+-\d+\s\d+:(\d+)\]\s(.+)', line).groups()
time = int(time)
if event == 'falls asleep':
start = time
continue
if event != 'wakes up':
uid = int(re.fullmatch(r'Guard #(\d+) begins shift', event).group(1))
continue
sleep_dict.setdefault(uid, [])
sleep_dict[uid].append((start, time))
del start
sleep_count = dict([(k, [0] * 60) for k in sleep_dict])
for uid, sleeps in sleep_dict.items():
for start, end in sleeps:
for i in range(start, end):
sleep_count[uid][i] += 1
for f in sum, max:
selected = max([(uid, f(sleep_count[uid])) for uid in sleep_count], key=lambda item: item[1])[0]
yield selected * np.argmax(sleep_count[selected])
def day5(s):
def match(a, b):
return a.lower() == b.lower() and a != b
def process(s, forbidden=None):
stack = []
for c in s:
if c.lower() == forbidden:
continue
stack.append(c)
while len(stack) >= 2 and match(*stack[-2:]):
del stack[-2:]
return len(stack)
yield process(s)
yield min([process(s, chr(ord('a') + i)) for i in range(26)])
def day6(s, limit=10000):
s = [tuple(map(int, line.split(', '))) for line in s.split('\n')]
xmin, xmax, ymin, ymax = [f(coords) for coords in zip(*s) for f in [min, max]]
inf = (xmax - xmin + 1) * (ymax - ymin + 1)
count = [0] * len(s)
center = 0
for x in range(xmin, xmax + 1):
for y in range(ymin, ymax + 1):
dist, near, total = inf, None, 0
for i in range(len(s)):
cur = abs(s[i][0] - x) + abs(s[i][1] - y)
total += cur
if cur < dist:
dist, near = cur, i
elif cur == dist:
near = None
if near != None:
count[near] += 1 if xmin < x < xmax and ymin < y < ymax else inf
if total < limit:
center += 1
yield max([x for x in count if x < inf])
yield center
def day7(s, workers=(1, 5), time_penalty=60):
s = s.split('\n')
g = [re.fullmatch(r'Step (\S) must be finished before step (\S) can begin.', line).groups() for line in s]
for w in workers:
edges = g[:]
vertices = set(sum(edges, ()))
order, free, time, queue = '', w, 0, []
while queue or vertices:
while free and vertices:
possible = vertices - set([edge[1] for edge in edges])
if not possible:
break
v = min(possible)
queue.append((time + time_penalty + 1 + ord(v) - ord('A'), v))
free -= 1
vertices.remove(v)
order += v
time, v = min(queue)
queue.remove((time, v))
edges = [edge for edge in edges if edge[0] != v]
free += 1
yield order if w == 1 else time
def day8(s):
s = list(map(int, s.split()))
x = 0
def parse():
nonlocal x
kids_number = s[x]
x += 1
entries_number = s[x]
x += 1
kids = [parse() for _ in range(kids_number)]
entries = s[x:x + entries_number]
x += entries_number
return (kids, entries)
root = parse()
def sum_entries(v):
return sum(v[1]) + sum([sum_entries(u) for u in v[0]])
yield sum_entries(root)
def value(v):
if not v[0]:
return sum(v[1])
return sum([value(v[0][index - 1]) for index in v[1] if 0 <= index - 1 < len(v[0])])
yield value(root)
def day9(s, coef=100, period=23, stepback=7):
p, n = map(int, re.fullmatch(r'(\d+) players; last marble is worth (\d+) points', s).groups())
a = collections.deque([0])
score = [0] * p
for i in range(1, coef * n + 1):
if i % period:
a.extend([a.popleft(), i])
else:
for _ in range(stepback):
a.appendleft(a.pop())
score[(i - 1) % p] += i + a.pop()
a.append(a.popleft())
if i in [n, coef * n]:
yield max(score)
def day10(s):
s = s.split('\n')
points = [tuple(map(int, re.fullmatch(r'position=<\s*(\S+),\s*(\S+)> velocity=<\s*(\S+),\s*(\S+)>', line).groups())) for line in s]
xa, ya, vxa, vya = tuple(map(np.average, zip(*points)))
sa = sum([(x - xa) * (vx - vxa) + (y - ya) * (vy - vya) for x, y, vx, vy in points])
sb = sum([(vx - vxa) ** 2 + (vy - vya) ** 2 for x, y, vx, vy in points])
t = int(round(-sa / sb))
yield adventofcode.show_pixels([(x + t * vx, y + t * vy) for x, y, vx, vy in points])
yield t
def day11(n, m=300, simple=[3]):
def f(x, y):
r = x + 10
r = (r * y + n) * r
return r % 1000 // 100 - 5
a = [[f(x, y) for y in range(m)] for x in range(m)]
p = [[0 for y in range(m + 1)] for x in range(m + 1)]
for x in range(m):
for y in range(m):
p[x + 1][y + 1] = p[x + 1][y] + p[x][y + 1] - p[x][y] + a[x][y]
for sizes in [simple, range(1, m + 1)]:
best = (float("-inf"),)
for s in sizes:
for x in range(m - s + 1):
for y in range(m - s + 1):
cur = p[x + s][y + s] - p[x][y + s] - p[x + s][y] + p[x][y]
best = max(best, (cur, x, y, s))
best = best[1:3] if sizes == simple else best[1:]
yield ','.join(map(str, best))
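# Note on the technique above (comment added, not in the original): p is a 2D
# prefix-sum (summed-area table), so the total power of any s x s square with
# top-left corner (x, y) is obtained in O(1) as
#   p[x+s][y+s] - p[x][y+s] - p[x+s][y] + p[x][y]
# which is exactly what the innermost loop evaluates for every corner and size.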
def day12(s, simple=20, hard=50000000000, stable=128):
f, _, *s = s.split('\n')
f = f.split()[-1]
rules = dict([line.split(' => ') for line in s])
nei = len(next(iter(rules)))
ans = []
for gen in range(2 * stable):
ans.append(sum([i - (nei // 2) * gen for i in range(len(f)) if f[i] == '#']))
f = '.' * (nei - 1) + f + '.' * (nei - 1)
f = ''.join([rules[f[i:i + nei]] for i in range(len(f) + 1 - nei)])
yield ans[simple]
xs, ys = range(stable, 2 * stable), ans[-stable:]
yield int(round(np.poly1d(np.polyfit(xs, ys, 1))(hard)))
def day13(s):
s = s.split('\n')
carts = []
for y in range(len(s)):
s[y] = list(s[y])
for x in range(len(s[y])):
for char, dy, dx in [('^', -1, 0), ('v', 1, 0), ('<', 0, -1), ('>', 0, 1)]:
if s[y][x] == char:
s[y][x] = '-' if dy == 0 else '|'
cart = adventofcode.AttrDict(y=y, x=x, dy=dy, dx=dx, c=0, alive=True)
carts.append(cart)
occupied = dict([((cart.y, cart.x), cart) for cart in carts])
firstcrash = None
while len(carts) > 1:
carts.sort(key=lambda cart: (cart.y, cart.x))
for cart in carts:
if not cart.alive:
continue
del occupied[(cart.y, cart.x)]
cart.y += cart.dy
cart.x += cart.dx
if (cart.y, cart.x) in occupied:
firstcrash = firstcrash or cart
that = occupied[(cart.y, cart.x)]
cart.alive = that.alive = False
del occupied[(cart.y, cart.x)]
continue
occupied[(cart.y, cart.x)] = cart
if s[cart.y][cart.x] == '/':
cart.dy, cart.dx = -cart.dx, -cart.dy
elif s[cart.y][cart.x] == '\\':
cart.dy, cart.dx = cart.dx, cart.dy
elif s[cart.y][cart.x] == '+':
if cart.c == 0:
cart.dy, cart.dx = -cart.dx, cart.dy
elif cart.c == 2:
cart.dy, cart.dx = cart.dx, -cart.dy
cart.c = (cart.c + 1) % 3
carts = [cart for cart in carts if cart.alive]
for cart in firstcrash, carts[0]:
yield "{},{}".format(cart.x, cart.y)
def day14(n, a=[3, 7], pos=[0, 1], window=10):
searched = list(map(int, str(n)))
ans = [None] * 2
while not all(ans):
s = str(a[pos[0]] + a[pos[1]])
for d in map(int, s):
a.append(d)
if a[-len(searched):] == searched:
ans[1] = ans[1] or len(a) - len(searched)
pos = [(x + 1 + a[x]) % len(a) for x in pos]
if len(a) >= n + window:
ans[0] = ans[0] or ''.join(map(str, a[n:n + window]))
yield from ans
def day15(s, enemies='EG', empty='.', attack=3, health=200):
D = [(-1, 0), (0, -1), (0, 1), (1, 0)]
s = list(map(list, s.split('\n')))
hei, wid = len(s), len(s[0])
inf = max(health, hei * wid) + 1
f = None
def outside(y, x):
return y < 0 or y >= hei or x < 0 or x >= wid
def not_empty(y, x):
return outside(y, x) or f[y][x] != empty
def bfs(y, x):
dist = [[inf] * wid for _ in range(hei)]
dist[y][x] = 0
queue = [(y, x)]
index = 0
while index < len(queue):
y, x = queue[index]
index += 1
for dy, dx in D:
yy, xx = y + dy, x + dx
if not_empty(yy, xx) or dist[yy][xx] != inf:
continue
dist[yy][xx] = dist[y][x] + 1
queue.append((yy, xx))
return dist
def run(attack_high=attack):
nonlocal f
f = [s[i][:] for i in | |
doc
self._childView = view
if view:
view.SetFrame(self)
# self.Create(doc, view, frame, id, title, pos, size, style, name)
self._activeEvent = None
self._activated = 0
wx.EVT_ACTIVATE(self, self.OnActivate)
wx.EVT_CLOSE(self, self.OnCloseWindow)
if frame: # wxBug: For some reason the EVT_ACTIVATE event is not getting triggered for the first mdi client window that is opened so we have to do it manually
            mdiChildren = list(filter(lambda x: isinstance(x, wx.MDIChildFrame), frame.GetChildren()))
if len(mdiChildren) == 1:
self.Activate()
## # Couldn't get this to work, but seems to work fine with single stage construction
## def Create(self, doc, view, frame, id, title, pos, size, style, name):
## self._childDocument = doc
## self._childView = view
## if wx.MDIChildFrame.Create(self, frame, id, title, pos, size, style, name):
## if view:
## view.SetFrame(self)
## return True
## return False
def Activate(self): # Need this in case there are embedded sash windows and such, OnActivate is not getting called
"""
Activates the current view.
"""
if self._childView:
self._childView.Activate(True)
    def ProcessEvent(self, event):
"""
Processes an event, searching event tables and calling zero or more
suitable event handler function(s). Note that the ProcessEvent
method is called from the wxPython docview framework directly since
wxPython does not have a virtual ProcessEvent function.
"""
if self._activeEvent == event:
return False
self._activeEvent = event # Break recursion loops
if self._childView:
self._childView.Activate(True)
if not self._childView or not self._childView.ProcessEvent(event):
if not isinstance(event, wx.CommandEvent) or not self.GetParent() or not self.GetParent().ProcessEvent(event):
ret = False
else:
ret = True
else:
ret = True
self._activeEvent = None
return ret
def OnActivate(self, event):
"""
Sets the currently active view to be the frame's view. You may need to
override (but still call) this function in order to set the keyboard
focus for your subwindow.
"""
event.Skip()
if self._activated != 0:
return True
self._activated += 1
wx.MDIChildFrame.Activate(self)
if event.GetActive() and self._childView:
self._childView.Activate(event.GetActive())
self._activated = 0
def OnCloseWindow(self, event):
"""
Closes and deletes the current view and document.
"""
if self._childView:
ans = False
if not event.CanVeto():
ans = True
else:
ans = self._childView.Close(deleteWindow = False)
if ans:
self._childView.Activate(False)
self._childView.Destroy()
self._childView = None
if self._childDocument: # This isn't in the wxWindows codebase but the document needs to be disposed of somehow
self._childDocument.DeleteContents()
if self._childDocument.GetDocumentManager():
self._childDocument.GetDocumentManager().RemoveDocument(self._childDocument)
self._childDocument = None
self.Destroy()
else:
event.Veto()
else:
event.Veto()
def GetDocument(self):
"""
Returns the document associated with this frame.
"""
return self._childDocument
def SetDocument(self, document):
"""
Sets the document for this frame.
"""
self._childDocument = document
def GetView(self):
"""
Returns the view associated with this frame.
"""
return self._childView
def SetView(self, view):
"""
Sets the view for this frame.
"""
self._childView = view
def OnTitleIsModified(self):
"""
Add/remove to the frame's title an indication that the document is dirty.
If the document is dirty, an '*' is appended to the title
This method has been added to wxPython and is not in wxWindows.
"""
title = self.GetTitle()
if title:
if self.GetDocument().IsModified():
if title.endswith("*"):
return
else:
title = title + "*"
self.SetTitle(title)
else:
if title.endswith("*"):
title = title[:-1]
self.SetTitle(title)
else:
return
class DocPrintout(wx.Printout):
"""
DocPrintout is a default Printout that prints the first page of a document
view.
"""
def __init__(self, view, title="Printout"):
"""
Constructor.
"""
wx.Printout.__init__(self, title)
self._printoutView = view
def GetView(self):
"""
Returns the DocPrintout's view.
"""
return self._printoutView
def OnPrintPage(self, page):
"""
Prints the first page of the view.
"""
dc = self.GetDC()
ppiScreenX, ppiScreenY = self.GetPPIScreen()
ppiPrinterX, ppiPrinterY = self.GetPPIPrinter()
scale = ppiPrinterX/ppiScreenX
w, h = dc.GetSize()
pageWidth, pageHeight = self.GetPageSizePixels()
overallScale = scale * w / pageWidth
dc.SetUserScale(overallScale, overallScale)
if self._printoutView:
self._printoutView.OnDraw(dc)
return True
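    # Note on the scaling above (comment added, not in the original): the view draws
    # in screen pixels, so the DC is first scaled by ppiPrinterX/ppiScreenX to convert
    # screen pixels to printer pixels, then by w/pageWidth so the drawing fills the
    # printable page regardless of the DC resolution.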
def HasPage(self, pageNum):
"""
Indicates that the DocPrintout only has a single page.
"""
return pageNum == 1
def GetPageInfo(self):
"""
Indicates that the DocPrintout only has a single page.
"""
minPage = 1
maxPage = 1
selPageFrom = 1
selPageTo = 1
return (minPage, maxPage, selPageFrom, selPageTo)
#----------------------------------------------------------------------
# Command Classes
#----------------------------------------------------------------------
class Command(wx.Object):
"""
wxCommand is a base class for modelling an application command, which is
an action usually performed by selecting a menu item, pressing a toolbar
button or any other means provided by the application to change the data
or view.
"""
def __init__(self, canUndo = False, name = None):
"""
Constructor. wxCommand is an abstract class, so you will need to
derive a new class and call this constructor from your own constructor.
canUndo tells the command processor whether this command is undo-able.
You can achieve the same functionality by overriding the CanUndo member
function (if for example the criteria for undoability is context-
dependent).
name must be supplied for the command processor to display the command
name in the application's edit menu.
"""
self._canUndo = canUndo
self._name = name
def CanUndo(self):
"""
Returns true if the command can be undone, false otherwise.
"""
return self._canUndo
def GetName(self):
"""
Returns the command name.
"""
return self._name
def Do(self):
"""
Override this member function to execute the appropriate action when
called. Return true to indicate that the action has taken place, false
otherwise. Returning false will indicate to the command processor that
the action is not undoable and should not be added to the command
history.
"""
return True
def Undo(self):
"""
Override this member function to un-execute a previous Do. Return true
to indicate that the action has taken place, false otherwise. Returning
false will indicate to the command processor that the action is not
redoable and no change should be made to the command history.
How you implement this command is totally application dependent, but
typical strategies include:
Perform an inverse operation on the last modified piece of data in the
document. When redone, a copy of data stored in command is pasted back
or some operation reapplied. This relies on the fact that you know the
ordering of Undos; the user can never Undo at an arbitrary position in
        the command history.
Restore the entire document state (perhaps using document
transactioning). Potentially very inefficient, but possibly easier to
code if the user interface and data are complex, and an 'inverse
execute' operation is hard to write.
"""
return True
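# Hypothetical sketch of a concrete command (names are illustrative and not part of
# this module): a command that appends text to a document-like object exposing a
# `text` attribute, and undoes itself by removing what it appended.
class _AppendTextCommand(Command):
    def __init__(self, doc, text):
        """
        Constructor. Remembers the document and the text so Do/Undo can be replayed.
        """
        Command.__init__(self, canUndo=True, name="Append Text")
        self._doc = doc
        self._text = text
    def Do(self):
        self._doc.text = self._doc.text + self._text
        return True
    def Undo(self):
        self._doc.text = self._doc.text[:-len(self._text)]
        return True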
class CommandProcessor(wx.Object):
"""
wxCommandProcessor is a class that maintains a history of wxCommands, with
undo/redo functionality built-in. Derive a new class from this if you want
different behaviour.
"""
def __init__(self, maxCommands=-1):
"""
Constructor. maxCommands may be set to a positive integer to limit
the number of commands stored to it, otherwise (and by default) the
list of commands can grow arbitrarily.
"""
self._maxCommands = maxCommands
self._editMenu = None
self._undoAccelerator = _("Ctrl+Z")
self._redoAccelerator = _("Ctrl+Y")
self.ClearCommands()
def _GetCurrentCommand(self):
if len(self._commands) == 0:
return None
else:
return self._commands[-1]
def _GetCurrentRedoCommand(self):
if len(self._redoCommands) == 0:
return None
else:
return self._redoCommands[-1]
def GetMaxCommands(self):
"""
Returns the maximum number of commands that the command processor
stores.
"""
return self._maxCommands
def GetCommands(self):
"""
Returns the list of commands.
"""
return self._commands
def ClearCommands(self):
"""
Deletes all the commands in the list and sets the current command
pointer to None.
"""
self._commands = []
self._redoCommands = []
def GetEditMenu(self):
"""
Returns the edit menu associated with the command processor.
"""
return self._editMenu
def SetEditMenu(self, menu):
"""
Tells the command processor to update the Undo and Redo items on this
menu as appropriate. Set this to NULL if the menu is about to be
destroyed and command operations may still be performed, or the
command processor may try to access an invalid pointer.
"""
self._editMenu = menu
def GetUndoAccelerator(self):
"""
Returns the string that will be appended to the Undo menu item.
"""
return self._undoAccelerator
def SetUndoAccelerator(self, accel):
"""
        Sets the string that will be appended to the Undo menu item.
"""
self._undoAccelerator = accel
def GetRedoAccelerator(self):
"""
Returns the string that will be appended to the Redo menu item.
"""
return self._redoAccelerator
def SetRedoAccelerator(self, accel):
"""
Sets the string that will be appended to the Redo menu item.
"""
self._redoAccelerator = accel
def SetMenuStrings(self):
"""
Sets the menu labels according to the | |
model arg must be a ModelConfig instead of nn.Module."
assert not isinstance(self.model, ModelConfig), error_msg2
# initialize logger
self.logger = _Logger(n_classes=n_classes, criteria=criteria, verbose=verbose)
def initialize(self, device: str, pretrained_param_path=None, n_threads=12):
"""
Initialize the model, optimizer and the scheduler.
Put the model on the specified device.
Optionally, load the pretrained params and replace the head module (if new_head is not None) for transfer learning.
:param device: which device to be trained on e.g. 'cpu' - on cpu, 'cuda:0' - on gpu 0, 'cuda:1' - on gpu 1
:param pretrained_param_path: path to pretrained model param
:param n_threads: configure the thread usage, only applicable when using cpu for computing
:return: void
"""
if device == 'cpu':
torch.set_num_threads(n_threads)
self.device = device
# move model to device before initialize the optimizers and scheduler
if self.agent == 'trainer':
self.model = self.model.to(device)
self.loss_module = self.loss_module.to(device)
# initialize the optimizer and scheduler
if self.optimizer is not None:
self.optimizer = self.optimizer(self.model.parameters())
# use scheduler
if self.scheduler is not None:
self.scheduler = self.scheduler(optimizer=self.optimizer)
# load the pretrained param if provided
if pretrained_param_path is not None:
# sanity check
error_msg = 'pretrained_param_path must end with .pth'
assert isinstance(pretrained_param_path, str) and pretrained_param_path.endswith('pth'), error_msg
meta_state = torch.load(f=pretrained_param_path, map_location='cpu')
# load the model state
# compatible with pure model state or the logger output
model_state = meta_state['model_state'] if 'model_state' in meta_state else meta_state
self.model.load_state_dict(model_state)
# turn on/off the requires_grads
for (param_name, param) in self.model.named_parameters():
requires_grad = True
for frozen_block in self.blocks_to_freeze:
if param_name.startswith(frozen_block): # decide which block to freeze based on the prefix
requires_grad = False
print("Param {} requires_grad was turned off".format(param_name))
param.requires_grad = requires_grad
# replace the old head with new head
if self.new_head is not None:
# sanity check, the model must have the head_block
                error_msg = 'Model does not have the head_block. Head replacement only supports models created by the Builder class'
assert 'head_block' in [name[0] for name in self.model.named_modules()], error_msg
# set it to the new head
new_head = AvgPoolFCHead(**self.new_head) if isinstance(self.new_head, dict) else self.new_head
new_head = new_head.to(self.device)
self.model.head_block = new_head
print('Head module has been replaced.')
elif self.agent == 'evaluator':
# load the model
if not self.manual_load_model_param:
self.load_checkpoint()
self.model = self.model.to(device)
def load_model_params(self, model_state_dict: dict):
"""
Load pre-trained model params
:param model_state_dict: model state dict
:return: void
"""
self.model.load_state_dict(model_state_dict)
def load_checkpoint(self):
"""
Load the model check point directly from the checkpoint folder
:return: void
"""
# load metastate
metastate_dict = torch.load(os.path.join(self.checkpoint_folder, "{}_metastate_best.pth".format(self.prefix)),
map_location=torch.device('cpu'))
# load model param
self.model.load_state_dict(metastate_dict['model_state'])
# only load the optimizer, scheduler and logger for trainer
if self.agent == 'trainer':
# load the epoch number
self.base_epoch = metastate_dict['epoch']
# load optimizer and scheduler
if self.optimizer is not None:
self.optimizer: Optimizer
self.optimizer.load_state_dict(metastate_dict['optimizer_state'])
if self.scheduler is not None:
self.scheduler.load_state_dict(metastate_dict['scheduler_state'])
# load the logger
self.logger.load(metastate_dict['logger_state'])
def save_checkpoint(self, *args):
pass
def get_logger(self):
"""
Getter to get the Logger
:return: Logger
"""
return self.logger
def reset_logger(self):
"""
Reset the logger
:return: void
"""
self.logger.reset()
def use_notebook(self):
"""
Switch to notebook mode - affect the tqdm pbar
:return: void
"""
self.running_env = 'notebook'
def use_terminal(self):
"""
Switch to terminal mode - affect the tqdm pbar
:return: void
"""
self.running_env = 'terminal'
class Trainer(__BaseAgent):
"""
TODO: tutorials
TODO: make it able to choose micro/macro for model selection
"""
def __init__(self, model: nn.Module or Builder or ModelConfig, loss_module: nn.Module,
n_classes: int, criteria: str,
optimizer: partial or torch.optim.optimizer.Optimizer, scheduler: partial or None,
prefix: str, checkpoint_folder: str,
new_head=None, blocks_to_freeze=None, verbose=False):
"""
Constructor
:param model: model architecture (nn.Module) or a Builder/Config Instance that builds the model. For transfer learning, pass the Config
Instance.
:param loss_module: loss module
:param n_classes: number of classes
:param criteria: criteria for model selection: accuracy, precision, recall and f1 score (will use macro average)
:param optimizer: parameter optimizer. DO NOT pass the instance (i.e. DO NOT do encoder_optimizer=Adam(...)) instead, just pass the Class
interface (i.e. encoder_optimizer=Adam) or, if need to specify the optimizer params, use partial: (e.g. encoder_optimizer=partial(Adam,
lr=1e-5, ...)) instance initialization will be handled by the trainer
:param scheduler: learning rate scheduler. pass the interface instead of the instance
:param prefix: checkpoint naming prefix
:param checkpoint_folder: checkpoint
        :param new_head: arg for transfer learning. If an nn.Module is passed, it will replace the current head block.
:param blocks_to_freeze: arg for transfer learning. Pass the block prefix to freeze the blocks, e.g. ['init_block'] will freeze all
parameters whose name starting with 'init_block'.
:param verbose: whether to print out additional message for debugging
"""
super().__init__(model=model, loss_module=loss_module,
n_classes=n_classes, criteria=criteria, verbose=verbose,
optimizer=optimizer, scheduler=scheduler,
prefix=prefix, checkpoint_folder=checkpoint_folder,
new_head=new_head, blocks_to_freeze=blocks_to_freeze)
self.agent = 'trainer'
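    # Hypothetical usage sketch (names and hyper-parameters are illustrative, not part
    # of this module): pass the optimizer and scheduler as partials so the trainer can
    # bind model.parameters() itself during initialize().
    #   trainer = Trainer(model=my_model, loss_module=my_loss,
    #                     n_classes=10, criteria='f1',
    #                     optimizer=partial(torch.optim.Adam, lr=1e-4),
    #                     scheduler=partial(ReduceLROnPlateau, patience=3),
    #                     prefix='run1', checkpoint_folder='./checkpoints')
    #   trainer.initialize(device='cuda:0')
    #   trainer.train(datahandler=my_datahandler, epochs=50, seed=42)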
def train(self, datahandler: DataHandler, epochs, seed):
"""
Model training pipeline with loss & performance metrics logging & automatic checkpoint
:param datahandler: data handler
:param epochs: total number of training epochs
:param seed: seed for random state
:return:
"""
assert isinstance(self.optimizer, Optimizer), 'Need to call initialize() before train()!'
# fix random state
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# fix the cudnn backend (might break the code if pytorch refactored this)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# memory monitoring
memory_usage = {'train': -1, 'eval': -1}
# placeholder
epoch_recorder = {'train': {},
'eval': {}}
# main loop
for epoch in self.pbar[self.running_env](range(self.base_epoch, epochs), total=epochs - self.base_epoch, desc='Epochs'):
print("=====Start Epoch {}======\n".format(epoch))
for phase in ['train', 'eval']:
                # switch phase setting - only collect the feature/temporal importance during training time
if phase == 'train':
self.model.train()
self.loss_module.train()
else:
self.model.eval()
self.loss_module.eval() # might not be necessary as the loss function does not hold any params
pbar_msg = 'Epoch {} Phase {}'.format(epoch, phase)
for i, mini_batch in self.pbar[self.running_env](enumerate(datahandler[phase]), total=len(datahandler[phase]), desc=pbar_msg):
# zero the gradient
if phase == 'train':
self.optimizer.zero_grad()
# grab X, Y
if isinstance(mini_batch, dict):
X_mini_batch, Y_mini_batch = mini_batch['x'], mini_batch['y']
else:
X_mini_batch, Y_mini_batch = mini_batch
# move to computing device
X_mini_batch = X_mini_batch.to(self.device)
Y_mini_batch = Y_mini_batch.to(self.device)
# enable the gradient flow if in training phase
with torch.set_grad_enabled(phase == 'train'):
# run through model and get the output_scores
output_scores = self.model(X_mini_batch)
# TODO: refactor the loss_module forward pass
loss = self.loss_module(X=output_scores, Y=Y_mini_batch)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
self.optimizer.step()
# batch logging - ground truth, prediction, batch loss
loss_cpu = loss.cpu().detach().tolist()
ground_truth = Y_mini_batch.cpu().detach().view(-1).tolist()
predictions = output_scores.cpu().detach().argmax(-1).view(-1).tolist()
self.logger.login_batch(phase=phase, ground_truth=ground_truth, predictions=predictions,
loss=loss_cpu)
if self.device != 'cpu':
memory_usage[phase] = torch.cuda.memory_allocated(device=self.device)
# clear the memory
del loss
del output_scores
del X_mini_batch
del Y_mini_batch
# ITERATION END - login the compute the performance metrics
epoch_loss, acc, selected_metric, find_better_model = self.logger.login_epoch(phase=phase, criteria=self.criteria, epoch=epoch)
epoch_recorder[phase]['loss'] = np.round(epoch_loss, 4)
epoch_recorder[phase]['acc'] = np.round(acc, 4)
epoch_recorder[phase]['perf'] = np.round(selected_metric, 4)
# EPOCH END - done with train & eval
# report
print("+++ Epoch {} Report +++".format(epoch))
print("Train Loss: {} Train Accuracy: {} Train {}: {}".format(epoch_recorder['train']['loss'], epoch_recorder['train']['acc'],
self.criteria, epoch_recorder['train']['perf']))
print("Eval Loss: {} Eval Accuracy: {} Eval {}: {}".format(epoch_recorder['eval']['loss'], epoch_recorder['eval']['acc'],
self.criteria, epoch_recorder['eval']['perf']))
if self.device != 'cpu':
print("Memory usage during train: {} | Memory usage during eval: {}".format(memory_usage['train'], memory_usage['eval']))
# scheduler step
if self.scheduler is not None:
                if isinstance(self.scheduler, ReduceLROnPlateau):
                    # step on the epoch-level eval loss (the mini-batch `loss` was deleted above)
                    self.scheduler.step(epoch_recorder['eval']['loss'])
else:
self.scheduler.step()
# checkpoint
if find_better_model:
print('+++')
print("Better model found at epoch={}, eval accuracy={}, eval {}={}".format(epoch, acc, self.criteria, selected_metric))
self.save_checkpoint(epoch=epoch, last_epoch=False)
print('Checkpoint saved.')
print('+++')
print("\n======End Epoch {}=======\n".format(epoch))
# for last epoch
if epoch == epochs - 1:
self.save_checkpoint(epoch, last_epoch=True)
def save_checkpoint(self, epoch, last_epoch=False):
"""
Save the model check point
:param epoch: epoch number
:param last_epoch: saving mode for last epoch
:return: void
"""
# epoch number, logger, model, optimizer and scheduler
metastate_dict = {'epoch': epoch + 1,
'logger_state': self.logger.get_logger_state(),
'model_state': self.model.state_dict()}
# optimizer and scheduler
if self.optimizer is not None:
self.optimizer: Optimizer
metastate_dict['optimizer_state'] = self.optimizer.state_dict()
if self.scheduler is not None:
metastate_dict['scheduler_state'] = self.scheduler.state_dict()
# model structure text and training date
string_rep = str(self.model)
with open(os.path.join(self.checkpoint_folder, '{}_model_arch.txt'.format(self.prefix)), 'w') as fp:
now = datetime.now()
now = now.strftime("%m/%d/%Y, %H:%M:%S")
fp.writelines(now + '\n')
fp.writelines(string_rep)
# save the meta state
        if not last_epoch:  # the best epoch so far
            torch.save(metastate_dict, os.path.join(self.checkpoint_folder, "{}_metastate_best.pth".format(self.prefix)))
        else:  # an additional checkpoint for the last epoch
            torch.save(metastate_dict, os.path.join(self.checkpoint_folder, "{}_metastate_last.pth".format(self.prefix)))
class Evaluator(__BaseAgent):
"""
TODO: describe | |
the velocity head difference between the end of
the outlet conduit and the destination node: <font color=blue>%s</font><br><br>
If less than 0, number of table specifying
the pump speed as a function of time. If greater than 0, operation block
number controlling the pump. If 0, pump will run at its base
speed all the time: <font color=blue>%s</font><br><br>
Optional name for the pump (4 characters
max): <font color=blue>%s</font><br><br>
Elevation of the outlet conduit: <font color=blue>%s</font><br><br>
Area of the outlet conduit when flowing
full: <font color=blue>%s</font><br><br>
Elevation of the inlet conduit: <font color=blue>%s</font><br><br>
Factor on the velocity head at the
source node: <font color=blue>%s</font><br><br>
                   Factor on the velocity head at the
destination node: <font color=blue>%s</font><br><br>
Comments: <font color=blue>%s</font>
""" % (mtype, n2, n3, n4, n5, n6, n7, n8, n9, n10, f1, f2, \
f3, f4, f5, com)
elif mtype == "4":
while rtext[cc:ee] == " ": #
cc += 1 #
ee = cc + 1 # ------------------------------------------
if ee > maxee: # Here the code will read and assign a value
break # to variable 'n5'
while not rtext[cc:ee] == " ": #
if rtext[cc:ee] == ".": #
mvar = "mfloat" #
n5 = " " #
n6 = " "
n7 = " "
n8 = " "
n9 = " "
n10 = " "
break #
elif rtext[cc:ee] == "'": #
mvar = "mcom" #
n5 = " " #
n6 = " "
n7 = " "
n8 = " "
n9 = " "
n10 = " "
break #
cc += 1 #
ee = cc + 1 #
if ee > maxee: #
break
if mvar == "mint":
n5 = rtext[st:cc]
st = cc
while rtext[cc:ee] == " ": # 'n6'
cc += 1
ee = cc + 1
if ee > maxee:
break
while not rtext[cc:ee] == " ":
if rtext[cc:ee] == ".":
mvar = "mfloat"
n6 = " "
n7 = " "
n8 = " "
n9 = " "
n10 = " "
break
elif rtext[cc:ee] == "'":
mvar = "mcom"
n6 = " "
n7 = " "
n8 = " "
n9 = " "
n10 = " "
break
cc += 1
ee = cc + 1
if ee > maxee:
break
if mvar == "mint":
n6 = rtext[st:cc]
st = cc
while rtext[cc:ee] == " ": # 'n7'
cc += 1
ee = cc + 1
if ee > maxee:
break
while not rtext[cc:ee] == " ":
if rtext[cc:ee] == ".":
mvar = "mfloat"
n7 = " "
n8 = " "
n9 = " "
n10 = " "
break
elif rtext[cc:ee] == "'":
mvar = "mcom"
n7 = " "
n8 = " "
n9 = " "
n10 = " "
break
cc += 1
ee = cc + 1
if ee > maxee:
break
if mvar == "mint":
n7 = rtext[st:cc]
st = cc
while rtext[cc:ee] == " ": # 'n8'
cc += 1
ee = cc + 1
if ee > maxee:
break
while not rtext[cc:ee] == " ":
if rtext[cc:ee] == ".":
mvar = "mfloat"
n8 = " "
n9 = " "
n10 = " "
break
elif rtext[cc:ee] == "'":
mvar = "mcom"
n8 = " "
n9 = " "
n10 = " "
break
cc += 1
ee = cc + 1
if ee > maxee:
break
if mvar == "mint":
n8 = rtext[st:cc]
st = cc
while rtext[cc:ee] == " ": # 'n9'
cc += 1
ee = cc + 1
if ee > maxee:
break
while not rtext[cc:ee] == " ":
if rtext[cc:ee] == ".":
mvar = "mfloat"
n9 = " "
n10 = " "
break
elif rtext[cc:ee] == "'":
mvar = "mcom"
n9 = " "
n10 = " "
break
cc += 1
ee = cc + 1
if ee > maxee:
break
if mvar == "mint":
n9 = rtext[st:cc]
st = cc
while rtext[cc:ee] == " ": # 'n10'
cc += 1
ee = cc + 1
if ee > maxee:
break
while not rtext[cc:ee] == " ":
if rtext[cc:ee] == ".":
mvar = "mfloat"
n10 = " "
break
elif rtext[cc:ee] == "'":
mvar = "mcom"
n10 = " "
break
cc += 1
ee = cc + 1
if ee > maxee:
break
if mvar == "mint":
n10 = rtext[st:cc]
st = cc
mvar = "mfloat"
else:
while not rtext[cc:ee] == " ":
cc -=1
ee = cc + 1
else:
while not rtext[cc:ee] == " ":
cc -= 1
ee = cc + 1
else:
while not rtext[cc:ee] == " ":
cc -= 1
ee = cc + 1
else:
while not rtext[cc:ee] == " ":
cc -= 1
ee = cc + 1
else:
while not rtext[cc:ee] == " ":
cc -= 1
ee = cc + 1
else:
while not rtext[cc:ee] == " ": # If the variable is a float or a comment,
cc -= 1 # the code will go back to the beginning of
ee = cc + 1 # the variable to read again
if mvar == "mfloat":
while rtext[cc:ee] == " ": # 'f1'
cc += 1
ee = cc + 1
if ee > maxee:
break
while not rtext[cc:ee] == " ":
if rtext[cc:ee] == "'":
mvar = "mcom"
f1 = " "
f2 = " "
f3 = " "
f4 = " "
f5 = " "
break
cc += 1
ee = cc + 1
if ee > maxee:
break
if mvar == "mfloat":
f1 = rtext[st:cc]
st = cc
while rtext[cc:ee] == " ": # 'f2'
cc += 1
ee = cc + 1
if ee > maxee:
break
while not rtext[cc:ee] == " ":
if rtext[cc:ee] == "'":
mvar = "mcom"
f2 = " "
f3 = " "
f4 = " "
f5 = " "
break
cc += 1
ee = cc + 1
if ee > maxee:
break
if mvar == "mfloat":
f2 = rtext[st:cc]
st = cc
while rtext[cc:ee] == " ": # 'f3'
cc += 1
ee = cc + 1
if ee > maxee:
break
while not rtext[cc:ee] == " ":
if rtext[cc:ee] == "'":
mvar = "mcom"
f3 = " "
f4 = " "
f5 = " "
break
cc += 1
ee = cc + 1
if ee > maxee:
break
if mvar == "mfloat":
f3 = rtext[st:cc]
st = cc
while rtext[cc:ee] == " ": # 'f4'
cc += 1
ee = cc + 1
if ee > maxee:
break
while not rtext[cc:ee] == " ":
if rtext[cc:ee] == "'":
mvar = "mcom"
f4 = " "
f5 = " "
break
cc += 1
ee = cc + 1
if ee > maxee:
break
if mvar == "mfloat":
f4 = rtext[st:cc]
st = cc
mvar = "mcom"
else:
while not rtext[cc:ee] == " ":
cc -= 1
ee = cc + 1
else:
while not rtext[cc:ee] == " ":
cc -= 1
ee = cc + 1
else:
while not rtext[cc:ee] == " ":
cc -= 1
ee = cc + 1
else:
while not rtext[cc:ee] == " ":
cc -= 1
ee = cc + 1
if mvar == "mcom":
com = rtext[st:]
textback += """Type <font color=blue>%s</font>: bridge with flow over the
roadway.<br><br>
Upstream node: <font color=blue>%s</font><br><br>
Downstream node: <font color=blue>%s</font><br><br>
Node at which the flow through the
structure is specified: <font color=blue>%s</font><br><br>
Number of the table specifying
the bridge-loss coefficient as a function of water-surface
height at the bridge opening for positive flow: <font color=blue>%s</font><br><br>
Number of the table specifying
the bridge-loss coefficient as a function of water-surface
height at the bridge opening for | |
and len(cname) > 0:
self.write_attribute(NAME, cname)
if idc.is_bf(eid) == True:
self.write_numeric_attribute("BIT_MASK", mask)
self.close_tag()
def export_enum_references(self, addr):
"""
Finds and exports enum references at an address.
Args:
addr: Integer representing the instruction address.
"""
f = idc.get_full_flags(addr)
for op in range(2):
if ida_bytes.is_enum(f, op) == True:
self.export_enum_reference(addr, op)
def export_enums(self):
"""
Exports enumerations.
"""
num_enums = idc.get_enum_qty()
if (num_enums == 0):
return
for i in range(num_enums):
self.start_element(ENUM)
eid = idc.getn_enum(i)
ename = idc.get_enum_name(eid)
if (ename == None or len(ename) == 0):
continue
self.write_attribute(NAME, ename)
ewidth = idc.get_enum_width(eid)
if ewidth != 0 and ewidth <= 7:
self.write_numeric_attribute(SIZE, 1 << (ewidth - 1), 10)
eflags = idc.get_enum_flag(eid)
bf = idc.is_bf(eid)
# BIT_FIELD attribute not supported for ENUM export
# if bf == True:
# self.write_attribute(BIT_FIELD, "yes")
regcmt = idc.get_enum_cmt(eid, False)
rptcmt = idc.get_enum_cmt(eid, True)
has_children = ((idc.get_enum_size(eid) > 0) or
(regcmt != None) or (rptcmt != None) or
(ida_bytes.get_radix(eflags, 0) != 16) or
(self.is_signed_data(eflags) == True))
self.close_tag(has_children)
if (ida_bytes.get_radix(eflags, 0) != 16 or
self.is_signed_data(eflags) == True):
self.start_element(DISPLAY_SETTINGS)
if ida_bytes.get_radix(eflags, 0) != 16:
self.write_attribute(FORMAT, self.get_format(eflags))
if self.is_signed_data(eflags) == True:
self.write_attribute(SIGNED, "yes")
self.close_tag()
if regcmt != None:
self.export_regular_cmt(regcmt)
if rptcmt != None:
self.export_repeatable_cmt(rptcmt)
self.export_enum_members(eid, bf, eflags)
if (has_children):
self.end_element(ENUM)
def export_extra_comment(self, addr, cmt_type, extra):
"""
Exports pre- and post- comments for an address.
Args:
addr: Integer representing the instruction address.
cmt_type: String indicating comment type
extra: Integer representing extra comment index
"""
cmt = ''
nextline = idc.get_extra_cmt(addr, extra)
while (nextline != None):
# workaround for tag_remove bug is to add space
cmt += ida_lines.tag_remove(nextline + ' ')
extra += 1
nextline = idc.get_extra_cmt(addr, extra)
if (nextline != None):
cmt += '\n'
self.export_comment(addr, cmt_type, cmt)
def export_functions(self):
"""
Exports information about all functions.
"""
functions = idautils.Functions()
if functions == None:
return
self.update_status(FUNCTIONS)
timer = time.clock()
self.start_element(FUNCTIONS, True)
for addr in functions:
function = ida_funcs.get_func(addr)
if ida_segment.is_spec_ea(function.start_ea) == True:
continue
self.start_element(FUNCTION)
self.write_address_attribute(ENTRY_POINT, function.start_ea)
if ida_bytes.has_user_name(idc.get_full_flags(addr)) == True:
name = self.get_symbol_name(addr)
if name != None and len(name) > 0:
self.write_attribute(NAME, name)
if function.flags & idc.FUNC_LIB != 0:
self.write_attribute(LIBRARY_FUNCTION, "y")
self.close_tag(True)
fchunks = idautils.Chunks(addr)
for (startEA, endEA) in fchunks:
self.start_element(ADDRESS_RANGE)
self.write_address_attribute(START, startEA)
self.write_address_attribute(END, endEA - 1)
self.close_tag()
regcmt = ida_funcs.get_func_cmt(function, False)
if regcmt != None:
self.export_regular_cmt(regcmt)
rptcmt = ida_funcs.get_func_cmt(function, True)
if rptcmt != None:
self.export_repeatable_cmt(rptcmt)
demangled = ida_name.get_demangled_name(addr,
DEMANGLED_TYPEINFO,
self.inf.demnames, True)
if demangled != None and demangled == "'string'":
demangled = None
outbuf = ''
# TODO: How to handle print_type for function typeinfo cmts
#outbuf = idaapi.print_type(addr, False)
has_typeinfo = (demangled != None or (outbuf != None and
len(outbuf) > 0))
if demangled != None:
self.export_typeinfo_cmt(demangled)
elif has_typeinfo == True:
self.export_typeinfo_cmt(outbuf[:-1])
self.export_stack_frame(function)
self.end_element(FUNCTION)
self.end_element(FUNCTIONS)
self.display_cpu_time(timer)
def export_manual_instruction(self, addr):
"""
Exports user-entered "manual instruction" at an address.
Args:
addr: Integer representing instruction address.
"""
text = idc.get_manual_insn(addr)
if text == None or len(text) == 0:
return
self.start_element(MANUAL_INSTRUCTION)
self.write_address_attribute(ADDRESS, addr)
self.close_tag(True)
self.write_text(text)
self.end_element(MANUAL_INSTRUCTION, False)
def export_manual_operand(self, addr):
"""
Exports user-entered "manual operands" at an address.
Args:
addr: Integer representing instruction address.
"""
for op in range(ida_ida.UA_MAXOP):
if ida_bytes.is_forced_operand(addr, op) == True:
text = idc.get_forced_operand(addr, op)
if text != None and len(text) > 0:
self.start_element(MANUAL_OPERAND)
self.write_address_attribute(ADDRESS, addr)
self.write_numeric_attribute(OPERAND_INDEX, op, 10)
self.close_tag(True)
self.write_text(text)
self.end_element(MANUAL_OPERAND, False)
def export_markup(self):
"""
Exports markup for instructions and data items including references
and manual instructions and operands.
"""
self.update_status(MARKUP)
timer = time.clock()
self.start_element(MARKUP, True)
addr = self.min_ea
while addr != BADADDR:
f = idc.get_full_flags(addr)
if self.options.MemoryReferences.checked == True:
if ida_bytes.has_xref(f) == True:
self.export_user_memory_reference(addr)
if ida_bytes.is_off(f, ida_bytes.OPND_ALL) == True:
self.export_memory_references(addr)
if (self.options.Functions.checked == True and
self.options.StackReferences.checked == True and
ida_bytes.is_stkvar(f, ida_bytes.OPND_ALL) == True):
self.export_stack_reference(addr)
if (self.options.DataTypes.checked == True and
ida_bytes.is_enum(f, ida_bytes.OPND_ALL) == True):
self.export_enum_references(addr)
if self.options.Manual.checked == True:
# TODO: Ask about OPND_ALL and retrieving additional manual operands
# if ida_bytes.is_forced_operand(addr, ida_bytes.OPND_ALL) ==
# True:
if (ida_bytes.is_forced_operand(addr, 0) == True or
ida_bytes.is_forced_operand(addr, 1) == True):
self.export_manual_operand(addr)
if ida_bytes.is_manual_insn(addr) == True:
self.export_manual_instruction(addr)
addr = idc.next_head(addr, self.max_ea)
self.end_element(MARKUP)
self.display_cpu_time(timer)
def export_members(self, s):
"""
Exports the members of a structure or union.
Args:
s: IDA structure/union instance
"""
nmembers = s.memqty
for n in range(nmembers):
m = s.get_member(n)
offset = m.soff
if s.is_union() == True:
offset = 0
self.start_element(MEMBER)
self.write_numeric_attribute(OFFSET, offset)
mname = ida_struct.get_member_name(m.id)
if len(mname) > 0:
self.write_attribute(NAME, mname)
dtype = self.get_member_type(m)
if ida_struct.is_varmember(m) == True:
msize = 0
size = 0
else:
mtibuf = ida_nalt.opinfo_t()
mti = ida_struct.retrieve_member_info(mtibuf, m)
# if IDA_SDK_VERSION < 640:
# msize = idaapi.get_type_size0(None, dtype)
# if msize == None or msize == 0:
# msize = ida_struct.get_member_size(m)
# else:
size = ida_struct.get_member_size(m)
#msize = idaapi.get_data_type_size(m.flag, mtibuf)
                # TODO: How to handle get_data_type_size for structure members
msize = size
if size < msize:
size = msize
if (size != msize):
arraytype = self.get_member_type(m)
dtype = "%s[%d]" % (arraytype, size / msize)
self.write_attribute(DATATYPE, dtype)
self.write_numeric_attribute(SIZE, size * self.cbsize)
regcmt = ida_struct.get_member_cmt(m.id, False)
rptcmt = ida_struct.get_member_cmt(m.id, True)
hascmt = regcmt != None or rptcmt != None
self.close_tag(hascmt)
if (hascmt):
if regcmt != None:
self.export_regular_cmt(regcmt)
if rptcmt != None:
self.export_repeatable_cmt(rptcmt)
self.end_element(MEMBER)
def export_memory_contents(self, binfilename, binfile, start, end):
"""
Exports the binary memory contents in the database.
A MEMORY_CONTENTS element is generated for each contiguous address
range where each address in the range contains a value.
The binary values are store in a separate file (not the XML file),
and the MEMORY_CONTENTS element identifies the file and the
offset in the file where the address range is located.
Args:
binfilename: String containing the absolute filepath
binfile: IDA file instance for binary file
start: Integer representing the starting address
end: Integer representing the ending address
"""
length = 0
startaddr = start
for addr in range(start, end):
# reset start address when length == 0
if (length == 0):
startaddr = addr
has_val = ida_bytes.has_value(idc.get_full_flags(addr))
if has_val == True:
length += self.cbsize
next_address = idc.next_addr(addr)
if ((has_val == False) or (next_address != addr + 1) or
(next_address == end)):
if length > 0:
offset = binfile.tell()
ida_loader.base2file(binfile.get_fp(), offset, startaddr,
startaddr + length)
self.start_element(MEMORY_CONTENTS)
self.write_address_attribute(START_ADDR, startaddr)
self.write_attribute(FILE_NAME, binfilename)
self.write_numeric_attribute(FILE_OFFSET, offset)
self.write_numeric_attribute(LENGTH, length)
self.close_tag(False)
length = 0
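    # Illustrative sketch (not part of the original exporter): the grouping done
    # above can be pictured without any IDA calls. Given a predicate that tells
    # whether an address holds a value, contiguous "valued" runs collapse into
    # (start, length) pairs, which is what each MEMORY_CONTENTS element records
    # together with the offset where those bytes land in the .bytes file
    # (the real code additionally scales lengths by cbsize and handles gaps in
    # the address space):
    #
    #     def valued_runs(start, end, has_value):
    #         runs, length, run_start = [], 0, start
    #         for addr in range(start, end):
    #             if length == 0:
    #                 run_start = addr
    #             if has_value(addr):
    #                 length += 1
    #             if not has_value(addr) or addr + 1 == end:
    #                 if length > 0:
    #                     runs.append((run_start, length))
    #                 length = 0
    #         return runs
    #
    #     valued_runs(0, 8, lambda a: a not in (3, 4))  # -> [(0, 3), (5, 3)]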
def export_memory_map(self):
"""
Exports information about all memory blocks in the database.
A MEMORY_SECTION is generated for each block (segment). If the
memory block is initialized (has values), the contents are exported
using the MEMORY_CONTENTS element.
"""
nsegs = ida_segment.get_segm_qty()
if (nsegs == 0):
return
self.update_status(MEMORY_MAP)
timer = time.clock()
binfilename = ''
if (self.options.MemoryContent.checked == True):
(binfilename, ext) = os.path.splitext(self.filename)
binfilename += ".bytes"
self.binfile = ida_fpro.qfile_t()
self.binfile.open(binfilename, 'wb')
self.start_element(MEMORY_MAP, True)
for i in range(nsegs):
self.export_memory_section(ida_segment.getnseg(i), binfilename)
self.end_element(MEMORY_MAP)
if (self.options.MemoryContent.checked == True):
self.close_binfile()
self.display_cpu_time(timer)
def export_memory_reference(self, addr, op):
"""
Exports the memory reference for operand at the address.
Args:
addr: Integer representing the instruction address.
op: Integer representing the operand index (0-based)
"""
f = idc.get_full_flags(addr)
ri = ida_nalt.refinfo_t()
if ida_nalt.get_refinfo(ri, addr, op) == 1:
if ri.target != BADADDR:
target = ri.target
elif idc.is_code(f) == True:
insn = ida_ua.insn_t()
ida_ua.decode_insn(insn, addr)
target = insn.ops[op].value - ri.tdelta + ri.base
elif idc.is_data(f) == True:
target = self.get_data_value(addr) - ri.tdelta + ri.base
else:
return
else:
return
if ida_bytes.is_mapped(target) == False:
return
self.start_element(MEMORY_REFERENCE)
self.write_address_attribute(ADDRESS, addr)
self.write_numeric_attribute(OPERAND_INDEX, op, 10)
self.write_address_attribute(TO_ADDRESS, target)
self.write_attribute(PRIMARY, "y")
self.close_tag()
def export_memory_references(self, addr):
"""
Exports the memory references for any operands at the address.
Args:
addr: Integer representing the instruction address.
"""
f = idc.get_full_flags(addr)
for op in range(ida_ida.UA_MAXOP):
if ida_bytes.is_off(f, op) == True and (idc.is_data(f) == True or
(idc.is_code(f) == True and
self.is_imm_op(addr, op) == True)):
self.export_memory_reference(addr, op)
def export_memory_section(self, seg, binfilename):
"""
Exports segment information as a MEMORY_SECTIONS element.
Args:
seg: IDA segment instance
binfilename: String containing absolute filepath for binary file.
"""
segname = ida_segment.get_segm_name(seg)
self.start_element(MEMORY_SECTION)
self.write_attribute(NAME, segname)
self.write_address_attribute(START_ADDR, seg.start_ea)
length = (seg.end_ea - seg.start_ea) * self.cbsize
self.write_numeric_attribute(LENGTH, length)
perms = ""
if (seg.perm != 0):
if (seg.perm & ida_segment.SEGPERM_READ != 0):
perms += 'r'
if (seg.perm & ida_segment.SEGPERM_WRITE != 0):
perms += 'w'
if (seg.perm & ida_segment.SEGPERM_EXEC != 0):
                perms += 'x'
#!/usr/bin/env python
#
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author <NAME> <<EMAIL>>
#
"""Tests concerning model groups validation"""
import unittest
import os.path
from textwrap import dedent
from typing import Any, Union, List, Optional
from xmlschema import XMLSchema10, XMLSchema11
from xmlschema.exceptions import XMLSchemaValueError
from xmlschema.validators.exceptions import XMLSchemaValidationError
from xmlschema.validators.particles import ParticleMixin
from xmlschema.validators.models import distinguishable_paths, ModelVisitor
from xmlschema.validators.groups import XsdGroup
from xmlschema.validators.elements import XsdElement
from xmlschema.testing import XsdValidatorTestCase
class ModelGroup(XsdGroup):
"""A subclass for testing XSD models, that disables element parsing and schema bindings."""
def __init__(self, model: str, min_occurs: int = 1, max_occurs: Optional[int] = 1) -> None:
ParticleMixin.__init__(self, min_occurs, max_occurs)
if model not in {'sequence', 'choice', 'all'}:
raise XMLSchemaValueError("invalid model {!r} for a group".format(model))
self._group: List[Union[ParticleMixin, 'ModelGroup']] = []
self.model: str = model
def __repr__(self) -> str:
return '%s(model=%r, occurs=%r)' % (self.__class__.__name__, self.model, self.occurs)
append: Any
class TestModelValidation(XsdValidatorTestCase):
TEST_CASES_DIR = os.path.join(os.path.dirname(__file__), '../test_cases')
schema_class = XMLSchema10
# --- Test helper functions ---
def check_advance_true(self, model, expected=None):
"""
Advances a model with a match condition and checks the expected error list or exception.
        :param model: a ModelVisitor instance.
:param expected: can be an exception class or a list. Leaving `None` means that an empty \
list is expected.
"""
if isinstance(expected, type) and issubclass(expected, Exception):
self.assertRaises(expected, lambda x: list(model.advance(x)), True)
else:
self.assertEqual([e for e in model.advance(True)], expected or [])
def check_advance_false(self, model, expected=None):
"""
        Advances a model with a no-match condition and checks the
        expected error list or exception.
        :param model: a ModelVisitor instance.
:param expected: can be an exception class or a list. Leaving `None` means that \
an empty list is expected.
"""
if isinstance(expected, type) and issubclass(expected, Exception):
self.assertRaises(expected, lambda x: list(model.advance(x)), False)
else:
self.assertEqual([e for e in model.advance(False)], expected or [])
def check_advance(self, model, match, expected=None):
"""
Advances a model with an argument match condition and checks the expected error list.
        :param model: a ModelVisitor instance.
:param match: the matching boolean condition.
:param expected: can be an exception class or a list. Leaving `None` means that an empty \
list is expected.
"""
if isinstance(expected, type) and issubclass(expected, Exception):
self.assertRaises(expected, lambda x: list(model.advance(x)), match)
else:
self.assertEqual([e for e in model.advance(match)], expected or [])
def check_stop(self, model, expected=None):
"""
Stops a model and checks the expected errors list.
        :param model: a ModelVisitor instance.
:param expected: can be an exception class or a list. Leaving `None` means that an empty \
list is expected.
"""
if isinstance(expected, type) and issubclass(expected, Exception):
self.assertRaises(expected, lambda: list(model.stop()))
else:
self.assertEqual([e for e in model.stop()], expected or [])
# --- ModelVisitor methods ---
def test_iter_group(self):
group = ModelGroup('sequence', min_occurs=0, max_occurs=0)
model = ModelVisitor(group)
self.assertListEqual(list(model.items), [])
group = ModelGroup('choice')
group.append(ParticleMixin())
group.append(ParticleMixin())
group.append(ParticleMixin())
model = ModelVisitor(group)
model.occurs[group[1]] = 1
self.assertListEqual(list(model.items), group[1:])
group = ModelGroup('all')
group.append(ParticleMixin())
group.append(ParticleMixin())
group.append(ParticleMixin())
model = ModelVisitor(group)
model.occurs[group[1]] = 1
self.assertListEqual(list(model.items), group[2:])
# --- Vehicles schema ---
def test_vehicles_model(self):
# Sequence with two not-emptiable single-occurs elements
group = self.vh_schema.elements['vehicles'].type.content
model = ModelVisitor(group)
self.check_advance_true(model) # <cars>
self.check_advance_true(model) # <bikes>
self.assertIsNone(model.element)
model = ModelVisitor(group)
self.check_advance_true(model) # <cars>
self.check_advance_true(model) # <bikes>
self.check_advance_true(model, ValueError) # <bikes>
self.assertIsNone(model.element)
def test_cars_model(self):
# Emptiable 1:1 sequence with one emptiable and unlimited element.
group = self.vh_schema.elements['cars'].type.content
model = ModelVisitor(group)
self.check_advance_true(model) # <car>
self.check_advance_true(model) # <car>
self.check_advance_true(model) # <car>
self.check_advance_false(model) # (end)
self.assertIsNone(model.element)
model = ModelVisitor(group)
self.check_advance_false(model) # <not-a-car>
self.assertIsNone(model.element)
# --- Collection schema ---
def test_collection_model(self):
# Sequence with one not-emptiable and unlimited element.
group = self.col_schema.elements['collection'].type.content
model = ModelVisitor(group)
self.check_advance_true(model) # <car>
self.check_advance_true(model) # <car>
self.check_advance_true(model) # <car>
self.check_advance_true(model) # <car>
self.check_advance_false(model) # (end)
self.assertIsNone(model.element)
model = ModelVisitor(group)
self.check_advance_false(model, [(group[0], 0, [group[0]])]) # <not-a-car>
self.assertIsNone(model.element)
def test_person_type_model(self):
# Sequence with four single elements, last two are also emptiable.
group = self.col_schema.types['personType'].content
model = ModelVisitor(group)
self.check_advance_true(model) # <name>
self.check_advance_true(model) # <born>
self.check_advance_true(model) # <dead>
self.check_advance_true(model) # <qualification>
self.assertIsNone(model.element)
model = ModelVisitor(group)
self.check_advance_true(model) # <name>
self.check_advance_true(model) # <born>
self.check_stop(model)
self.assertIsNone(model.element)
model = ModelVisitor(group)
self.check_advance_true(model) # <name> match
self.check_advance_false(model, [(group[1], 0, [group[1]])]) # <born> missing!
self.check_advance_true(model) # <dead> match
self.check_stop(model) # <qualification> is optional
self.assertIsNone(model.element)
# --- XSD 1.0/1.1 meta-schema models ---
def test_meta_simple_derivation_model(self):
"""
<xs:group name="simpleDerivation">
<xs:choice>
<xs:element ref="xs:restriction"/>
<xs:element ref="xs:list"/>
<xs:element ref="xs:union"/>
</xs:choice>
</xs:group>
"""
group = self.schema_class.meta_schema.groups['simpleDerivation']
model = ModelVisitor(group)
self.check_advance_true(model) # <restriction> matches
self.assertIsNone(model.element)
model = ModelVisitor(group)
self.check_advance_false(model) # <list> doesn't match with <restriction>
self.check_advance_true(model) # <list> matches
self.assertIsNone(model.element)
model = ModelVisitor(group)
self.check_advance_false(model) # <union> doesn't match with <restriction>
self.check_advance_false(model) # <union> doesn't match with <list>
self.check_advance_true(model) # <union> matches
self.assertIsNone(model.element)
model = ModelVisitor(group)
self.check_advance_false(model) # <other> doesn't match with <restriction>
self.check_advance_false(model) # <other> doesn't match with <list>
self.check_advance_false(model,
[(group, 0, group[:])]) # <other> doesn't match with <union>
self.assertIsNone(model.element)
def test_meta_simple_restriction_model(self):
"""
<!-- XSD 1.0 -->
<xs:group name="facets">
<xs:choice>
<xs:element ref="xs:minExclusive"/>
<xs:element ref="xs:minInclusive"/>
<xs:element ref="xs:maxExclusive"/>
<xs:element ref="xs:maxInclusive"/>
<xs:element ref="xs:totalDigits"/>
<xs:element ref="xs:fractionDigits"/>
<xs:element ref="xs:length"/>
<xs:element ref="xs:minLength"/>
<xs:element ref="xs:maxLength"/>
<xs:element ref="xs:enumeration"/>
<xs:element ref="xs:whiteSpace"/>
<xs:element ref="xs:pattern"/>
</xs:choice>
</xs:group>
<xs:group name="simpleRestrictionModel">
<xs:sequence>
<xs:element name="simpleType" type="xs:localSimpleType" minOccurs="0"/>
<xs:group ref="xs:facets" minOccurs="0" maxOccurs="unbounded"/>
</xs:sequence>
</xs:group>
<!-- XSD 1.1 -->
<xs:group name="simpleRestrictionModel">
<xs:sequence>
<xs:element name="simpleType" type="xs:localSimpleType" minOccurs="0"/>
<xs:choice minOccurs="0" maxOccurs="unbounded">
<xs:element ref="xs:facet"/> <!-- Use a substitution group -->
<xs:any processContents="lax" namespace="##other"/>
</xs:choice>
</xs:sequence>
</xs:group>
"""
# Sequence with an optional single element and an optional unlimited choice.
group = self.schema_class.meta_schema.groups['simpleRestrictionModel']
model = ModelVisitor(group)
if self.schema_class.XSD_VERSION == '1.0':
self.assertEqual(model.element, group[0])
self.check_advance_true(model) # <simpleType> matches
self.assertEqual(model.element, group[1][0][0])
self.check_advance_false(model) # <maxExclusive> does not match
self.assertEqual(model.element, group[1][0][1])
self.check_advance_false(model) # <maxExclusive> does not match
self.assertEqual(model.element, group[1][0][2])
self.check_advance_true(model) # <maxExclusive> matches
self.assertEqual(model.element, group[1][0][0])
for _ in range(12):
self.check_advance_false(model) # no match for the inner choice group "xs:facets"
self.assertIsNone(model.element)
def test_meta_schema_top_model(self):
"""
<xs:group name="schemaTop">
<xs:choice>
<xs:group ref="xs:redefinable"/>
<xs:element ref="xs:element"/>
<xs:element ref="xs:attribute"/>
<xs:element ref="xs:notation"/>
</xs:choice>
</xs:group>
<xs:group name="redefinable">
<xs:choice>
<xs:element ref="xs:simpleType"/>
<xs:element ref="xs:complexType"/>
<xs:element ref="xs:group"/>
<xs:element ref="xs:attributeGroup"/>
</xs:choice>
</xs:group>
"""
group = self.schema_class.meta_schema.groups['schemaTop']
model = ModelVisitor(group)
self.assertEqual(model.element, group[0][0][0])
self.check_advance_false(model) # <simpleType> doesn't match
self.assertEqual(model.element, group[0][0][1])
self.check_advance_true(model) # <complexType> matches
self.assertIsNone(model.element)
model.restart()
self.assertEqual(model.element, group[0][0][0])
self.check_advance_false(model) # <simpleType> doesn't match
self.assertEqual(model.element, group[0][0][1])
self.check_advance_false(model) # <complexType> doesn't match
self.assertEqual(model.element, group[0][0][2])
self.check_advance_false(model) # <group> doesn't match
self.assertEqual(model.element, group[0][0][3])
self.check_advance_false(model) # <attributeGroup> doesn't match
self.assertEqual(model.element, group[1])
self.check_advance_false(model) # <element> doesn't match
self.assertEqual(model.element, group[2])
self.check_advance_false(model) # <attribute> doesn't match
self.assertEqual(model.element, group[3])
self.check_advance_false(
model, [(group, 0, group[0][0][:] + group[1:])]) # <notation> doesn't match
model.restart()
self.assertEqual(model.element, group[0][0][0])
self.check_advance_false(model) # <simpleType> doesn't match
self.assertEqual(model.element, group[0][0][1])
self.check_advance_false(model) # <complexType> doesn't match
self.assertEqual(model.element, group[0][0][2])
self.check_advance_false(model) # <group> doesn't match
self.assertEqual(model.element, group[0][0][3])
self.check_advance_false(model) # <attributeGroup> doesn't match
self.assertEqual(model.element, group[1])
self.check_advance_false(model) # <element> doesn't match
self.assertEqual(model.element, group[2])
        self.check_advance_true(model) # <attribute> matches
self.assertIsNone(model.element)
def test_meta_attr_declarations_group(self):
"""
<xs:group name="attrDecls">
<xs:sequence>
<xs:choice minOccurs="0" maxOccurs="unbounded">
<xs:element name="attribute" type="xs:attribute"/>
<xs:element name="attributeGroup" type="xs:attributeGroupRef"/>
</xs:choice>
<xs:element ref="xs:anyAttribute" minOccurs="0"/>
</xs:sequence>
</xs:group>
"""
group = self.schema_class.meta_schema.groups['attrDecls']
model = ModelVisitor(group)
for match in [False, False, True]:
self.check_advance(model, match)
self.assertIsNone(model.element)
model = ModelVisitor(group)
self.check_advance_false(model)
self.check_advance_true(model)
self.assertEqual(model.element, group[0][0])
model = ModelVisitor(group)
for match in [False, True, False, False]:
self.check_advance(model, match)
self.assertEqual(model.element, group[1])
model = ModelVisitor(group)
for match in [False, True, True, False, True, False, False]:
self.check_advance(model, match)
self.assertEqual(model.element, group[1])
def test_meta_complex_type_model(self):
"""
<xs:group name="complexTypeModel">
<xs:choice>
<xs:element ref="xs:simpleContent"/>
<xs:element ref="xs:complexContent"/>
<xs:sequence>
<xs:group ref="xs:typeDefParticle" minOccurs="0"/>
<xs:group ref="xs:attrDecls"/>
</xs:sequence>
</xs:choice>
</xs:group>
<xs:group name="typeDefParticle">
<xs:choice>
<xs:element name="group" type="xs:groupRef"/>
<xs:element ref="xs:all"/>
<xs:element ref="xs:choice"/>
<xs:element ref="xs:sequence"/>
</xs:choice>
</xs:group>
<xs:group name="complexTypeModel">
<xs:choice>
<xs:element ref="xs:simpleContent"/>
<xs:element ref="xs:complexContent"/>
<xs:sequence>
<xs:element ref="xs:openContent" minOccurs="0"/>
<xs:group ref="xs:typeDefParticle" minOccurs="0"/>
<xs:group ref="xs:attrDecls"/>
<xs:group ref="xs:assertions"/>
</xs:sequence>
</xs:choice>
</xs:group>
"""
group = self.schema_class.meta_schema.groups['complexTypeModel']
model = ModelVisitor(group)
self.assertEqual(model.element, group[0])
self.check_advance_true(model) # <simpleContent> matches
self.assertIsNone(model.element)
model.restart()
self.assertEqual(model.element, group[0])
self.check_advance_false(model)
self.check_advance_true(model) # <complexContent> matches
self.assertIsNone(model.element)
if self.schema_class.XSD_VERSION == '1.0':
model.restart()
self.assertEqual(model.element, group[0])
for match in [False, False, False, False, True]:
self.check_advance(model, match) # <all> matches
self.check_stop(model)
self.assertIsNone(model.element)
model.restart()
self.assertEqual(model.element, group[0])
for match in [False, False, False, False, True, False, True, False, False, False]:
self.check_advance(model, match) # <all> and <attributeGroup> match
self.assertIsNone(model.element)
def test_meta_schema_document_model(self):
group = self.schema_class.meta_schema.elements['schema'].type.content
# A schema model with a wrong tag
model = ModelVisitor(group)
if self.schema_class.XSD_VERSION == '1.0':
self.assertEqual(model.element, group[0][0])
            self.check_advance_false(model)
# pychunkedgraph/backend/chunkedgraph.py
import collections
import numpy as np
import time
import datetime
import os
import sys
import networkx as nx
import pytz
import cloudvolume
import re
import itertools
import logging
from itertools import chain
from multiwrapper import multiprocessing_utils as mu
from pychunkedgraph.backend import cutting, chunkedgraph_comp
from pychunkedgraph.backend.chunkedgraph_utils import compute_indices_pandas, \
compute_bitmasks, get_google_compatible_time_stamp, \
get_time_range_filter, get_time_range_and_column_filter, get_max_time, \
combine_cross_chunk_edge_dicts, get_min_time, partial_row_data_to_column_dict
from pychunkedgraph.backend.utils import serializers, column_keys, row_keys, basetypes
from pychunkedgraph.backend import chunkedgraph_exceptions as cg_exceptions
from pychunkedgraph.meshing import meshgen
from google.api_core.retry import Retry, if_exception_type
from google.api_core.exceptions import Aborted, DeadlineExceeded, \
ServiceUnavailable
from google.auth import credentials
from google.cloud import bigtable
from google.cloud.bigtable.row_filters import TimestampRange, \
TimestampRangeFilter, ColumnRangeFilter, ValueRangeFilter, RowFilterChain, \
ColumnQualifierRegexFilter, RowFilterUnion, ConditionalRowFilter, \
PassAllFilter, RowFilter, RowKeyRegexFilter, FamilyNameRegexFilter
from google.cloud.bigtable.row_set import RowSet
from google.cloud.bigtable.column_family import MaxVersionsGCRule
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, NamedTuple
HOME = os.path.expanduser("~")
N_DIGITS_UINT64 = len(str(np.iinfo(np.uint64).max))
LOCK_EXPIRED_TIME_DELTA = datetime.timedelta(minutes=1, seconds=00)
UTC = pytz.UTC
# Setting environment wide credential path
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = \
HOME + "/.cloudvolume/secrets/google-secret.json"
class ChunkedGraph(object):
def __init__(self,
table_id: str,
instance_id: str = "pychunkedgraph",
project_id: str = "neuromancer-seung-import",
chunk_size: Tuple[np.uint64, np.uint64, np.uint64] = None,
fan_out: Optional[np.uint64] = None,
n_layers: Optional[np.uint64] = None,
credentials: Optional[credentials.Credentials] = None,
client: bigtable.Client = None,
dataset_info: Optional[object] = None,
is_new: bool = False,
logger: Optional[logging.Logger] = None) -> None:
if logger is None:
self.logger = logging.getLogger(f"{project_id}/{instance_id}/{table_id}")
self.logger.setLevel(logging.WARNING)
if not self.logger.handlers:
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.WARNING)
self.logger.addHandler(sh)
else:
self.logger = logger
if client is not None:
self._client = client
else:
self._client = bigtable.Client(project=project_id, admin=True,
credentials=credentials)
self._instance = self.client.instance(instance_id)
self._table_id = table_id
self._table = self.instance.table(self.table_id)
if is_new:
self._check_and_create_table()
self._dataset_info = self.check_and_write_table_parameters(
column_keys.GraphSettings.DatasetInfo, dataset_info,
required=True, is_new=is_new)
self._cv_path = self._dataset_info["data_dir"] # required
self._mesh_dir = self._dataset_info.get("mesh", None) # optional
self._n_layers = self.check_and_write_table_parameters(
column_keys.GraphSettings.LayerCount, n_layers,
required=True, is_new=is_new)
self._fan_out = self.check_and_write_table_parameters(
column_keys.GraphSettings.FanOut, fan_out,
required=True, is_new=is_new)
self._chunk_size = self.check_and_write_table_parameters(
column_keys.GraphSettings.ChunkSize, chunk_size,
required=True, is_new=is_new)
self._dataset_info["graph"] = {"chunk_size": self.chunk_size}
self._bitmasks = compute_bitmasks(self.n_layers, self.fan_out)
self._cv = None
# Hardcoded parameters
self._n_bits_for_layer_id = 8
self._cv_mip = 0
@property
def client(self) -> bigtable.Client:
return self._client
@property
def instance(self) -> bigtable.instance.Instance:
return self._instance
@property
def table(self) -> bigtable.table.Table:
return self._table
@property
def table_id(self) -> str:
return self._table_id
@property
def instance_id(self):
return self.instance.instance_id
@property
def project_id(self):
return self.client.project
@property
def family_id(self) -> str:
return "0"
@property
def incrementer_family_id(self) -> str:
return "1"
@property
def log_family_id(self) -> str:
return "2"
@property
def cross_edge_family_id(self) -> str:
return "3"
@property
def family_ids(self):
return [self.family_id, self.incrementer_family_id, self.log_family_id,
self.cross_edge_family_id]
@property
def fan_out(self) -> np.uint64:
return self._fan_out
@property
def chunk_size(self) -> np.ndarray:
return self._chunk_size
@property
def segmentation_chunk_size(self) -> np.ndarray:
return self.cv.scale["chunk_sizes"][0]
@property
def segmentation_resolution(self) -> np.ndarray:
return np.array(self.cv.scale["resolution"])
@property
def segmentation_bounds(self) -> np.ndarray:
return np.array(self.cv.bounds.to_list()).reshape(2, 3)
@property
def n_layers(self) -> np.uint64:
return self._n_layers
@property
def bitmasks(self) -> Dict[int, int]:
return self._bitmasks
@property
def cv_mesh_path(self) -> str:
return "%s/%s" % (self._cv_path, self._mesh_dir)
@property
def dataset_info(self) -> object:
return self._dataset_info
@property
def cv_mip(self) -> int:
return self._cv_mip
@property
def cv(self) -> cloudvolume.CloudVolume:
if self._cv is None:
self._cv = cloudvolume.CloudVolume(self._cv_path, mip=self._cv_mip,
info=self.dataset_info)
return self._cv
@property
def root_chunk_id(self):
return self.get_chunk_id(layer=int(self.n_layers), x=0, y=0, z=0)
def _check_and_create_table(self) -> None:
""" Checks if table exists and creates new one if necessary """
table_ids = [t.table_id for t in self.instance.list_tables()]
        if self.table_id not in table_ids:
self.table.create()
f = self.table.column_family(self.family_id)
f.create()
f_inc = self.table.column_family(self.incrementer_family_id,
gc_rule=MaxVersionsGCRule(1))
f_inc.create()
f_log = self.table.column_family(self.log_family_id)
f_log.create()
f_ce = self.table.column_family(self.cross_edge_family_id,
gc_rule=MaxVersionsGCRule(1))
f_ce.create()
self.logger.info(f"Table {self.table_id} created")
def check_and_write_table_parameters(self, column: column_keys._Column,
value: Optional[Union[str, np.uint64]] = None,
required: bool = True,
is_new: bool = False
) -> Union[str, np.uint64]:
""" Checks if a parameter already exists in the table. If it already
exists it returns the stored value, else it stores the given value.
Storing the given values can be enforced with `is_new`. The function
raises an exception if no value is passed and the parameter does not
exist, yet.
:param column: column_keys._Column
:param value: Union[str, np.uint64]
:param required: bool
:param is_new: bool
:return: Union[str, np.uint64]
value
"""
setting = self.read_byte_row(row_key=row_keys.GraphSettings,
columns=column)
if (not setting or is_new) and value is not None:
row = self.mutate_row(row_keys.GraphSettings, {column: value})
self.bulk_write([row])
elif not setting and value is None:
assert not required
return None
else:
value = setting[0].value
return value
def is_in_bounds(self, coordinate: Sequence[int]):
""" Checks whether a coordinate is within the segmentation bounds
:param coordinate: [int, int, int]
:return bool
"""
coordinate = np.array(coordinate)
if np.any(coordinate < self.segmentation_bounds[0]):
return False
elif np.any(coordinate > self.segmentation_bounds[1]):
return False
else:
return True
def get_serialized_info(self):
""" Rerturns dictionary that can be used to load this ChunkedGraph
:return: dict
"""
info = {"table_id": self.table_id,
"instance_id": self.instance_id,
"project_id": self.project_id}
try:
info["credentials"] = self.client.credentials
        except AttributeError:
info["credentials"] = self.client._credentials
return info
def get_chunk_layer(self, node_or_chunk_id: np.uint64) -> int:
""" Extract Layer from Node ID or Chunk ID
:param node_or_chunk_id: np.uint64
:return: int
"""
return int(node_or_chunk_id) >> 64 - self._n_bits_for_layer_id
def get_chunk_coordinates(self, node_or_chunk_id: np.uint64
) -> np.ndarray:
""" Extract X, Y and Z coordinate from Node ID or Chunk ID
:param node_or_chunk_id: np.uint64
:return: Tuple(int, int, int)
"""
layer = self.get_chunk_layer(node_or_chunk_id)
bits_per_dim = self.bitmasks[layer]
x_offset = 64 - self._n_bits_for_layer_id - bits_per_dim
y_offset = x_offset - bits_per_dim
z_offset = y_offset - bits_per_dim
x = int(node_or_chunk_id) >> x_offset & 2 ** bits_per_dim - 1
y = int(node_or_chunk_id) >> y_offset & 2 ** bits_per_dim - 1
z = int(node_or_chunk_id) >> z_offset & 2 ** bits_per_dim - 1
return np.array([x, y, z])
def get_chunk_id(self, node_id: Optional[np.uint64] = None,
layer: Optional[int] = None,
x: Optional[int] = None,
y: Optional[int] = None,
z: Optional[int] = None) -> np.uint64:
""" (1) Extract Chunk ID from Node ID
(2) Build Chunk ID from Layer, X, Y and Z components
:param node_id: np.uint64
:param layer: int
:param x: int
:param y: int
:param z: int
:return: np.uint64
"""
assert node_id is not None or \
all(v is not None for v in [layer, x, y, z])
if node_id is not None:
layer = self.get_chunk_layer(node_id)
bits_per_dim = self.bitmasks[layer]
if node_id is not None:
chunk_offset = 64 - self._n_bits_for_layer_id - 3 * bits_per_dim
return np.uint64((int(node_id) >> chunk_offset) << chunk_offset)
else:
if not(x < 2 ** bits_per_dim and
y < 2 ** bits_per_dim and
z < 2 ** bits_per_dim):
raise Exception("Chunk coordinate is out of range for"
"this graph on layer %d with %d bits/dim."
"[%d, %d, %d]; max = %d."
% (layer, bits_per_dim, x, y, z,
2 ** bits_per_dim))
layer_offset = 64 - self._n_bits_for_layer_id
x_offset = layer_offset - bits_per_dim
y_offset = x_offset - bits_per_dim
z_offset = y_offset - bits_per_dim
return np.uint64(layer << layer_offset | x << x_offset |
y << y_offset | z << z_offset)
def get_chunk_ids_from_node_ids(self, node_ids: Iterable[np.uint64]
) -> np.ndarray:
""" Extract a list of Chunk IDs from a list of Node IDs
:param node_ids: np.ndarray(dtype=np.uint64)
:return: np.ndarray(dtype=np.uint64)
"""
# TODO: measure and improve performance(?)
return np.array(list(map(lambda x: self.get_chunk_id(node_id=x),
node_ids)), dtype=np.uint64)
def get_segment_id_limit(self, node_or_chunk_id: np.uint64) -> np.uint64:
""" Get maximum possible Segment ID for given Node ID or Chunk ID
:param node_or_chunk_id: np.uint64
:return: np.uint64
"""
layer = self.get_chunk_layer(node_or_chunk_id)
bits_per_dim = self.bitmasks[layer]
chunk_offset = 64 - self._n_bits_for_layer_id - 3 * bits_per_dim
return np.uint64(2 ** chunk_offset - 1)
def get_segment_id(self, node_id: np.uint64) -> np.uint64:
""" Extract Segment ID from Node ID
:param node_id: np.uint64
:return: np.uint64
"""
return node_id & self.get_segment_id_limit(node_id)
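    # Illustrative sketch (not part of the original class): with the hardcoded
    # 8 layer bits and, say, bits_per_dim = 10, a node id is packed as
    #     [ 8 bits layer | 10 bits x | 10 bits y | 10 bits z | 26 bits segment id ]
    # so layer=2, x=3, y=1, z=0, segment=7 corresponds to
    #     node_id = (2 << 56) | (3 << 46) | (1 << 36) | (0 << 26) | 7
    # and get_chunk_layer / get_chunk_coordinates / get_segment_id recover the
    # individual components by undoing exactly these shifts and masks.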
def get_node_id(self, segment_id: np.uint64,
chunk_id: Optional[np.uint64] = None,
layer: Optional[int] = None,
x: Optional[int] = None,
y: Optional[int] = None,
z: Optional[int] = None) -> np.uint64:
""" (1) Build Node ID from Segment ID and Chunk ID
(2) Build Node ID from Segment ID, Layer, X, Y and Z components
:param segment_id: np.uint64
:param chunk_id: np.uint64
:param layer: int
:param x: int
:param y: int
:param z: int
:return: np.uint64
"""
if chunk_id is not None:
return chunk_id | segment_id
else:
return self.get_chunk_id(layer=layer, x=x, y=y, z=z) | segment_id
def get_unique_segment_id_range(self, chunk_id: np.uint64, step: int = 1
) -> np.ndarray:
""" Return unique Segment ID for given Chunk ID
atomic counter
:param chunk_id: np.uint64
:param step: int
:return: np.uint64
"""
column = column_keys.Concurrency.CounterID
# Incrementer row keys start with an "i" followed by the chunk id
row_key = serializers.serialize_key("i%s" % serializers.pad_node_id(chunk_id))
append_row = self.table.row(row_key, append=True)
append_row.increment_cell_value(column.family_id, column.key, step)
        # This increments the row entry and returns the value
import sqlite3
from dataclasses import dataclass, field
import typing
from enum import IntEnum
from src.Errors import *
# TODO add date as a special type (subset of text - sqlite doesn't have native date/time support)
# TODO refactor the executes into a private function (_run)
# * isolate the connect, execute, and close calls inside
# * determine how to return values without knowing the query type
# * make thread/process safe with lock/flag file (location configurable)
# TODO check for errors raised on add - re-add value with unique on column?
# TODO allow for comparison values in the filtering conditions to be other columns, or columns from other tables.
@dataclass()
class Column:
"""
Representation of a column in the table. Allows for consolidation of the validation functions and meta-data.
The validation is performed through a single function which takes only the prospective value as an argument, and
returns True if the value is good. This defaults to a simple type check.
"""
pragma: tuple
headers: tuple
    _validator: typing.Callable[[typing.Any], bool] = field(init=False)
Name: str = field(init=False)
ColumnType: str = field(init=False)
Default: typing.Any = field(init=False)
PrimaryKey: bool = field(init=False)
NotNull: bool = field(init=False)
# Expected label names from the pragma read
__LBLNAME = 'name' # name of the column
__LBLTYPE = 'type' # data type of the column
__LBLDFT = 'dflt_value' # default value of the column
__LBLPK = 'pk' # primary key flag
__LBLNN = 'notnull' # notnull/nullable flag
def __post_init__(self):
self.Name = self.pragma[self.headers.index(self.__LBLNAME)]
self.ColumnType = self.pragma[self.headers.index(self.__LBLTYPE)]
self.Default = self.pragma[self.headers.index(self.__LBLDFT)]
self.PrimaryKey = self.pragma[self.headers.index(self.__LBLPK)]
self.NotNull = self.pragma[self.headers.index(self.__LBLNN)]
# default validators - basic sqlite data types
__VALIDATORS = {
            'integer': (lambda val: isinstance(val, int) or val is None),
            'real': (lambda val: isinstance(val, float) or val is None),
            'text': (lambda val: isinstance(val, str) or val is None),
'null': (lambda val: val is None),
'blob': (lambda val: True) # just let it ride
}
self._validator = __VALIDATORS[self.ColumnType]
if self.Default is None:
defaults = {
'integer': 0,
'real': 0.0,
'text': '',
'null': None,
'blob': b''
}
self.Default = defaults[self.ColumnType]
def Validate(self, value: typing.Any) -> bool:
"""
Checks if the value passed in is appropriate for this column.
:param value: The candidate to validate.
:return: True if the value can be used for this column, False otherwise.
"""
return self._validator(value)
    def Set_Validator(self, vdator: typing.Callable[[typing.Any], bool]):
"""
Changes the validation function for a column.
:param vdator: The new validator function.
"""
self._validator = vdator
def ReadAttribute(self, attr: str) -> typing.Any:
"""
Future proofing in case something shows up later we need easy access to.
:param attr: The meta-data attribute to get.
:return: The value of the attribute.
"""
return self.pragma[self.headers.index(attr)]
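# Illustrative sketch (not part of the original module): how a Column built from
# a pragma row can have its validator swapped for a stricter rule. The header
# and pragma tuples below are hypothetical stand-ins for what sqlite's
# "pragma table_info" would return for an integer "age" column.
def _example_custom_validator() -> bool:
    headers = ('cid', 'name', 'type', 'notnull', 'dflt_value', 'pk')
    pragma_row = (0, 'age', 'integer', 1, None, 0)
    age = Column(pragma=pragma_row, headers=headers)
    # the default validator only checks the type; tighten it to a range check
    age.Set_Validator(lambda v: isinstance(v, int) and 0 <= v < 150)
    return age.Validate(42) and not age.Validate(-5)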
class ComparisonOps(IntEnum):
"""
Enumeration of the operations usable in filters for the Tables class.
"""
Noop = 0
EQUALS = 1
NOTEQ = 2
GREATER = 3
GRorEQ = 4
LESSER = 5
LSorEQ = 6
LIKE = 7
IN = 8
IS = 9
def AsStr(self):
strs = {
ComparisonOps.EQUALS: '=',
ComparisonOps.NOTEQ: '<>',
ComparisonOps.GREATER: '>',
ComparisonOps.GRorEQ: '>=',
ComparisonOps.LESSER: '<',
ComparisonOps.LSorEQ: '<=',
ComparisonOps.LIKE: 'like',
ComparisonOps.IN: 'in',
ComparisonOps.IS: 'is'
}
return strs[self.value]
@dataclass()
class Where:
column: str
operator: ComparisonOps
value: str
class Table:
"""
Defines a single table from the database. Provides operations to read and write, but not create.
"""
# region 'Constants'
# Expected label names from the pragma read
__LBLNAME = 'name' # name of the column
__LBLTYPE = 'type' # data type of the column
__LBLDFT = 'dflt_value' # default value of the column
__LBLPK = 'pk' # primary key flag
__LBLNN = 'notnull' # notnull/nullable flag
#endregion
def __init__(self, name: str, dbfile: str):
self.DB = dbfile
self.TableName = name
# grab the data
self.__client = sqlite3.connect(self.DB)
cq = self.__client.execute("pragma table_info({0})".format(self.TableName))
# init the columns dictionary and primary keys list
self._columns = {} # this will hold _Column objects indexed by name
self._pks = [] # a list of the names of primary keys
self._filters = [] # where clauses
# clabels are the pragma field names for the column meta data
self._clabels = [x[0] for x in cq.description]
# move the data into the dictionary
for col in cq.fetchall():
name = col[self._clabels.index(self.__LBLNAME)]
# save the name of the column by the index of the key
self._columns[name] = Column(pragma=col, headers=self._clabels)
# test for pk status
if self._columns[name].PrimaryKey:
# if this is a primary key save it in that list
self._pks.append(name)
# end for col
# end __init__()
# region Private Helpers
# helper to make sure the value is formatted properly for the column
def _buildWhere(self, column: str, op: ComparisonOps, val: typing.Any):
# build the clause
if self._columns[column].ColumnType == 'text' and val != 'null':
return f"{column} {op.AsStr()} '{val}' "
else:
return f"{column} {op.AsStr()} {val}"
# endregion
#region DB Interactions
def Join(self, other, otherCol: str, myCol: str):
"""
        Creates a pseudo-table by performing a left join on the table other.
This will only join on equals between two columns.
:param other: The table to join with.
:param otherCol: The name of the column from the other table to join with.
        :param myCol: The name of the column from within this table to match to otherCol.
:return:
"""
pass
def GetAll(self) -> list:
"""
Performs a get for all the columns in the table. Any filters set still apply to the results.
:return: The results.
"""
return self.Get(list(self._columns.keys()))
def Get(self, columns: list) -> list:
"""
Retrieves all values of a set of columns. If the where clause is specified then only the matching values are
returned.
:param columns: A list of the column names to select.
:return:
"""
# sanity check the columns
for c in columns:
if c not in self._columns.keys():
raise ImaginaryColumn(self.TableName, c)
# end for c
# build the query - start with the basic select portion
# initialize the select statement
query = f"Select {str.join(', ', columns)} From {self.TableName}"
# add the where clause(s)
if len(self._filters) > 0:
# add the intial where
query += f" Where {self._buildWhere(self._filters[0].column, self._filters[0].operator, self._filters[0].value)}"
# add additional clauses if needed
if len(self._filters) > 1:
                for f in self._filters[1:]:  # the first filter was already added above
query += f' and {self._buildWhere(f.column, f.operator, f.value)}'
# end for filters
# end if len > 1
# end if len > 0
# execute the query
print(query)
cur = self.__client.execute(query)
# marshall the results and return the rows
return cur.fetchall()
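    # Illustrative sketch (not part of the original class): with a hypothetical
    # "people" table holding a text column "name" and an integer column "age",
    # a filter list such as
    #     [Where('name', ComparisonOps.LIKE, 'a%'), Where('age', ComparisonOps.GRorEQ, 18)]
    # makes Get(['name', 'age']) emit roughly
    #     Select name, age From people Where name like 'a%'  and age >= 18
    # (text comparison values are quoted by _buildWhere, numeric ones are not).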
def Add(self, values):
"""
Adds a new entry to the table.
:param values: A map of the column names and values. Any missing values will be filled in with the default value (except primary keys).
"""
cols = list(self._columns.keys())
vals = {}
# grab the values from the parameter
for k in values.keys():
if k not in self._columns.keys():
raise ImaginaryColumn(self.TableName, k)
cols.remove(k)
# do not add in primary keys
if k not in self._pks:
vals[k] = values[k]
# fill in any missing values with the defaults
for c in cols:
# let sqlite handle filling in the primary keys
if c not in self._pks:
vals[c] = self._columns[c].Default
        # with all the values collected, build a parameterized insert so sqlite
        # handles quoting and type conversion for non-text columns
        columns_sql = str.join(', ', list(vals.keys()))
        placeholders = str.join(', ', ['?'] * len(vals))
        insert = f"Insert into {self.TableName}({columns_sql}) values ({placeholders})"
        # perform the action
        cur = self.__client.cursor()
        cur.execute(insert, list(vals.values()))
self.__client.commit()
def UpdateValue(self, name: str, value: typing.Any, compname: str = '', operator: ComparisonOps = ComparisonOps.Noop
, compval: typing.Any = None):
"""
Update a single column on all rows matching the condition defined by the operator, compname, and compval. If no
condition is defined here, the current filter is used.
:param compname: The name of the column the condition is based on.
:param name: Name of the column to update.
:param value: The new value of the column.
:param operator: the operator for the condition clause.
:param compval: The value to compare the current value of the column to.
"""
# TODO make the where clause a list of tuples or actual where objects?
# verify the column
if name not in self._columns.keys():
raise ImaginaryColumn(self.TableName, name)
# verify the value is legal
if | |
from . import auth_blueprint
from flask.views import MethodView
from flask import make_response, request, jsonify, session
from flask_cors import cross_origin
from app.models.user import User
from app.models.token import Token
from app import db
from flasgger import swag_from
import re
import json
from sqlalchemy.sql.expression import or_, and_
from numbers import Number
from werkzeug.security import generate_password_hash, check_password_hash
class Registration(MethodView):
"""This class registers a new user."""
    # handle POST request for this view (url is auth/register)
@swag_from('../api-docs/register_a_user.yml')
def post(self):
#get the json data sent over post as a dictionary
try:
#check if it was json data that was sent
if request.is_json:
data = request.get_json()
else:
response = {
"message": "Please supply json data",
"status": "failure"
}
return make_response(jsonify(response)), 400
except Exception as e:
response = {
"message": "An error occured: Here are the details - " + str(e),
"status": "failure"
}
return make_response(jsonify(response)), 500
#ensure that username, email and password keys are provided
try:
username = data['username']
password = data['password']
email = data['email']
except KeyError as missing_key:
response = {
"message": "Please supply a " + str(missing_key),
"status": "failure"
}
return make_response(jsonify(response)), 400
#check if username, password or email is empty
        if not username or not password or not email:  # "not" catches empty strings as well as None
response = {
"message": "Please supply a value for username, email and password",
"status": "failure"
}
return make_response(jsonify(response)), 400
# check if what was got from json for username or password is not a string
if not isinstance(username, str) or not isinstance(password, str) or not isinstance(email, str):
response = {
'message': 'Please supply string values for username, email and password',
"status": "failure"
}
return make_response(jsonify(response)), 401
#check if email is not in the right format
if re.search(r'[\w\.-]+@[\w\.-]+', email) is None:
response = {
"message": "Please supply a valid email address",
"status": "failure"
}
return make_response(jsonify(response)), 400
# Check to see if the user already exists
user = User.query.filter(or_(User.username==data['username'], User.email==data['email'])).first()
if user is not None:
# There is an existing user. We don't want to register users twice
            # Return a message to the user telling them that they already exist
response = {
"message": 'User already exists. Please login.',
"status": "failure"
}
return make_response(jsonify(response)), 401
try:
# Register the user
username = data['username']
            password = generate_password_hash(data['password'])
email = data['email']
new_user = User(username=username, email=email, password=password)
new_user.add()
response = {
"message": 'You registered successfully. Please log in.',
"status": "success"
}
# return a response notifying the user that they registered successfully
return make_response(jsonify(response)), 201
except Exception as e:
# An error occured, therefore return a string message containing the error
response = {
"message": "An error occurred, these are the details: " + str(e),
"status": "failure"
}
return make_response(jsonify(response)), 500
class Login(MethodView):
"""This class-based view handles user login"""
@swag_from('../api-docs/login_a_user.yml')
def post(self):
#Handle POST request for this view. Url ---> /auth/login
"""Endpoint to login a user"""
try:
#check if the request is json data
if request.is_json:
#get the json data sent over post as a dictionary
data = request.get_json()
else:
response = {
"message": "Please supply json data",
"status": "failure"
}
return make_response(jsonify(response)), 400
if (isinstance(data['username'], Number)) or (isinstance(data['password'], Number)):
response = {
"message": "Invalid values supplied, Please try again with text values",
"status": "failure"
}
return make_response(jsonify(response)), 401
if (not isinstance(data['username'], str)) and (not isinstance(data['password'], str)):
response = {
"message": "Invalid values supplied, Please try again with text values",
"status": "failure"
}
return make_response(jsonify(response)), 401
user = User.query.filter(
or_(User.username == data['username'],User.email == data['username'])).first()
#Verify correct password supplied
if user is None or \
check_password_hash(user.password, data['password']) is False:
# User does not exist
response = {
"message": 'Invalid username or password, Please try again',
"status": "failure"
}
return make_response(jsonify(response)), 401
user.logged_in = 1
access_token = User.generate_token(user.id)
db.session.commit()
#store token in the Tokens table
token = Token(token=access_token.decode())
db.session.add(token)
db.session.commit()
if access_token is not None:
response = {
"message": 'You logged in successfully.',
"access_token": access_token.decode(),
"status": "success",
"id": user.id,
"username": user.username,
"email":user.email
}
return make_response(jsonify(response)), 200
except json.JSONDecodeError:
response = {
"message": "Please supply a correct format for your json data",
"status": "failure"
}
return make_response(jsonify(response)), 400
except KeyError as key:
#username or password key is not supplied
response = {
"message": "Please supply a " + str(key),
"status": "failure"
}
return make_response(jsonify(response)), 400
except Exception as e:
response = {
"message": "Server error: "+ str(e),
"status": "failure"
}
# Return a server error using the HTTP Error Code 500 (Internal Server Error)
return make_response(jsonify(response)), 500
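# Illustrative sketch (not part of the original module): a client-side
# register/login/logout round trip against these views. The base URL and the
# "/auth" prefix are assumptions; the actual routes depend on how
# auth_blueprint is registered on the application.
def _example_auth_round_trip(base_url="http://localhost:5000/auth"):
    import requests  # local import keeps the sketch self-contained
    creds = {"username": "demo", "email": "demo@example.com", "password": "secret"}
    requests.post(f"{base_url}/register", json=creds)
    login = requests.post(f"{base_url}/login",
                          json={"username": creds["username"],
                                "password": creds["password"]}).json()
    token = login["access_token"]
    return requests.post(f"{base_url}/logout",
                         headers={"Authorization": "Bearer " + token}).json()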
class Logout(MethodView):
"""This class-based view handles user logout"""
@swag_from('../api-docs/logout_a_user.yml')
def post(self):
#Handle POST request for this view. Url ---> /auth/logout
"""Endpoint to logout a user"""
try:
# get auth token
auth_header = request.headers.get('Authorization')
auth_token = None
if auth_header and len(auth_header.split(" ")) > 1:
auth_token = auth_header.split(" ")[1]
if auth_token is None:
return make_response(jsonify({
"message": "Token required",
"status": "failure"
})), 403
if auth_token is not None:
#decode the token that was stored after login to extract the user id
user_id = User.get_token_user_id(auth_token)
if user_id == "Expired token. Please login to get a new token":
#First check if token exists in the Token table
token = Token.query.filter_by(token=auth_token).first()
# Delete token from the logged in user's table if it is in the logged in user table
if token is not None:
Token.delete_token(token)
return make_response(jsonify(
{
"message": " Token Expired. Please login to get a new one",
"status": "failure"
}
)), 403
if user_id == "Invalid token. Please register or login":
return make_response(jsonify(
{
"message": " Invalid Token. Please login to get a new one",
"status": "failure"
}
)), 403
#check if token exists in the Token table
token = Token.query.filter_by(token=auth_token).first()
#Use the user ID that was decoded from the token to extract
# the user so u can change the logged in flag to 0
user = User.query.filter_by(id=int(user_id)).first()
#check if the token is stored in the table with tokens
if token is not None:
#remove the token from the token table
Token.delete_token(token)
#set the user logged in flag to 0
user.logged_in = 0
user.save()
# create the response
response = {
"message": 'Logout Successful',
"status": "success"
}
# send the response
return make_response(jsonify(response)), 201
else:
                    # token not found in the table, so the user is already logged out
response = {
"message": 'No need you are already logged out',
"status": "success"
}
#make and send the response
return make_response(jsonify(response)), 303
except Exception as e:
response = {
"message": " Internal server error " + str(e),
"status": "failure"
}
# Return a server error using the HTTP Error Code 500 (Internal Server Error)
return make_response(jsonify(response)), 500
class Reset_password(MethodView):
"""This class-based view handles password resetting"""
@swag_from('../api-docs/reset_password.yml')
def post(self):
#Handle POST request for this view. Url ---> /auth/reset-password
"""Endpoint to reset"""
try:
# get auth token
auth_header = request.headers.get('Authorization')
auth_token = None
if auth_header and len(auth_header.split(" ")) > 1:
auth_token = auth_header.split(" ")[1]
if auth_token is None:
return make_response(jsonify(
{
"message": "Token required",
"status": "failure"
}
)), 403
if auth_token is not None:
if request.is_json:
data = request.get_json()
previous_password = data['<PASSWORD>']
new_password = data['<PASSWORD>']
if not isinstance(previous_password, str) or not isinstance(new_password, str):
response = {
"message": "Sorry, password reset unsuccessful. Please supply string values",
"status": "failure"
}
return make_response(jsonify(response)), 401
if new_password == "":
response = {
"message": "Please supply a value for your new password",
"status": "failure"
}
return make_response(jsonify(response)), 400
else:
response = {
"message": 'Please supply json data',
"status": "failure"
}
#make and send the response
return make_response(jsonify(response)), 400
#decode the token that was stored after login to
# extract the user id
user_id = User.get_token_user_id(auth_token)
if user_id == "Expired token. Please login to get a new token":
return make_response(jsonify(
{
"message": " Token Expired. Please login to get a new one",
"status": "failure"
}
)), 403
if user_id == "Invalid token. Please register or login":
return make_response(jsonify(
{
"message": " Invalid Token. Please login to get a new one",
"status": "failure"
}
)), 403
#check if token exists in | |
"""
"""
import math
import random
from PySide2.QtCore import QTimer, QSize, QRect
from PySide2.QtGui import QPainter, QPen, QColor
from PySide2.QtWidgets import QApplication, QWidget, QHBoxLayout, QFormLayout, QPushButton, QSpinBox, QDoubleSpinBox, \
QCheckBox
TOTAL_BOIDS = 150
DEFAULT_MAX_SPEED = 9.0
DEFAULT_MAX_DISTANCE = 60.0
DEFAULT_EYE_SIGHT_ANGLE = 120.0
SEPARATION_DISTANCE = 20.0
PUSH_BACK_SPEED = 0.5
COHESION_STRENGTH = 0.01
SEPARATION_STRENGTH = 0.1
SEPARATION_VALUE = 1.0
COHESION_VALUE = 1.0
ALIGNMENT_VALUE = 1.0
BOID_SIZE = 5.0
DEBUG = False
class Rect:
def __init__(self, x=0.0, y=0.0, width=0.0, height=0.0):
self.x = x
self.y = y
self.width = width
self.height = height
class Vec2(object):
def __init__(self, x=0.0, y=0.0):
self.x = x
self.y = y
def __repr__(self):
return 'Vec2(x={!r}, y={!r})'.format(self.x, self.y)
class BoidsEnvironment:
def __init__(self):
self.is_running = True
self.rect = Rect()
self.max_speed = DEFAULT_MAX_SPEED
self.boids = []
self.neighbour_max_distance = DEFAULT_MAX_DISTANCE
self.neighbour_max_distance2 = DEFAULT_MAX_DISTANCE * DEFAULT_MAX_DISTANCE
class Boid:
def __init__(self):
self.pos = Vec2()
self.velocity = Vec2(0.0, 0.0)
# self.steering = random.random() * 0.1
self.debug = False
self.eyesight = math.radians(DEFAULT_EYE_SIGHT_ANGLE)
self.steering = 0.05
self.separation_distance = SEPARATION_DISTANCE
self.separation_distance2 = SEPARATION_DISTANCE * SEPARATION_DISTANCE
self.separation = 1.0
self.alignment = 1.0
self.cohesion = 1.0
def add_vec2(vec1, vec2):
return Vec2(x=vec1.x + vec2.x, y=vec1.y + vec2.y)
def sub_vec2(vec1, vec2):
return Vec2(x=vec1.x - vec2.x, y=vec1.y - vec2.y)
def substract_vec2(vec1, vec2):
vec1.x -= vec2.x
vec1.y -= vec2.y
def mul_vec2(vec, value):
return Vec2(x=vec.x * value, y=vec.y * value)
def len_vec(vec):
return math.sqrt(vec.x * vec.x + vec.y * vec.y)
def len2_vec(vec):
return vec.x * vec.x + vec.y * vec.y
def div_vec(vec, value):
return Vec2(x=vec.x / value, y=vec.y / value)
def normalized_vec(vec):
length = len_vec(vec)
return Vec2(vec.x / length, vec.y / length)
def dot_vec(vec1, vec2):
return vec1.x * vec2.x + vec1.y * vec2.y
def get_neighbours(environment, boid, angle=True):
neighbours = []
angle_neighbours = []
dist_vec = Vec2()
for boid_i in environment.boids:
if boid_i == boid:
continue
dist_vec.x = boid_i.pos.x
dist_vec.y = boid_i.pos.y
substract_vec2(dist_vec, boid.pos)
# distance = len_vec(dist_vec)
dist_dist = dist_vec.x * dist_vec.x + dist_vec.y * dist_vec.y
if dist_dist == 0.0:
neighbours.append(boid_i)
elif dist_dist <= environment.neighbour_max_distance2:
dist_normalised = normalized_vec(dist_vec)
dot = dot_vec(boid.velocity, dist_normalised)
neighbours.append(boid_i)
if angle:
# radians = math.acos(dot)
# radians = math.atan2(dist_normalised.y, dist_normalised.x)
if dot > 1.0:
dot = 1.0
if dot < -1.0:
                    dot = -1.0
current_angle = math.acos(dot)
if abs(current_angle) > boid.eyesight:
continue
angle_neighbours.append(boid_i)
return neighbours, angle_neighbours
def _calc_separation_vec(boid, neighbours):
separation_vec = Vec2()
if neighbours:
for neighbour in neighbours:
diff = sub_vec2(neighbour.pos, boid.pos)
if len2_vec(diff) < boid.separation_distance2:
separation_vec = sub_vec2(separation_vec, diff)
        separation_vec = mul_vec2(separation_vec, SEPARATION_STRENGTH)
return separation_vec
def _calc_alignment_vec(boid, neighbours):
average_direction = Vec2()
if neighbours:
for neighbour in neighbours:
average_direction = add_vec2(average_direction, neighbour.velocity)
average_direction = div_vec(average_direction, len(neighbours))
average_direction = div_vec(sub_vec2(average_direction, boid.velocity), 8.0)
# alignment_vec = average_direction
# alignment_vec = normalized_vec(alignment_vec)
return average_direction
def _calc_cohesion(boid, neighbours):
average_pos = Vec2()
if neighbours:
for neighbour in neighbours:
average_pos = add_vec2(average_pos, neighbour.pos)
average_pos = div_vec(average_pos, len(neighbours))
average_pos = sub_vec2(average_pos, boid.pos)
average_pos = mul_vec2(average_pos, COHESION_STRENGTH)
return average_pos
def _limit_velocity(env, velocity):
if len_vec(velocity) > env.max_speed:
return mul_vec2(normalized_vec(velocity), env.max_speed)
return Vec2(velocity.x, velocity.y)
def tick_boid(environment, boid):
all_neighbours, neighbours = get_neighbours(environment, boid)
# separation
separation_vec = _calc_separation_vec(boid, all_neighbours)
separation_vec = mul_vec2(separation_vec, boid.separation)
# alignment
alignment_vec = _calc_alignment_vec(boid, neighbours)
alignment_vec = mul_vec2(alignment_vec, boid.alignment)
cohesion_vec = _calc_cohesion(boid, neighbours)
cohesion_vec = mul_vec2(cohesion_vec, boid.cohesion)
velocity = boid.velocity
velocity = add_vec2(velocity, separation_vec)
velocity = add_vec2(velocity, alignment_vec)
velocity = add_vec2(velocity, cohesion_vec)
velocity = add_vec2(velocity, _bound_position(environment, boid))
velocity = _limit_velocity(environment, velocity)
boid.velocity = velocity
boid.pos = add_vec2(boid.pos, velocity)
# bound_v = _bound_position(environment, boid)
# boid.velocity = bound_v
def _bound_position(env, boid):
rect = env.rect
xmin, xmax, ymin, ymax = rect.x, rect.x + rect.width, rect.y, rect.y + rect.height
vec = Vec2()
if boid.pos.x < xmin:
vec.x = PUSH_BACK_SPEED
elif boid.pos.x > xmax:
vec.x = -PUSH_BACK_SPEED
if boid.pos.y < ymin:
vec.y = PUSH_BACK_SPEED
elif boid.pos.y > ymax:
vec.y = -PUSH_BACK_SPEED
return vec
def tick_move_boids(environment, boid):
pass
# move
# new_pos = add_vec2(boid.pos, boid.velocity)
# bound_v = _bound_position(environment, boid)
# boid.velocity = bound_v
# new_pos.x = new_pos.x % environment.width
# new_pos.y = new_pos.y % environment.height
# boid.pos = new_pos
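# A minimal sketch (not wired into tick_boid) of the wrap-around alternative hinted at in the
# commented-out code above: instead of nudging boids back with _bound_position, their positions
# are wrapped around the rectangle edges. Assumes rect.width and rect.height are non-zero.
def _wrap_position(env, boid):
    rect = env.rect
    boid.pos.x = rect.x + ((boid.pos.x - rect.x) % rect.width)
    boid.pos.y = rect.y + ((boid.pos.y - rect.y) % rect.height)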
def tick_environment(environment):
if environment.is_running:
for boid in environment.boids:
tick_boid(environment, boid)
def rotate(vec, angle):
x = vec.x * math.cos(angle) - vec.y * math.sin(angle)
y = vec.x * math.sin(angle) + vec.y * math.cos(angle)
return Vec2(x=x, y=y)
def perpendicular_vec_clockwise(vec):
return Vec2(x=vec.y, y=-vec.x)
def perpendicular_vec_counter_clockwise(vec):
return Vec2(x=-vec.y, y=vec.x)
def _draw_boid_debug_line(painter, boid, vec):
dir_draw = add_vec2(boid.pos, vec)
painter.drawLine(boid.pos.x, boid.pos.y, dir_draw.x, dir_draw.y)
def _draw_boid_debug_circle(painter, boid, value):
painter.drawEllipse(boid.pos.x - value, boid.pos.y - value, value * 2.0,
value * 2.0)
def draw_boid(painter, environment, boid):
size = BOID_SIZE
half_size = size / 2.0
if len_vec(boid.velocity) != 0:
direction = normalized_vec(boid.velocity)
else:
direction = Vec2(1.0, 0.0)
end_point = add_vec2(boid.pos, mul_vec2(direction, size * 1.5))
# painter.drawEllipse(boid.pos.x - 10, boid.pos.y - 10, 20, 20)
# painter.drawLine(boid.pos.x, boid.pos.y, end_point.x, end_point.y)
side_1 = add_vec2(boid.pos, mul_vec2(perpendicular_vec_clockwise(direction), half_size))
side_2 = add_vec2(boid.pos, mul_vec2(perpendicular_vec_counter_clockwise(direction), half_size))
pen = QPen()
color = QColor()
if boid.debug:
color.setRgb(200, 0, 0)
else:
color.setRgb(0, 0, 0)
pen.setColor(color)
painter.setPen(pen)
painter.drawLine(side_2.x, side_2.y, end_point.x, end_point.y)
painter.drawLine(side_1.x, side_1.y, end_point.x, end_point.y)
# painter.drawLine(side_1.x, side_1.y, side_2.x, side_2.y)
if boid.debug:
max_dist = environment.neighbour_max_distance
pen.setColor(color)
_draw_boid_debug_line(painter, boid, mul_vec2(direction, max_dist))
# draw direction
sight_one = mul_vec2(rotate(direction, boid.eyesight), max_dist)
sight_two = mul_vec2(rotate(direction, -boid.eyesight), max_dist)
_draw_boid_debug_line(painter, boid, sight_one)
_draw_boid_debug_line(painter, boid, sight_two)
# draw range
_draw_boid_debug_circle(painter, boid, max_dist)
_draw_boid_debug_circle(painter, boid, boid.separation_distance)
        # draw detected neighbours
neighbours, angle_neighbours = get_neighbours(environment, boid)
for neighbour in angle_neighbours:
_draw_boid_debug_circle(painter, neighbour, 2)
if angle_neighbours:
separation_vec = _calc_separation_vec(boid, angle_neighbours)
color = QColor()
color.setRgb(200, 0, 200)
pen.setColor(color)
painter.setPen(pen)
_draw_boid_debug_line(painter, boid, mul_vec2(separation_vec, 1))
alignment_vec = _calc_alignment_vec(boid, angle_neighbours)
color = QColor()
color.setRgb(0, 200, 200)
pen.setColor(color)
painter.setPen(pen)
_draw_boid_debug_line(painter, boid, mul_vec2(alignment_vec, 50))
cohesion = _calc_cohesion(boid, angle_neighbours)
color = QColor()
color.setRgb(0, 120, 0)
pen.setColor(color)
painter.setPen(pen)
_draw_boid_debug_line(painter, boid, mul_vec2(cohesion, 1))
def _make_boid(rect, speed):
xmin, xmax, ymin, ymax = rect.x, rect.x + rect.width, rect.y, rect.y + rect.height
boid = Boid()
    boid.pos.x = random.uniform(xmin, xmax)
    boid.pos.y = random.uniform(ymin, ymax)
    direction = Vec2((random.random() * 2.0) - 1.0, (random.random() * 2.0) - 1.0)
    boid.velocity = mul_vec2(normalized_vec(direction), speed)
# boid.velocity = 0.5
boid.debug = False
return boid
def _make_environment(total, rect, max_speed, separation):
env = BoidsEnvironment()
env.rect = rect
for i in range(total):
boid = _make_boid(env.rect, max_speed)
boid.debug = False
boid.separation = separation
env.boids.append(boid)
if len(env.boids) > 1:
env.boids[0].debug = DEBUG
env.boids[0].pos = Vec2(env.rect.x + env.rect.width / 2.0, env.rect.y + env.rect.height / 2.0)
return env
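# A minimal headless usage sketch (no Qt required): build an environment with the module defaults
# and advance the simulation a few steps. The rectangle size used here is an arbitrary example value.
def _run_headless(steps=100):
    env = _make_environment(TOTAL_BOIDS, Rect(0.0, 0.0, 450.0, 450.0), DEFAULT_MAX_SPEED, SEPARATION_VALUE)
    for _ in range(steps):
        tick_environment(env)
    return env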
class BoidsWidget(QWidget):
def __init__(self):
super(BoidsWidget, self).__init__()
self.setWindowTitle('Boids')
self.environment = None
def set_boids_environment(self, environment):
self.environment = environment
self.update()
def paintEvent(self, event):
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing, True)
# painter.translate(QPointF(-25, -25))
if self.environment:
rect = QRect(self.environment.rect.x, self.environment.rect.y, self.environment.rect.width,
self.environment.rect.height)
painter.drawRect(rect)
for boid in self.environment.boids:
draw_boid(painter, self.environment, boid)
def sizeHint(self):
return QSize(450, 450)
def showEvent(self, event):
self.update()
def _make_spinbox(default, min_value, max_value):
spinbox = QSpinBox()
spinbox.setMaximum(max_value)
spinbox.setMinimum(min_value)
spinbox.setValue(default)
return spinbox
def _make_double_spinbox(default, min_value, max_value, single_step):
spinbox = QDoubleSpinBox()
spinbox.setSingleStep(single_step)
spinbox.setMaximum(max_value)
spinbox.setMinimum(min_value)
spinbox.setValue(default)
return spinbox
def _change_total(environment, new_total):
total = len(environment.boids)
if total == new_total:
return environment
elif total > new_total:
environment.boids = environment.boids[:new_total]
else:
diff = new_total - total
for i in range(diff):
environment.boids.append(
_make_boid(environment.rect, environment.max_speed))
class BoidsApp(QWidget):
def __init__(self):
super(BoidsApp, self).__init__()
self.setWindowTitle('Boids')
self._boids_environment = BoidsEnvironment()
layout = QHBoxLayout()
self._boids_widget = BoidsWidget()
self._boids_widget.set_boids_environment(self._boids_environment)
self._boids_widget.setMinimumWidth(550)
self._form_layout = QFormLayout()
self._is_running_checkbox = QCheckBox()
self._is_running_checkbox.setChecked(True)
self._is_running_checkbox.stateChanged.connect(self.__is_running_changed)
self._tick_button = QPushButton()
self._tick_button.setText('Tick')
self._tick_button.clicked.connect(self.__do_tick)
self._total_spinbox = _make_spinbox(TOTAL_BOIDS, 0, 9999)
self._total_spinbox.valueChanged.connect(self.__total_changed)
self._max_speed_spinbox = _make_double_spinbox(DEFAULT_MAX_SPEED, 0.0, 9999.0, 0.1)
self._max_speed_spinbox.valueChanged.connect(self.__max_speed_changed)
self._eyesight_radius_spinbox = _make_double_spinbox(DEFAULT_MAX_DISTANCE, 0.0, 9999.0, 0.5)
self._eyesight_radius_spinbox.valueChanged.connect(self.__eyesight_radius_changed)
self._eyesight_angle_spinbox = _make_double_spinbox(DEFAULT_EYE_SIGHT_ANGLE, 0.0, 360.0, 1.0)
self._eyesight_angle_spinbox.valueChanged.connect(self.__eyesight_angle_changed)
self._separation_distance_spinbox = _make_double_spinbox(SEPARATION_DISTANCE, 0.0, 9999.0, 1.0)
self._separation_distance_spinbox.valueChanged.connect(self.__separation_distance_changed)
self._separation_value_spinbox = _make_double_spinbox(SEPARATION_VALUE, 0.0, 1.0, 0.1)
self._separation_value_spinbox.valueChanged.connect(self.__separation_value_changed)
self._cohesion_value_spinbox = _make_double_spinbox(COHESION_VALUE, 0.0, 1.0, 0.1)
self._cohesion_value_spinbox.valueChanged.connect(self.__cohesion_value_changed)
self._alignment_value_spinbox = _make_double_spinbox(ALIGNMENT_VALUE, 0.0, 1.0, 0.1)
self._alignment_value_spinbox.valueChanged.connect(self.__alignment_value_changed)
self._generate_button = QPushButton()
self._generate_button.setText('Generate')
self._generate_button.clicked.connect(self._generate_environment)
self._form_layout.addRow('Total Boids', self._total_spinbox)
self._form_layout.addRow('Separation', self._separation_value_spinbox)
self._form_layout.addRow('Cohesion', self._cohesion_value_spinbox)
self._form_layout.addRow('Alignment', self._alignment_value_spinbox)
self._form_layout.addRow('Max Speed', self._max_speed_spinbox)
self._form_layout.addRow('Eyesight Radius', self._eyesight_radius_spinbox)
self._form_layout.addRow('Eyesight Angle (degrees)', self._eyesight_angle_spinbox)
self._form_layout.addRow('Separation Distance', self._separation_distance_spinbox)
self._form_layout.addRow('Running', self._is_running_checkbox)
self._form_layout.addWidget(self._generate_button)
self._form_layout.addWidget(self._tick_button)
layout.addWidget(self._boids_widget)
layout.addLayout(self._form_layout)
self.setLayout(layout)
self.tick_timer = QTimer()
self.tick_timer.setInterval(int(1000 / 24))
self.tick_timer.timeout.connect(self._tick)
self.tick_timer.start()
def showEvent(self, event):
self._generate_environment()
def _tick(self):
tick_environment(self._boids_environment)
self._boids_widget.update()
def __do_tick(self):
is_running = self._boids_environment.is_running
self._boids_environment.is_running = True
tick_environment(self._boids_environment)
self._boids_widget.update()
self._boids_environment.is_running = is_running
def __total_changed(self):
total = self._total_spinbox.value()
_change_total(self._boids_environment, total)
def __cohesion_value_changed(self):
value = self._cohesion_value_spinbox.value()
for boid in self._boids_environment.boids:
boid.cohesion = value
def __alignment_value_changed(self):
value = self._alignment_value_spinbox.value()
for boid in self._boids_environment.boids:
boid.alignment = value
def __max_speed_changed(self):
self._boids_environment.max_speed = self._max_speed_spinbox.value()
def __eyesight_radius_changed(self):
value = self._eyesight_radius_spinbox.value()
self._boids_environment.neighbour_max_distance = value
self._boids_environment.neighbour_max_distance2 = value * value
def __is_running_changed(self):
self._boids_environment.is_running = self._is_running_checkbox.isChecked()
def __separation_distance_changed(self):
value = self._separation_distance_spinbox.value()
for boid in self._boids_environment.boids:
boid.separation_distance = value
boid.separation_distance2 = value * value
def __separation_value_changed(self):
value = self._separation_value_spinbox.value()
        for boid in self._boids_environment.boids:
            boid.separation = value
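# A minimal sketch of a standalone entry point for the widget above, assuming the module is run
# as a script and that PySide2's exec_() event loop is used:
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    window = BoidsApp()
    window.show()
    sys.exit(app.exec_())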
tried to reanalyze on GPU with reanalyze off. I'll fix it later. Perhaps something like 'self.use_last_model_value = self.reanalyse_on_gpu = True' on one line in the game file
self.config.train_on_gpu
+ self.config.num_workers * self.config.selfplay_on_gpu
+ log_in_tensorboard * self.config.selfplay_on_gpu
+ self.config.use_last_model_value * self.config.reanalyse_on_gpu
)
# Similar to what we did in __init__
if 1 < num_gpus_per_worker:
num_gpus_per_worker = math.floor(num_gpus_per_worker)
else:
num_gpus_per_worker = 0
# Just tried running this thing, and I was actually able to use the train option! It didn't really do anything, and it went right back to the option menu immediately after running, but it didn't give me an error! It even created a "results" directory and saved a result! Cool!
# Initialize training workers.
self.training_worker = trainer.Trainer.options( # .Trainer is defined in trainer.py, .options is defined in ray.
num_cpus=0,
num_gpus=num_gpus_per_worker if self.config.train_on_gpu else 0,
).remote(self.checkpoint, self.config)
### Since I had no trainer.py yet, I got ModuleNotFoundError: No module named 'trainer' after running what I had so far, so I decided to code that next. Head on over there!
# Initialize storage workers
self.shared_storage_worker = shared_storage.SharedStorage.remote(self.checkpoint, self.config)
self.shared_storage_worker.set_info.remote("terminate", False)
# Initialize replay buffer workers.
self.replay_buffer_worker = replay_buffer.ReplayBuffer.remote(self.checkpoint, self.replay_buffer, self.config)
### Since I had no replay_buffer.py yet, I got ModuleNotFoundError: No module named 'replay_buffer' after running this. I decided to go code the init for that next, so head on over to replay_buffer.py!
# Part of Reanalyze, use the last model to provide a fresher, stable n-step value (See paper appendix Reanalyze). We probably won't reanalyze due to the nature of our environment.
if self.config.use_last_model_value:
self.reanalyse_worker = replay_buffer.Reanalyze.options(
num_cpus=0,
num_gpus=num_gpus_per_worker if self.config.reanalyse_on_gpu else 0, # I know reanalyze is spelled wrong here, but it would just be a pain to correct all the misspellings, so for now I will leave some of them as they are.
).remote(self.checkpoint, self.config)
# Initialize a list of self play workers for seed in range(self.config.num_workers)
self.self_play_workers = [
self_play.SelfPlay.options(
num_cpus=0,
num_gpus=num_gpus_per_worker if self.config.selfplay_on_gpu else 0,
).remote(self.checkpoint, self.Game, self.config, self.config.seed + seed)
for seed in range(self.config.num_workers)
]
### No self_play.py yet, so got ModuleNotFoundError: No module named 'self_play'. Decided to create the init for that, too, so head on over there!
# Launch workers
[
            self_play_worker.continuous_self_play.remote(self.shared_storage_worker, self.replay_buffer_worker) # The list comprehension is just a compact way to kick off the remote call on every self-play worker; the returned list of object refs is not kept.
for self_play_worker in self.self_play_workers
]
self.training_worker.continuous_update_weights.remote(self.replay_buffer_worker, self.shared_storage_worker)
if self.config.use_last_model_value:
self.reanalyse_worker.reanalyse.remote(self.replay_buffer_worker, self.shared_storage_worker)
if log_in_tensorboard:
self.logging_loop(num_gpus_per_worker if self.config.selfplay_on_gpu else 0)
### Alright, we're done with the train function! Kind of! Okay, we got an error telling us: AttributeError: 'ActorHandle' object has no attribute 'continuous_self_play'
### So let's go over to self_play.py and implement that!
### Okay, after implementing self_play.py, I tried to run it again, but this time I got an error telling me the Reanalyze class has no reanalyze function,
### and I realized that I needed to finish the Reanalyze class.
### But I also realized that there were a bunch of functions and classes and things that muzero.py relied on in both replay_buffer.py and trainer.py,
### so instead of jumping around and patching holes here and there, I'm just going to finish ALL of replay_buffer.py and then ALL of trainer.py,
### So let's head on over to replay_buffer.py, under the init, and finish everything up there. We're a little over halfway done with the whole thing, I think!
### Okay, we've finally finished both replay_buffer.py and trainer.py! But after running the code, I still got an error:
### AttributeError: 'MuZero' object has no attribute 'logging_loop'
### Whoops. Forgot about that. Fortunately, though, we don't have to go to another file to implement that, so keep reading and let's get it done!
def logging_loop(self, num_gpus):
"""
Use TensorBoard to keep track of training performance.
"""
# Launch the test worker to get performance metrics
self.test_worker = self_play.SelfPlay.options(num_cpus=0, num_gpus=num_gpus).remote( # Sorry I split this up, but it was too long for one line even for me.
self.checkpoint, self.Game, self.config, self.config.seed + self.config.num_workers
)
self.test_worker.continuous_self_play.remote(self.shared_storage_worker, None, True)
# Write everything in TensorBoard
writer = SummaryWriter(self.config.results_path) # Creates a `SummaryWriter` that will write out events and summaries to the event file
print("\nTraining...\nRun 'tensorboard --logdir ./results' and go to http://localhost:6006/ to see in real time the training performance.\n")
# Save hyperparameters to TensorBoard
hp_table = [
f"| {key} | {value} |" for key, value in self.config.__dict__.items()
]
writer.add_text(
"Hyperparameters",
"| Parameter | Value |\n|-------|-------|\n" + "\n".join(hp_table)
)
# Save model representation
writer.add_text("Model summary", self.summary)
# Loop for updating the training performance
        counter = 0 # Initialize counter; it's the x-axis step passed to every writer.add_scalar call below, i.e. how many times the upcoming loop has iterated.
keys = [
"total_reward",
"muzero_reward",
"opponent_reward",
"episode_length",
"mean_value",
"training_step",
"lr",
"total_loss",
"value_loss",
"reward_loss",
"policy_loss",
"num_played_games",
"num_played_steps",
"num_reanalysed_games",
]
info = ray.get(self.shared_storage_worker.get_info.remote(keys)) # Get info to determine training step.
# For each training step, save a bunch of data to TensorBoard. Save rewards, worker info, and losses.
try:
while info["training_step"] < self.config.training_steps:
info = ray.get(self.shared_storage_worker.get_info.remote(keys))
writer.add_scalar("1.Total_reward/1.Total_reward", info["total_reward"], counter)
writer.add_scalar("1.Total_reward/2.Mean_value", info["mean_value"], counter)
writer.add_scalar("1.Total_reward/3.Episode_length", info["episode_length"], counter)
writer.add_scalar("1.Total_reward/4.MuZero_reward", info["muzero_reward"], counter)
writer.add_scalar("1.Total_reward/5.Opponent_reward", info["opponent_reward"], counter) # In his code, this scalar was spread over three lines, rather than one, like the rest. Once again, code generator?
writer.add_scalar("2.Workers/1.Self_played_games", info["num_played_games"], counter)
writer.add_scalar("2.Workers/2.Training_steps", info["training_step"], counter) # Or perhaps sloppy code style? He typically had a comma after the 'counter' argument, but this time he didn't.
writer.add_scalar("2.Workers/3.Self_played_steps", info["num_played_steps"], counter)
writer.add_scalar("2.Workers/4.Reanalysed_games", info["num_reanalysed_games"], counter)
writer.add_scalar("2.Workers/5.Training_steps_per_self_played_step_ratio", info["training_step"] / max(1, info["num_played_steps"]), counter)
writer.add_scalar("2.Workers/6.Learning_rate", info["lr"], counter)
writer.add_scalar("3.Loss/1.Total_weighted_loss", info["total_loss"], counter)
writer.add_scalar("3.Loss/Value_loss", info["value_loss"], counter)
writer.add_scalar("3.Loss/Reward_loss", info["reward_loss"], counter)
writer.add_scalar("3.Loss/Policy_loss", info["policy_loss"], counter)
print(
f'Last test reward: {info["total_reward"]:.2f}. Training step: {info["training_step"]}/{self.config.training_steps}. Played games: {info["num_played_games"]}. Loss: {info["total_loss"]:.2f}',
end="\r",
)
counter += 1
time.sleep(0.5)
        # So wait, this is actually pretty important! KeyboardInterrupt means that if we hit ctrl+C inside this loop, we catch it and just pass.
        # Note that passing only drops us out of the monitoring loop: execution continues below the except, so terminate_workers()
        # and the replay-buffer save under config.save_model still run. Ctrl+C inside this loop should therefore NOT skip the save.
        # A ctrl+C that lands outside this try block (or inside one of the remote workers) is a different story, and the workers
        # are not terminated very gracefully in that case.
except KeyboardInterrupt:
pass
self.terminate_workers()
if self.config.save_model:
            # Persist the replay buffer to disk
print("\n\nPersisting replay buffer games to disk...")
# Serialize the replay buffer data in the curly braces and save it to the file we open.
pickle.dump( # pickle serializes, or "pickles", a python object
{
"buffer": self.replay_buffer,
"num_played_games": self.checkpoint["num_played_games"],
"num_played_steps": self.checkpoint["num_played_steps"],
"num_reanalysed_games": self.checkpoint["num_reanalysed_games"],
},
open(os.path.join(self.config.results_path, "replay_buffer.pkl"), "wb")
)
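    ### A hypothetical helper (not in the original file) that mirrors the pickle.dump above:
    ### it restores a previously persisted replay buffer and its counters from disk.
    def load_replay_buffer_from_disk(self, path):
        with open(path, "rb") as f:
            saved = pickle.load(f)
        self.replay_buffer = saved["buffer"]
        for key in ("num_played_games", "num_played_steps", "num_reanalysed_games"):
            self.checkpoint[key] = saved[key]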
def terminate_workers(self):
"""
Softly terminate the running tasks and garbage collect the workers.
"""
if self.shared_storage_worker:
self.shared_storage_worker.set_info.remote("terminate", True)
self.checkpoint = ray.get(self.shared_storage_worker.get_checkpoint.remote())
if self.replay_buffer_worker:
self.replay_buffer = ray.get(self.replay_buffer_worker.get_buffer.remote())
print("\nShutting down workers...")
self.self_play_workers = None
self.test_worker = None
self.training_worker = None
self.reanalyse_worker = None
self.replay_buffer_worker = None
self.shared_storage_worker = None
### Alright, I think we've got everything we need to train this thing! We'll implement test() and all the other stuff later, but for now lets see if we can start
### training it to play a game.
### Okay, got some errors, but I think they were because of minor mistakes I made in copying the code. Also, forgot to implement scalar_to_support in models.py, so I went over and did that.
### IT'S WORKING! IT'S WORKING!!! I'll probably comb over the code one last time just to be sure I didn't mess anything up, but it's actually training! Awesome!
### Actually, before I comb over all the code again, let's finish the rest of muzero.py and then implement resnet in models.py. That way we can let it train for a while and test it to make sure it actually works. This is so awesome!
    def test(self, render=True,
<filename>llgeo/props_nonlinear/darendeli_2011.py
''' Darendeli Non-linear Properties for Equivalent-Linear Ground Response Analyses
DESCRIPTION:
This module contains functions related to the non-linear dynamic properties of
soils, specifically geared towards use in equivalent-linear ground response
analyses. The curves specified by Darendeli (2001) are coded here and can
be used for a variety of applications.
FUNCTIONS:
This module contains the following functions:
* params: calculates parameters for Darendeli curves, based on soil props
    * curves: returns Darendeli curves, based on calculated params
'''
# ------------------------------------------------------------------------------
# Import Modules
# ------------------------------------------------------------------------------
# Standard libraries
import numpy as np
import warnings
# LLGEO Modules
import llgeo.utilities.check_errors as llgeo_errs
# ------------------------------------------------------------------------------
# Main Functions
# ------------------------------------------------------------------------------
def params(PI, OCR, sigp_o, N = 10, load_freq = 1, type = 'mean'):
''' Calculates parameters for Darendeli curves, based on soil properties
Purpose
-------
Given the set of soil properties, calculates the parameters needed to obtain
    mean modulus reduction and damping curves according to Darendeli (2001).
Parameters
-----------
PI (%) : float
Soil Plasticity Index
OCR (-) : float
Overconsolidation Ratio
sigp_o (atm) : float
In-situ mean effective confining stress (sigma'o)
N (-) : float, optional
Number of loading cycles. Not critical, defaults to 10 cycles.
load_freq (Hz) : float, optional
Loading frequency. Not critical, defaults to 1 Hz.
Returns
-------
a (-) : float
Curvature coefficient (set as constant Phi_5)
b (-) : float
Scaling coefficient on material damping curve
D_min (dec) : float
Small strain damping. Ignores effect of high-amplitude cycling on Dmin
(see Section 6.3 and Pg 144 in Ref_1)
sstrn_r (dec) : float
Reference strain, corresponds to the strain amplitude when shear modulus
reduced to one half of Gmax (key characteristic of the hyperbolic model
employed in Darendeli's research). See Section 6.2, Pg. 132 in Ref_1.
Note
----
* This is done separately to explore the dependence of the model parameters
on the soil properties.
References
----------
(1) Darendeli, <NAME>. (2001). Development of a New Family of
Normalized Modulus Reduction and Material Damping Curves. 393.
See : Section 7.4.1 Page 172, in Ref(1)
Table 8.12, Page 214, in Ref(1)
'''
# Phi constants (1-indexed to match equation notations)
# These are calibrated to all credible data from Darendeli
    if type == 'mean':
        phis = [np.nan, 0.0352, 0.0010, 0.3246, 0.3483, 0.9190, 0.8005, 0.0129,
                -0.1069, -0.2889, 0.2919, 0.6329, -0.0057]
    else:
        raise ValueError("Only type = 'mean' curves have been coded so far")
# Curvature coefficient (Eq. 7.26b)
a = phis[5]
# Scaling coefficient (Eq. 7.28b)
b = phis[11] + phis[12] * np.log(N)
# Minimum damping (Eq. 7.28a)
D_min = (phis[6] + phis[7] * PI * OCR ** phis[8]) * sigp_o ** phis[9] * \
(1 + phis[10] * np.log(load_freq))
# Reference strain (gamma_r in equations) (Eq. 7.26a)
sstrn_r = (phis[1] + phis[2] * PI * OCR ** phis[3]) * sigp_o ** phis[4]
return(a, b, D_min, sstrn_r)
def curves(sstrn, PI, OCR, sigp_o, N = 10, load_freq = 1, type = 'mean'):
''' Mean modulus reduction and damping curves
Purpose
-------
Generate mean modulus reduction and material damping curves for a given set
    of material properties, following Darendeli (2001). Uses the results that
were calibrated to all the collected data, as recommended by author.
Parameters
-----------
    sstrn (%) : array
        Shearing strains of interest (in %, not dec)
        Any numpy array will work, but should probably be log-spaced!
    PI (%) : float
        Soil Plasticity Index
    OCR (-) : float
        Overconsolidation Ratio
    sigp_o (atm) : float
        In-situ mean effective confining stress (sigma'o)
    N (-) : float, optional
        Number of loading cycles. Not critical, defaults to 10 cycles.
    load_freq (Hz) : float, optional
        Loading frequency. Not critical, defaults to 1 Hz.
    type : str, optional
        Curve calibration to use; only 'mean' has been coded so far.
Returns
-------
G_red (dec) : array
MEAN modulus reduction curve for material properties.
Each value corresponds to shear strain levels given by sstrn
    D_adj (%) : array
MEAN damping curve for given properties.
Each value corresponds to shear strain levels given by sstrn
Note that these are percentage values (not dec)
Notes
-----
* Be careful about providing mean effective confining pressure in atm!
Refs
----
(1) Darendeli, <NAME>. (2001). Development of a New Family of
Normalized Modulus Reduction and Material Damping Curves. 393.
See : Section 7.4.1 Page 172, in Ref(1)
Table 8.12, Page 214, in Ref(1)
'''
a, b, D_min, sstrn_r = params(PI, OCR, sigp_o, N, load_freq, type)
# Normalized modulus reduction curve (Eq. 7.25)
G_red = 1 / (1 + (sstrn / sstrn_r)**a)
# Damping constants (Eq. 7.27) ~ one-indexed
c = [np.nan, -1.1143 * a ** 2 + 1.8618 * a + 0.2523,
+0.0805 * a ** 2 - 0.0710 * a - 0.0095,
-0.0005 * a ** 2 + 0.0002 * a + 0.0003 ]
# Masing damping (Eq. 7.27)
D_mas_a1 = 100 / np.pi * \
(4 * (sstrn - sstrn_r * np.log((sstrn + sstrn_r) / sstrn_r)) / \
(sstrn**2 / (sstrn + sstrn_r)) - 2)
D_mas = c[1] * D_mas_a1 + c[2] * D_mas_a1**2 + c[3] * D_mas_a1**3
    # Adjusted damping (final one!) (Eq. 7.27)
# Note that notation is a bit different in Pg. 214 and 174
D_adjs = b * G_red**0.1 * D_mas + D_min
return G_red, D_adjs
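# A minimal usage sketch (illustrative numbers only): mean curves for a moderately plastic,
# lightly overconsolidated soil at 1 atm mean effective confinement, over a log-spaced strain range.
def _example_mean_curves():
    sstrn = np.logspace(-4, 1, 50) # shear strains from 1e-4 % to 10 %
    G_red, D_adj = curves(sstrn, PI=20, OCR=1.5, sigp_o=1.0)
    return G_red, D_adj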
# ------------------------------------------------------------------------------
# Helper Functions - nothing to see here :)
# ------------------------------------------------------------------------------
def check_darendeli_args(PI, sigp_o, OCR, N, load_freq, ctype):
''' Does basic error checking for darendeli arguments
TODO-soon: integrate this into the curves, figure out how to handle logging'''
# Reference to Darendeli's thesis for error references
ref_01 = 'Darendeli (2001) Development of a New Family of Normalized ...'
# Initialize lists that will contain conditions, and messages
conds = [] # Conditions under which error is flagged
mssgs = [] # Error message to be displayed
    terrs = [] # Type of error ('warn' = warning, 'fatal' = fatal)
    # Plasticity index must be greater than or equal to zero
conds += [PI < 0]
terrs += ['fatal']
mssgs += ['\n\n\t PI = {:4.1f} %'.format(PI) + \
'\n\t\t Must be greater than or equal to zero']
# Mean confining stress must be greater than zero
conds += [sigp_o <= 0]
terrs += ['fatal']
mssgs += ['\n\n\t sigp_o = {:6.2f} atm'.format(sigp_o) + \
'\n\t\t Must be greater than zero']
# OCR must be greater than zero
conds += [OCR <= 0]
terrs += ['fatal']
mssgs += ['\n\n\t OCR = {:6.2f} '.format(OCR) + \
'\n\t\t Must be greater than zero']
# Number of cycles must be greater than zero
conds += [N <= 0]
terrs += ['fatal']
mssgs += ['\n\n\t N = {:6.2f} '.format(N) + \
'\n\t\t Must be greater than zero']
# Load frequency must be greater than zero
conds += [load_freq <= 0]
terrs += ['fatal']
mssgs += ['\n\n\t load_freq = {:6.2f} Hz'.format(load_freq) + \
'\n\t\t Must be greater than zero']
# Check that the type of curves are the mean
conds += [ctype != 'mean']
terrs += ['fatal']
mssgs += ['\n\n\t ctype = {:s}'.format(ctype) + \
'\n\t\t So far, only mean curves have been coded :(']
# Limit on database for mean confining stress
conds += [sigp_o > 27.2]
terrs += ['warn']
mssgs += ['\n\n\t sigp_o = {:6.2f} atm.'.format(sigp_o) + \
'\n\t\t Darendeli tested up to stresses of 27.2 atm' + \
'\n\t\t See Figure 3.9, Page 42, in ref: ' + \
'\n\t\t ' + ref_01 ]
# Limit on database for plasticity index
conds += [PI > 132]
terrs += ['warn']
mssgs += ['\n\n\t PI = {:4.1f} %'.format(PI) + \
              '\n\t\t Darendeli tested up to a plasticity index of 132% ' + \
'\n\t\t See Figure 3.11, Page 44, in ref: ' + \
'\n\t\t ' + ref_01]
# Limit on database for overconsolidation ratio
conds += [OCR > 8]
terrs += ['warn']
    mssgs += ['\n\n\t OCR = {:4.1f} '.format(OCR) +\
              '\n\t\t Darendeli tested up to overconsolidation ratios of 8' +\
              '\n\t\t See Figure 3.19,
x1, -1, [[0]])
self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [[0]])
self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1])
self.base_bad_size_zi([1], [1, 1], x1, -1, [[0]])
self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [[0], [1]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2, 3])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [[0], [1]])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2, 3])
# rank 2
x2 = np.arange(12).reshape((4,3))
# for axis=0 zi.shape should == (max(len(a),len(b))-1, 3)
self.base_bad_size_zi([1], [1], x2, 0, [0])
# for each of these there are 5 cases tested (in this order):
# 1. not deep enough, right # elements
# 2. too deep, right # elements
# 3. right depth, right # elements, transposed
# 4. right depth, too few elements
# 5. right depth, too many elements
self.base_bad_size_zi([1, 1], [1], x2, 0, [0,1,2])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[[0,1,2]]])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[0], [1], [2]])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1]])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1,2,3]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [0,1,2,3,4,5])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[[0,1,2],[3,4,5]]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [0,1,2])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[[0,1,2]]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[0], [1], [2]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1,2,3]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [0,1,2,3,4,5])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[[0,1,2],[3,4,5]]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [0,1,2,3,4,5])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[[0,1,2],[3,4,5]]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]])
# for axis=1 zi.shape should == (4, max(len(a),len(b))-1)
self.base_bad_size_zi([1], [1], x2, 1, [0])
self.base_bad_size_zi([1, 1], [1], x2, 1, [0,1,2,3])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[[0],[1],[2],[3]]])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[0, 1, 2, 3]])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2]])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2],[3],[4]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [0,1,2,3,4,5,6,7])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [0,1,2,3])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[[0],[1],[2],[3]]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[0, 1, 2, 3]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2],[3],[4]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [0,1,2,3,4,5,6,7])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [0,1,2,3,4,5,6,7])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])
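    def test_good_size_zi_rank2(self):
        # A hypothetical positive counterpart to the checks above (not part of the original
        # suite): with len(b) == 2 and axis=0, a zi of shape (1, 3) is accepted for a (4, 3) x.
        x2 = self.convert_dtype(np.arange(12).reshape((4, 3)))
        b = self.convert_dtype([1, 1])
        a = self.convert_dtype([1])
        zi = self.convert_dtype(np.zeros((1, 3)))
        y, zf = lfilter(b, a, x2, axis=0, zi=zi)
        assert_equal(y.shape, x2.shape)
        assert_equal(zf.shape, (1, 3))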
def test_empty_zi(self):
# Regression test for #880: empty array for zi crashes.
x = self.generate((5,))
a = self.convert_dtype([1])
b = self.convert_dtype([1])
zi = self.convert_dtype([])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, x)
assert_equal(zf.dtype, self.dtype)
assert_equal(zf.size, 0)
# def test_lfiltic_bad_zi(self):
# # Regression test for #3699: bad initial conditions
# a = self.convert_dtype([1])
# b = self.convert_dtype([1])
# # "y" sets the datatype of zi, so it truncates if int
# zi = lfiltic(b, a, [1., 0])
# zi_1 = lfiltic(b, a, [1, 0])
# zi_2 = lfiltic(b, a, [True, False])
# assert_array_equal(zi, zi_1)
# assert_array_equal(zi, zi_2)
def test_short_x_FIR(self):
# regression test for #5116
# x shorter than b, with non None zi fails
a = self.convert_dtype([1])
b = self.convert_dtype([1, 0, -1])
zi = self.convert_dtype([2, 7])
x = self.convert_dtype([72])
ye = self.convert_dtype([74])
zfe = self.convert_dtype([7, -72])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, ye)
assert_array_almost_equal(zf, zfe)
def test_short_x_IIR(self):
# regression test for #5116
# x shorter than b, with non None zi fails
a = self.convert_dtype([1, 1])
b = self.convert_dtype([1, 0, -1])
zi = self.convert_dtype([2, 7])
x = self.convert_dtype([72])
ye = self.convert_dtype([74])
zfe = self.convert_dtype([-67, -72])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, ye)
assert_array_almost_equal(zf, zfe)
def test_do_not_modify_a_b_IIR(self):
x = self.generate((6,))
b = self.convert_dtype([1, -1])
b0 = b.copy()
a = self.convert_dtype([0.5, -0.5])
a0 = a.copy()
y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.])
y_f = lfilter(b, a, x)
assert_array_almost_equal(y_f, y_r)
assert_equal(b, b0)
assert_equal(a, a0)
def test_do_not_modify_a_b_FIR(self):
x = self.generate((6,))
b = self.convert_dtype([1, 0, 1])
b0 = b.copy()
a = self.convert_dtype([2])
a0 = a.copy()
y_r = self.convert_dtype([0, 0.5, 1, 2, 3, 4.])
y_f = lfilter(b, a, x)
assert_array_almost_equal(y_f, y_r)
assert_equal(b, b0)
assert_equal(a, a0)
class TestLinearFilterFloat32(_TestLinearFilter):
dtype = np.dtype('f')
class TestLinearFilterFloat64(_TestLinearFilter):
dtype = np.dtype('d')
class TestLinearFilterFloatExtended(_TestLinearFilter):
dtype = np.dtype('g')
class TestLinearFilterComplex64(_TestLinearFilter):
dtype = np.dtype('F')
class TestLinearFilterComplex128(_TestLinearFilter):
dtype = np.dtype('D')
class TestLinearFilterComplexExtended(_TestLinearFilter):
dtype = np.dtype('G')
class TestLinearFilterDecimal(_TestLinearFilter):
dtype = np.dtype('O')
def type(self, x):
return Decimal(str(x))
class TestLinearFilterObject(_TestLinearFilter):
dtype = np.dtype('O')
type = float
def test_lfilter_bad_object():
# lfilter: object arrays with non-numeric objects raise TypeError.
# Regression test for ticket #1452.
assert_raises(TypeError, lfilter, [1.0], [1.0], [1.0, None, 2.0])
assert_raises(TypeError, lfilter, [1.0], [None], [1.0, 2.0, 3.0])
assert_raises(TypeError, lfilter, [None], [1.0], [1.0, 2.0, 3.0])
class TestLFilterZI(object):
def test_basic(self):
a = np.array([1.0, -1.0, 0.5])
b = np.array([1.0, 0.0, 2.0])
zi_expected = np.array([5.0, -1.0])
zi = lfilter_zi(b, a)
assert_array_almost_equal(zi, zi_expected)
def test_scale_invariance(self):
# Regression test. There was a bug in which b was not correctly
# rescaled when a[0] was nonzero.
b = np.array([2, 8, 5])
a = np.array([1, 1, 8])
zi1 = lfilter_zi(b, a)
zi2 = lfilter_zi(2*b, 2*a)
assert_allclose(zi2, zi1, rtol=1e-12)
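# A minimal usage sketch of lfilter_zi (illustrative only): scaling zi by the first sample gives
# initial conditions that suppress the start-up transient for a step-like input.
def _lfilter_zi_demo():
    b, a = butter(3, 0.125)
    x = np.ones(50)
    zi = lfilter_zi(b, a)
    y, _ = lfilter(b, a, x, zi=zi * x[0])
    return y # stays (numerically) at 1.0 instead of ramping up from zero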
class TestFiltFilt(object):
filtfilt_kind = 'tf'
def filtfilt(self, zpk, x, axis=-1, padtype='odd', padlen=None,
method='pad', irlen=None):
if self.filtfilt_kind == 'tf':
b, a = zpk2tf(*zpk)
return filtfilt(b, a, x, axis, padtype, padlen, method, irlen)
elif self.filtfilt_kind == 'sos':
sos = zpk2sos(*zpk)
return sosfiltfilt(sos, x, axis, padtype, padlen)
def test_basic(self):
zpk = tf2zpk([1, 2, 3], [1, 2, 3])
out = self.filtfilt(zpk, np.arange(12))
assert_allclose(out, np.arange(12), atol=1e-11)
def test_sine(self):
rate = 2000
t = np.linspace(0, 1.0, rate + 1)
# A signal with low frequency and a high frequency.
xlow = np.sin(5 * 2 * np.pi * t)
xhigh = np.sin(250 * 2 * np.pi * t)
x = xlow + xhigh
zpk = butter(8, 0.125, output='zpk')
# r is the magnitude of the largest pole.
r = np.abs(zpk[1]).max()
eps = 1e-5
# n estimates the number of steps for the
# transient to decay by a factor of eps.
n = int(np.ceil(np.log(eps) / np.log(r)))
# High order lowpass filter...
y = self.filtfilt(zpk, x, padlen=n)
# Result should be just xlow.
err = np.abs(y - xlow).max()
assert_(err < 1e-4)
# A 2D case.
x2d = np.vstack([xlow, xlow + xhigh])
y2d = self.filtfilt(zpk, x2d, padlen=n, axis=1)
assert_equal(y2d.shape, x2d.shape)
err = np.abs(y2d - xlow).max()
assert_(err < 1e-4)
# Use the previous result to check the use of the axis keyword.
# (Regression test for ticket #1620)
y2dt = self.filtfilt(zpk, x2d.T, padlen=n, axis=0)
assert_equal(y2d, y2dt.T)
def test_axis(self):
# Test the 'axis' keyword on a 3D array.
x = np.arange(10.0 * 11.0 * 12.0).reshape(10, 11, 12)
zpk = butter(3, 0.125, output='zpk')
y0 = self.filtfilt(zpk, x, padlen=0, axis=0)
y1 = self.filtfilt(zpk, np.swapaxes(x, 0, 1), padlen=0, axis=1)
assert_array_equal(y0, np.swapaxes(y1, 0, 1))
y2 = self.filtfilt(zpk, np.swapaxes(x, 0, 2), padlen=0, axis=2)
assert_array_equal(y0, np.swapaxes(y2, 0, 2))
def test_acoeff(self):
if self.filtfilt_kind != 'tf':
return # only necessary for TF
# test for 'a' coefficient as single number
out = filtfilt([.5, .5], 1, np.arange(10))
        assert_allclose(out, np.arange(10),
<reponame>lasconic/randomsheetmusic<gh_stars>1-10
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: spanner.py
# Purpose: The Spanner base-class and subclasses
#
# Authors: <NAME>
# <NAME>
#
# Copyright: Copyright © 2010-2012 <NAME> and the music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
'''
A spanner is a music21 object that represents a connection usually between
two or more music21 objects that might live in different streams but need
some sort of connection between them. A slur is one type of spanner -- it might
connect notes in different Measure objects or even between different parts.
This package defines some of the most common spanners. Other spanners
can be found in modules such as :ref:`moduleDynamics` (for things such as crescendos)
or in :ref:`moduleMeter` (a ritardando, for instance).
'''
import unittest
import copy
from music21 import exceptions21
from music21 import base
from music21 import common
from music21 import duration
from music21 import environment
_MOD = "spanner.py"
environLocal = environment.Environment(_MOD)
#-------------------------------------------------------------------------------
class SpannerException(exceptions21.Music21Exception):
pass
class SpannerBundleException(exceptions21.Music21Exception):
pass
#-------------------------------------------------------------------------------
class Spanner(base.Music21Object):
'''
Spanner objects live on Streams in the same manner as other Music21Objects,
but represent and store connections between one or more other Music21Objects.
Commonly used Spanner subclasses include the :class:`~music21.spanner.Slur`,
:class:`~music21.spanner.RepeatBracket`, :class:`~music21.spanner.Crescendo`, and :class:`~music21.spanner.Diminuendo`
objects.
In some cases you will want to subclass Spanner
for specific purposes.
In the first demo, we create
a spanner to represent a written-out accelerando, such
as <NAME> uses in his second string quartet (he marks them
with an arrow).
>>> class CarterAccelerandoSign(spanner.Spanner):
... pass
>>> n1 = note.Note('C4')
>>> n2 = note.Note('D4')
>>> n3 = note.Note('E4')
>>> sp1 = CarterAccelerandoSign(n1, n2, n3) # or as a list: [n1, n2, n3]
>>> sp1.getSpannedElements()
[<music21.note.Note C>, <music21.note.Note D>, <music21.note.Note E>]
We can iterate over a spanner to get the contexts:
>>> print(" ".join([repr(n) for n in sp1]))
<music21.note.Note C> <music21.note.Note D> <music21.note.Note E>
Now we put the notes and the spanner into a Stream object. Note that
the convention is to put the spanner at the beginning of the innermost
Stream that contains all the Spanners:
>>> s = stream.Stream()
>>> s.append([n1, n2, n3])
>>> s.insert(0, sp1)
Now we can get at the spanner in one of three ways.
(1) it is just a normal element in the stream:
>>> for e in s:
... print(e)
<music21.note.Note C>
<music21.CarterAccelerandoSign <music21.note.Note C><music21.note.Note D><music21.note.Note E>>
<music21.note.Note D>
<music21.note.Note E>
(2) we can get a stream of spanners (equiv. to getElementsByClass('Spanner'))
    by calling the .spanners property on the stream.
>>> spannerCollection = s.spanners # a stream object
>>> for thisSpanner in spannerCollection:
... print(thisSpanner)
<music21.CarterAccelerandoSign <music21.note.Note C><music21.note.Note D><music21.note.Note E>>
(3) we can get the spanner by looking at the list getSpannerSites() on any object that has a spanner:
>>> n2.getSpannerSites()
[<music21.CarterAccelerandoSign <music21.note.Note C><music21.note.Note D><music21.note.Note E>>]
In this example we will slur a few notes and then iterate over the stream to
see which are slurred:
>>> n1 = note.Note('C4')
>>> n2 = note.Note('D4')
>>> n3 = note.Note('E4')
>>> n4 = note.Note('F4')
>>> n5 = note.Note('G4')
>>> n6 = note.Note('A4')
Create a slur over the second and third notes at instantiation:
>>> slur1 = spanner.Slur([n2, n3])
Slur the fifth and the sixth notes by adding them to an existing slur:
>>> slur2 = spanner.Slur()
>>> slur2.addSpannedElements([n5, n6])
Now add them all to a stream:
>>> part1 = stream.Part()
>>> part1.append([n1, n2, n3, n4, n5, n6])
>>> part1.insert(0, slur1)
>>> part1.insert(0, slur2)
Say we wanted to know which notes in a piece started a
slur, here's how we could do it:
>>> for n in part1.notes:
... ss = n.getSpannerSites()
... for thisSpanner in ss:
... if 'Slur' in thisSpanner.classes:
... if thisSpanner.isFirst(n):
... print(n.nameWithOctave)
D4
G4
Alternatively, you could iterate over the spanners
of part1 and get their first elements:
>>> for thisSpanner in part1.spanners:
... firstNote = thisSpanner.getSpannedElements()[0]
... print(firstNote.nameWithOctave)
D4
G4
The second method is shorter, but the first is likely to
be useful in cases where you are doing other things to
each note object along the way.
Oh, and of course, slurs do print properly in musicxml:
>>> #_DOCS_SHOW part1.show()
.. image:: images/slur1_example.*
:width: 400
(the Carter example would not print an arrow since that
element has no corresponding musicxml representation).
Implementation notes:
The elements that are included in a spanner are stored in a
Stream subclass called :class:`~music21.stream.SpannerStorage`
found as the `.spannerStorage` attribute. That Stream has an
attribute called `spannerParent` which links to the original spanner.
Thus, `spannerStorage` is smart enough to know where it's stored, but
it makes deleting/garbage-collecting a spanner a tricky operation:
Ex. Prove that the spannedElement Stream is linked to container via
`spannerParent`:
>>> sp1.spannerStorage.spannerParent is sp1
True
Spanners have a `.completeStatus` attribute which can be used to find out if
all spanned elements have been added yet. It's up to the processing agent to
set this, but it could be useful in deciding where to append a spanner.
>>> sp1.completeStatus
False
When we're done adding elements:
>>> sp1.completeStatus = True
'''
# this class attribute provides performance optimized class selection
isSpanner = True
def __init__(self, *arguments, **keywords):
base.Music21Object.__init__(self)
self._cache = {}
# store this so subclasses can replace
if self.__module__ != '__main__':
            if not self.__module__.startswith('music21'):
self._reprHead = '<music21.' + self.__module__ + '.' + self.__class__.__name__ + ' '
else:
self._reprHead = '<' + self.__module__ + '.' + self.__class__.__name__ + ' '
else:
self._reprHead = '<music21.spanner.' + self.__class__.__name__ + ' '
# store a Stream inside of Spanner
from music21 import stream
# create a stream subclass, spanner storage; pass a reference
# to this spanner for getting this spanner from the SpannerStorage
# directly
self.spannerStorage = stream.SpannerStorage(spannerParent=self)
# we do not want to auto sort based on offset or class, as
# both are meaningless inside of this Stream (and only have meaning
# in Stream external to this
self.spannerStorage.autoSort = False
# add arguments as a list or single item
proc = []
for arg in arguments:
if common.isListLike(arg):
proc += arg
else:
proc.append(arg)
self.addSpannedElements(proc)
# if len(arguments) > 1:
# self.spannerStorage.append(arguments)
# elif len(arguments) == 1: # assume a list is first arg
# self.spannerStorage.append(c)
# parameters that spanners need in loading and processing
# local id is the id for the local area; used by musicxml
self.idLocal = None
# after all spannedElements have been gathered, setting complete
# will mark that all parts have been gathered.
self.completeStatus = False
def __repr__(self):
msg = [self._reprHead]
for c in self.getSpannedElements():
objRef = c
msg.append(repr(objRef))
msg.append('>')
return ''.join(msg)
def _deepcopySubclassable(self, memo=None, ignoreAttributes=None, removeFromIgnore=None):
'''
see __deepcopy__ for tests and docs
'''
# NOTE: this is a performance critical operation
defaultIgnoreSet = {'_cache', 'spannerStorage'}
if ignoreAttributes is None:
ignoreAttributes = defaultIgnoreSet
else:
ignoreAttributes = ignoreAttributes | defaultIgnoreSet
new = super(Spanner, self)._deepcopySubclassable(memo, ignoreAttributes, removeFromIgnore)
if removeFromIgnore is not None:
ignoreAttributes = ignoreAttributes - removeFromIgnore
if 'spannerStorage' in ignoreAttributes:
for c in self.spannerStorage:
new.spannerStorage.append(c)
return new
def __deepcopy__(self, memo=None):
'''
This produces a new, independent object containing references to the same spannedElements.
SpannedElements linked in this Spanner must be manually re-set, likely using the
replaceSpannedElement() method.
Notice that we put the references to the same object so that later we can replace them;
otherwise in a deepcopy of a stream, the notes in the stream
will become independent from the notes in the spanner.
>>> import copy
>>> n1 = note.Note('g')
>>> n2 = note.Note('f#')
>>> c1 = clef.AltoClef()
>>> sp1 = spanner.Spanner(n1, n2, c1)
>>> sp2 = copy.deepcopy(sp1)
>>> len(sp2.spannerStorage)
3
>>> sp1 is sp2
False
>>> sp2[0] is sp1[0]
True
>>> sp2[2] is sp1[2]
True
>>> sp1[0] is n1
True
>>> sp2[0] is n1
True
'''
return self._deepcopySubclassable(memo)
#---------------------------------------------------------------------------
    # as spannedElements
return np.array([sindex_HK1, sindex_HKn, sindex_HKm, sindex_HK2, sindex_HKp])
def polygon_area(self, xy, plot=0):
"""
This utility takes coordinates (x, y) ordered in an array and calculates the polygon area enclosed.
        The coordinates need to be ordered counter-clockwise, since the area is evaluated from the
        circumference integral given by Green's theorem (the shoelace formula).
"""
l = len(xy)
s = 0.0
for i in range(l):
j = (i+1)%l # keep index in [0,l)
s += (xy[j,0] - xy[i,0])*(xy[j,1] + xy[i,1])
#--------------------------------------------------------------
return -0.5*s
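    # A quick illustration of polygon_area (hypothetical helper, not used elsewhere): the unit
    # square ordered counter-clockwise encloses an area of 1.0.
    def _polygon_area_example(self):
        xy = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
        return self.polygon_area(xy) # -> 1.0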
def results(self):
print('################################################')
print(' {} - {} '.format(self.target, self.date))
print('################################################')
head_SF = self.hdul[self.SF_dex[0]][0].header
head_FF = self.hdul[self.FF_dex[0]][0].header
print('Magnitude = {}, Seeing = {}'.format(self.magnitude, self.seeing))
print('Exptime flat: t = {} s'.format(head_SF['EXPTIME']))
print('Exptime star: t = {} s'.format(head_FF['EXPTIME']))
print('------------------------------------------------')
print(' CCD NOISE PROPERTIES ')
print('------------------------------------------------')
BF_mean, BF_std = np.mean(self.BF), np.std(self.BF)
DF_mean, DF_std = np.mean(self.DF), np.std(self.DF)
print('Bias master : mean = {:.4g}, std = {:.4g}'.format(BF_mean, BF_std))
print('Dark current: mean = {:.4g}, std = {:.4g}'.format(DF_mean, DF_std))
print('GAIN = {:.3g} e-/ADU'.format(self.gain))
print('RON = {:.3g} ADU'.format(BF_std))
print('VAR = {:.3g} ADU (=<RON^2>)'.format(BF_std**2))
print('------------------------------------------------')
print(' BACKGROUND SKY & SCATTER ')
print('------------------------------------------------')
print('Flat mean background counts: {:.1f}'.format(self.f_flux_sky))
print('Star mean background counts: {:.1f}'.format(self.s_flux_sky))
print('------------------------------------------------')
print(' RV CORRECTION ')
print('------------------------------------------------')
print('Barycentric RV correction: {:.2f} km/s'.format(self.delta_v_baryc))
print('Star motion RV Correction: {:.2f} km/s'.format(self.rv_amp))
print('Correction in velocity : {:.2f} km/s'.format(self.delta_v))
print('Correction in wavelength : {:.2f} Å'.format(self.delta_l))
print('Correction in pixelspace : {:.2f}'.format(self.delta_p))
print('------------------------------------------------')
print(' SNR & UNCERTAINTIES ')
print('------------------------------------------------')
f_snr_max, s_snr_max = self.f_snr_max, self.s_snr_max
print('S/N in order #57: {:.1f} (flat), {:.1f} (star)'.format(f_snr_max[1], s_snr_max[1]))
print('S/N in order #58: {:.1f} (flat), {:.1f} (star)'.format(f_snr_max[0], s_snr_max[0]))
print('------------------------------------------------')
snx = [self.s_snr_X[0], self.s_snr_X[1], self.s_snr_X[2], self.s_snr_X[3]]
snr =[self.sigma_s_snr[0]*100,self.sigma_s_snr[1]*100,self.sigma_s_snr[2]*100,self.sigma_s_snr[3]*100]
std = [self.sigma_V*100, self.sigma_K1*100, self.sigma_H1*100, self.sigma_R*100]
print('Bandpass : V K H R | Total')
print('S/N : {:.3g} {:.3g} {:.3g} {:.3g} |'.format(snx[0], snx[1], snx[2], snx[3]))
print('sigma(S/N): {:.3g}% {:.3g}% {:.3g}% {:.3g}% | {:.1f}%'.format(snr[0], snr[1], snr[2],\
snr[3], np.sum(snr)))
print('sigma(std): {:.3g}% {:.3g}% {:.3g}% {:.3g}% | {:.1f}%'.format(std[0], std[1], std[2],\
std[3], np.sum(std)))
print('sigma(wav): | {:.2f}%'.format(self.sigma_w*100))
print('sigma(fla): | {:.2f}%'.format(self.sigma_f*100))
print('------------------------------------------------')
print(' S INDEX ')
print('------------------------------------------------')
print(self.s1)
print(self.sn)
print(self.sm)
print(self.s2)
print(self.sp)
print('------------------------------------------------')
########################################################################################################
# OPTIMAL WIDTHS #
########################################################################################################
def find_optimal_width(self, image=None, trace=None, plot=0):
"""
This utility takes most preferably a reduced flat image and the polynomial describtion traced,
and first cut out a bandpass defined by disp_lenght and cross_width. Looping through increasing
spatials widths the S/N ratio is found for each, and the spatial width asigned to the highest
S/N ratio is optimal for linear extraction. To return the results in terms of FWHM a Gauss function
is fitted to the spatial width of maximum flux.
"""
        # Check if 'image' and 'trace' are defined:
        if image is None: image = self.F_calib
        if trace is None: trace = self.trace
# Cut out order:
widths = np.arange(1, 40)
order = self.cut_out_order(image, np.polyval(trace['order_2'], self.disp), widths[-1])
# Find maximum of blaze function:
blaze = order.sum(axis=1)
blaze_max = np.max(blaze)
index_max = np.nanargmax(blaze)
# Find mean sky background along disp direction used for S/N ratio:
flux_inter, _ = self.mean_background(image, trace, plot=0)
# Loop over spatial widths:
snr = np.zeros(len(widths))
for w in widths:
order_w = order[index_max, widths[-1]-1-w:widths[-1]-1+w]
flux_order = np.sum(order_w)
snr[w-1] = self.signal_to_noise(flux_order, len(order_w), flux_order)
# Find highest S/N ratio optimal order width:
index_max_snr = np.argmax(snr)
optimal_order_width = widths[index_max_snr]
# Find residual inter-order width:
order_distance = int(((self.ref_cen_pos[1] - self.ref_cen_pos[2]) + \
(self.ref_cen_pos[2] - self.ref_cen_pos[3]))/2)
#optimal_inter_order_width = int(order_distance - 2.5*optimal_order_width)
#--------------------------------------------------------------
        if plot == 1:
pt.plot_optimal_width(widths, order, blaze_max, index_max, flux_inter, snr, optimal_order_width)
#--------------------------------------------------------------
self.order_width = optimal_order_width
#--------------------------------------------------------------
return self.order_width
def mean_background(self, image, trace, plot=0):
"""
This utility use 'trace' and 'cut_out_order' to select the pixel sky-background in a bandpass on
both sides of the order of interest. In spatial direction on each side the median pixel value is
found, and lastly the mean value of each side is then computed. Returned is a 1D spectrum describing
the background (e.g. used by the 'signal_to_noise' utility).
"""
# Find midpoint of inter orders:
midpoint_below = (self.ref_cen_pos[1] - self.ref_cen_pos[2])/2
midpoint_above = (self.ref_cen_pos[2] - self.ref_cen_pos[3])/2
# Move fit to the midpoint of inter orders:
yfit_below = np.polyval(trace['order_1'], self.disp) + np.ones(len(self.disp))*midpoint_below
yfit_above = np.polyval(trace['order_2'], self.disp) + np.ones(len(self.disp))*midpoint_above
yfit_order = np.polyval(trace['order_2'], self.disp) + np.ones(len(self.disp))
# Set cross width for background cut to half the distance between orders:
# (here the position of the order is a limitation)
cross_order_width = math.floor(yfit_below[0])*2 - 1
# (else if order are moved up use)
#cross_order_width = int((self.ref_cen_pos[1] - self.ref_cen_pos[2])[0]/2 - 1)
# Cut out stellar background on both sides:
back_below = self.cut_out_order(image, yfit_below, cross_order_width)
back_above = self.cut_out_order(image, yfit_above, cross_order_width)
# Sum order to 1D spectrum and mean them:
l_sky = (np.median(back_below, axis=1) + np.median(back_above, axis=1))/2.
flux_sky_mean = abs(l_sky.mean())
#-----------------------------------------------------------:
if plot == 1: pt.plot_sky_background(image, self.disp, yfit_below, yfit_above, yfit_order, l_sky)
#--------------------------------------------------------------
return flux_sky_mean, l_sky
def signal_to_noise(self, flux_star, n_pix_star, flux_sky):
"""
This function calculates the S/N ratio using the 1D spectrum of the object and sky-background.
Purely by statistics, with an increasing number of pixels used to define the object ('n_pix_star'),
the S/N ratio will decrease. The CCD is characterised by its 'gain' (e-/ADU) and 'ron',
the read-out noise (e-).
"""
# See Schroeder (1999) p. 317 or Bradt (2004) p. 163:
signal = flux_star*self.gain
noise = np.sqrt(flux_star*self.gain + flux_sky*self.gain*n_pix_star + self.ron*n_pix_star)
#--------------------------------------------------------------
return signal / noise
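# Worked example with hypothetical numbers (illustration only, not values from the pipeline):
# for flux_star = 1e5 ADU, gain = 1 e-/ADU, n_pix_star = 20 pixels, flux_sky = 50 ADU/pixel
# and ron = 5, the expression above gives
#   S/N = 1e5 / sqrt(1e5 + 50*20 + 5*20) = 1e5 / sqrt(101100) ~ 314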
########################################################################################################
# GENERAL UTILITIES SPECIALIZED TO THIS SOFTWARE #
########################################################################################################
def blue_moves(self, path, plot=0):
"""
This routine measures the drift of the spectrum over time by using ThAr lines in the same order
as the Ca II H & K lines. (Fun fact: the software name comes from 'Blue Moves', the eleventh
studio album by <NAME>, released in October 1976.)
"""
# Load all files from same folder:
img_files = np.sort(glob.glob('{}{}*'.format(path, self.img_name)))
hdu = np.array([fits.open(str(files)) for files in img_files])
n = len(img_files)
# Find time scaling to utc time and Julian Date
time = [hdu[i][0].header['JD-DATE'] for i in range(n)]
# Loop through all ThAr images:
move_x = np.zeros(n)
move_y = np.zeros(n)
sigma_x = np.zeros(n-1)
sigma_y = np.zeros(n-1)
for i in range(n):
# Open and close one image at a time:
with fits.open(str(img_files[i])) as hdu_i:
# Select focused spectral region:
T_i = hdu_i[0].data[300:480, 420:2270].T
# UTILITY CALL: Locate coordinates of lines:
COF_i, _, _ = self.peak_finder(T_i, sigma=5, plot=0)
# UTILITY CALL: Remove lines too close to borders:
COF_i, N_lines = self.image_border(T_i, COF_i)
# UTILITY CALL: Only use same lines each time:
if i==0:
#COF_0, _, _ = self.peak_finder(T_i, sigma=5, plot=0)
COF_0 = COF_i
if i != 0:
indices0, indices1 = self.match_coordinates(COF_0, COF_i, threshold=5, plot=1)
# Find scatter of the drift for each line:
if i > 1:
diff_x = COF_i[indices1,0] - x
diff_y = COF_i[indices1,1] - y
sigma_x[i-1] = np.std(diff_x)
sigma_y[i-1] = np.std(diff_y)
# Find coordinates (x and y needs to be after if < 1 statement):
x = COF_i[indices1,0]
y = COF_i[indices1,1]
move_x[i] = x.mean()
move_y[i] = y.mean()
# Print to bash:
pt.compilation(i, n, 'Blue Moves')
print()
# Convert to relative changes:
move_x = move_x[1::] - move_x[1::].mean()
move_y = move_y[1::] - move_y[1::].mean()
time = time[1::]
#-----------------------------------------------------------
if plot == 1:
np.savetxt('{}bluemoves.txt'.format(self.path), np.vstack([time, move_y, sigma_y]).T)
pt.plot_rv_stability(time, move_y, sigma_y)
#-----------------------------------------------------------
return
def image_border(self, image, pixel_coor, border_edge=20):
"""
This utility takes an array of pixel coordinates and finds coordinates that are closer than
'border_edge' pixels (default 20) to the image border. These coordinates are then removed from the array and a new array,
'new_pixel_coor', is returned together with the new (lower) number of coordinates 'N_coor'.
"""
# Unpack pixel coordinates:
x = pixel_coor[:,0]
y = pixel_coor[:,1]
# Check if stellar coordinates are too close to borders:
i_x1 = np.where(x < border_edge)[0]
i_y1 = np.where(y < border_edge)[0]
i_x2 = np.where(x > np.shape(image)[0]-border_edge)[0]
i_y2 = np.where(y > np.shape(image)[1]-border_edge)[0]
i_xy = np.hstack([i_x1, i_x2, i_y1, i_y2])
# Discard these coordinates:
x_new = np.delete(x, i_xy)
y_new = np.delete(y, i_xy)
N_coor = len(x_new)
#-----------------------------------------------------------
return np.array([x_new, y_new]).T, N_coor
import os, glob, warnings
import numpy as np
from collections import OrderedDict
from typing import Union, Dict, List
from pygromos.files import imd, repdat
from pygromos.utils import bash
import reeds.function_libs.analysis.free_energy
import reeds.function_libs.analysis.parameter_optimization
import reeds.function_libs.analysis.sampling as sampling_ana
import reeds.function_libs.optimization.eds_energy_offsets as eds_energy_offsets
import reeds.function_libs.analysis.replica_exchanges as repex
import reeds.function_libs.visualization.pot_energy_plots
import reeds.function_libs.visualization.re_plots as re_plots
from reeds.function_libs.file_management import file_management
from reeds.function_libs.file_management.file_management import parse_csv_energy_trajectories
from reeds.function_libs.utils import s_log_dist as sdist
from reeds.function_libs.utils.structures import adding_Scheme_new_Replicas
template_control_dict = OrderedDict({ # this dictionary is controlling the post Simulation analysis procedure!
"concat": {"do": True,
"sub": {
"cp_cnf": True,
"cat_trc": True,
"cat_tre": False,
"ene_ana": True,
"convert_trcs": False,
"cat_repdat": True, }
},
"plot_property_timeseries": {"do": True,
"sub": {
"pot_ene_by_state":True,
"pot_ene_by_replica":False,
"pot_ene_timeseries": False,
"pot_ene_grid_timeseries": True,
"ref_timeseries": True,
"ref_distrib": False,
"distance_restraints": False,
"temperature_2d_plot": False
}
},
"eoffset": {"do": True,
"sub": {
"calc_eoff": True,
"sampling_plot": True, }
},
"sopt": {"do": True,
"sub": {
"detect_flow_equilib": True,
"run_RTO": True,
"run_NLRTO": True,
"run_NGRTO": False,
"visualize_transitions": True,
"roundtrips": True,
"generate_replica trace": True}
},
"phys_sampling": {"do": True},
"dfmult": {"do": False},
"compress_simulation_folder": {"do": True},
"prepare_input_folder": {"do": True,
"sub": {
"eoff_to_sopt": False,
"write_eoff": False,
"write_s": True
},
}
})
def dict_to_nice_string(control_dict: Dict) -> str:
"""
Converts a dictionary of options (like template_control_dict)
to a more human-readable format, which can then be printed to a text file
and manually modified before submitting analysis jobs.
Parameters
----------
control_dict : Dict
analysis control dictionary
Returns
-------
str
nice formatting of the control dictionary for printing.
"""
script_text = "control_dict = {\n"
for key, value in control_dict.items():
script_text += "\t\"" + key + "\": "
first = False
if (type(value) == dict):
if ("do" in value): # do should always be first in this list
script_text += "{\"do\":" + str(value["do"]) + ","
if (len(value) > 1):
script_text += "\n"
first = True
for key2, value2 in value.items(): # alternative keys
# prefix
if (first):
prefix = " "
first = False
else:
prefix = "\t\t"
# key_val
if (key2 == "do"):
continue
elif (type(value2) == dict):
script_text += prefix + "\"" + str(key2) + "\": " + _inline_dict(value2, "\t\t\t") + ",\n"
else:
script_text += prefix + "\"" + str(key2) + "\": " + str(value2) + ","
script_text += prefix + " },\n"
else:
script_text += str(value) + ",\n"
script_text += "}\n"
return script_text
def _inline_dict(in_dict: Dict, prefix: str = "\t"):
"""
Translates a dictionary to a single code line; can be used for meta-scripting.
Parameters
----------
in_dict: Dict
analysis control dict
prefix : str, optional
prefix symbol used when writing out the dict.
Returns
-------
str
code line.
"""
msg = "{\n"
for key, value in in_dict.items():
if (type(value) == dict):
msg += prefix + "\"" + str(key) + "\": " + _inline_dict(in_dict=value, prefix=prefix + "\t") + ","
else:
msg += prefix + "\"" + str(key) + "\": " + str(value) + ",\n"
return msg + prefix + "}"
def check_script_control(control_dict: dict = None) -> dict:
if isinstance(control_dict, type(None)):
return template_control_dict
else:
for x in template_control_dict:
if x not in control_dict:
control_dict.update({x: template_control_dict[x]})
return control_dict
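# --- Hedged usage sketch (added for illustration, not part of the original module; assumes the
# reeds/pygromos dependencies imported above are installed) ---
if __name__ == "__main__":
    # Render the default control dictionary in the human-readable layout so it can be pasted
    # into a job script and edited by hand before submitting the analysis jobs.
    print(dict_to_nice_string(check_script_control(None)))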
def do_Reeds_analysis(in_folder: str, out_folder: str, gromos_path: str,
topology: str, in_ene_ana_lib: str, in_imd: str,
optimized_eds_state_folder: str = "../a_optimizedState/analysis/data",
state_undersampling_occurrence_potential_threshold: List[float] = None,
state_physical_occurrence_potential_threshold: List[float] = None,
undersampling_frac_thresh: float = 0.9,
add_s_vals: int = 0, state_weights: List[float]=None, s_opt_trial_range:int=None,
adding_new_sReplicas_Scheme: adding_Scheme_new_Replicas = adding_Scheme_new_Replicas.from_below,
grom_file_prefix: str = "test", title_prefix: str = "test", ene_ana_prefix="ey_sx.dat",
repdat_prefix: str = "run_repdat.dat",
n_processors: int = 1, verbose=False, dfmult_all_replicas=False,
control_dict: Dict[str, Union[bool, Dict[str, bool]]] = None) -> (
dict, dict, dict):
"""
Master calling point from which all jobs can call the analysis functions for a RE-EDS simulation.
This function generates: plots, compress files, and/or calculate values of interest.
Parameters
----------
in_folder : str
input folder for the simulation.
out_folder : str
output folder for the simulation
gromos_path : str
gromosPP binary path
topology : str
path to topology
in_ene_ana_lib : str
in path for ene_ana lib
in_imd : str
in path for imd_file
optimized_eds_state_folder : str, optional
path to optimized eds_state folders (default: "../a_optimizedState/analysis/data")
pot_tresh : float, optional
potential energy treshold (default: 0)
undersampling_frac_thresh : float, optional
fraction threshold (default: 0.9)
take_last_n : int, optional
this parameter can be used to force the energy offset estimation to use a certain amount of replicas. (default: None)
add_s_vals : int, optional
this parameter can be used to add a number of s-values during the s-optimization (default: 0)
state_weights : List[float], optional
allows to weight the different states in the s-optimization differently (default: None)
s_opt_trial_range : int, optional
give a range of trials, that define the start and end of the s-optimization run (default: adding_Scheme_new_Replicas.from_below)
adding_new_sReplicas_Scheme : int, optional
how shall the coordinates for new replicas be added to an exchange bottle-neck. (default: adding_Scheme_new_Replicas.from_below)
grom_file_prefix : str, optional
provide here a gromos_file prefix of this run (default: test)
title_prefix : str, optional
provide here an output prefix and plot prefix (default: test)
ene_ana_prefix : str, optional
prefix for the ene ana analysis @WARNING: NOT USED ANYMORE! - FUTURE REMOVE!.
repdat_prefix : str, optional
prefix for the repdat files. required to read in the repdats. (default:run_repdat.dat )
n_processors : int, optional
number of processors
verbose : bool, optional
verbosity level
dfmult_all_replicas : bool, optional
shall dfmult be calculated for all replicas
control_dict : dict, optional
control dict for analysis
Returns
-------
(dict, dict, dict)
eoff_statistic, svals, dFs - the function returns the eoff_statistics,
the s-values of the s-optimization-results and the free energy calculation results,
if calculated.
"""
eoff_statistic = {}
svals = {}
dFs = {}
print("Starting RE-EDS analysis:")
# subfolder for clearer structure
plot_folder_path = out_folder + "/plots"
concat_file_folder = bash.make_folder(out_folder + "/data", "-p")
if (not os.path.exists(out_folder)):
print("Generating out_folder: ", out_folder)
bash.make_folder(out_folder)
if (not os.path.exists(concat_file_folder)):
bash.make_folder(concat_file_folder)
# out_files
repdat_file_out_path = concat_file_folder + "/" + title_prefix + "_" + repdat_prefix
ene_trajs_prefix = title_prefix + "_energies"
# manual script control
control_dict = check_script_control(control_dict)
# parameter file: <-not needed!
# if(verbose): print("Reading imd: "+in_imd)
imd_file = imd.Imd(in_imd)
s_values = list(map(float, imd_file.REPLICA_EDS.RES))
Eoff = np.array(list(map(lambda vec: list(map(float, vec)), imd_file.REPLICA_EDS.EIR))).T
num_states = int(imd_file.REPLICA_EDS.NUMSTATES)
try:
if (not isinstance(imd_file.MULTIBATH, type(None))):
temp = float(imd_file.MULTIBATH.TEMP0[0])
elif (not isinstance(imd_file.STOCHDYN, type(None))):
temp = float(imd_file.STOCHDYN.TEMPSD)
else:
raise Exception("Either STOCHDYN or MULTIBATH block needs to be defined in imd.")
except Exception as err:
print("Failed during analysis\n\t" + "\n\t".join(map(str, err.args)))
exit(1)
if (control_dict["concat"]["do"]):
print("STARTING CONCATENATION.")
num_replicas = len(s_values)
# if we're using Stochastic Dynamics, use solutemp2 for ene_ana instead of solvtemp2
if (isinstance(imd_file.MULTIBATH, type(None)) and not isinstance(imd_file.STOCHDYN, type(None))):
additional_properties = ("solutemp2", "totdisres")
boundary_conditions = "v cog"
# if there's only one bath, use solutemp2 for ene_ana instead of solvtemp2
elif (not isinstance(imd_file.MULTIBATH, type(None)) and imd_file.MULTIBATH.NBATHS == "1"):
additional_properties = ("solutemp2", "totdisres")
boundary_conditions = "r cog"
else:
additional_properties = ("solvtemp2", "totdisres")
boundary_conditions = "r cog"
out_files = file_management.reeds_project_concatenation(in_folder=in_folder, in_topology_path=topology,
in_imd=in_imd, num_replicas=num_replicas,
control_dict=control_dict["concat"]["sub"],
out_folder=concat_file_folder,
in_ene_ana_lib_path=in_ene_ana_lib,
repdat_file_out_path=repdat_file_out_path,
out_file_prefix=grom_file_prefix, starting_time=0,
n_processes=n_processors, gromosPP_bin_dir=gromos_path,
verbose=False,
additional_properties=additional_properties,
boundary_conditions=boundary_conditions)
if (verbose): print("Done\n")
# intermezzo generating plots_folder
if (not os.path.exists(plot_folder_path)):
plot_folder_path = bash.make_folder(plot_folder_path)
# Set this to None as a checker to avoid redundant parsing
energy_trajectories = None
if (control_dict["plot_property_timeseries"]["do"]):
sub_control = control_dict["plot_property_timeseries"]["sub"]
if (verbose): print("\tParse the data:\n")
# No need to check if trajectories are parsed here, as it is the first access point.
energy_trajectories = parse_csv_energy_trajectories(concat_file_folder, ene_trajs_prefix)
# Plots related to the potential energy distributions of the end states.
if sub_control["pot_ene_by_state"]:
if (verbose): print("\n\tPlotting end state potential energy distributions (by state)\n")
for state_num in range(1, num_states+1):
outfile = plot_folder_path + '/' + title_prefix + '_pot_ene_state_' + str(state_num) + '.png'
reeds.function_libs.visualization.pot_energy_plots.plot_energy_distribution_by_state(energy_trajectories, outfile, state_num, s_values,
manual_xlim = None, shared_xaxis = True)
if sub_control["pot_ene_by_replica"]:
if (verbose): print("\n\tPlotting end state potential energy distributions (by replica)\n")
for replica_num in range(1, len(energy_trajectories) + 1):
outfile = plot_folder_path + '/' + title_prefix + '_pot_ene_replica_' + str(replica_num) + '.png'
reeds.function_libs.visualization.pot_energy_plots.plot_energy_distribution_by_replica(energy_trajectories[replica_num - 1], outfile,
replica_num, s_values[replica_num-1],
manual_xlim = None, shared_xaxis = True)
# this variable allows to access particular elements in the pandas DataFrame
singleStates = ['e' + str(i) for i in range(1, num_states+1)]
# Timeseries of the potential energy of the end states.
for i, ene_traj in enumerate(energy_trajectories):
if sub_control["pot_ene_timeseries"]:
out_path
case1 = True
if polygon_array[row, 2] == 0:
length_not_given += 1
if polygon_array[row, 1] == 0:
angles_not_given += 1
#case1
if case1 == True:
return "case 1"
else:
if length_not_given >= 2: #parallelness needs to be solved in case2calc function
return "case 2"
elif length_not_given == 1:
if angles_not_given >= 2:
return "case 3"
else:
return "case 1" #1l 0-1a
else: #all lengths known - like crossroads!
return "case 4"
def case1calc(polygon_array):
"""
Calculates the points of a vector polygon where at least one edge length is unknown. We start
drawing from (0, 0) with the next domain after this chosen 'loose edge'. We assume
angles and lengths for every cell other than this loose end, calculate the vertices,
and then compute the last edge length and the two last angles.
"""
def resort_list_to_start_by_spec_value(dom_index, list_with_value):
index = list_with_value.index(dom_index)
sorted_list = []
for i in range(len(list_with_value)):
sorted_list.append(list_with_value[index-len(list_with_value)+i])
return sorted_list
n_poly = len(polygon_array[:,0])
given_ang_list = []
for row in range(n_poly):
if polygon_array[row-1, 2] == 0 and polygon_array[row-1, 1] == 0 and polygon_array[row, 1] == 0:
start = row
if polygon_array[row, 1] != 0:
given_ang_list.append(polygon_array[row, 1])
#distribute remaining angles
poly_angles_sum = (n_poly - 2) * 180
remaining_average = (poly_angles_sum - sum(given_ang_list))/(n_poly - len(given_ang_list))
for row in range(n_poly):
if polygon_array[row, 1] == 0:
polygon_array[row, 1] = remaining_average #set all unknown angles for this mean value
#get length/angle mean ratio
known_len_angle_sum = []
for row in range(n_poly):
if polygon_array[row-1, 2] != 0: #non-null length
len_angle_sum_multip_here = (polygon_array[row-1, 1] + polygon_array[row, 1]) * polygon_array[row-1, 2]
known_len_angle_sum.append(len_angle_sum_multip_here)
len_angle_ratio = np.mean(known_len_angle_sum)
#fill length col by using len_angle_ratio
for row in range(n_poly):
if polygon_array[row-1, 2] == 0: #null length! :)
new_len = len_angle_ratio / (polygon_array[row-1, 1] + polygon_array[row, 1])
polygon_array[row-1, 2] = new_len
#get drawing order by resorting index list
index_list = [n for n in range(n_poly)]
resorted_list = resort_list_to_start_by_spec_value(start, index_list)
resorted_list.pop() #remove last item which is the 'loose end' domain
#the table is filled and we can start calculating coords at domain 'start'
coords = np.array([[0, 0]])
for index_in_list in range(len(resorted_list)):
ang_here = polygon_array[resorted_list[index_in_list], 1]
len_here = polygon_array[resorted_list[index_in_list], 2]
if index_in_list == 0:
curr_coords = np.array([[len_here, 0]])
abs_ang = 0
else:
abs_ang += 180-ang_here #abs angle is the outside angle of the polygon, adding them up
curr_coords = coords[-1] + np.array([[np.cos(deg_to_rad(abs_ang)) * len_here,
np.sin(deg_to_rad(abs_ang)) * len_here]])
coords = np.append(coords, curr_coords, axis=0)
#the last prev_coords we get and [0, 0] adds the last side of the polygon
len_last = np.linalg.norm(coords[-1])
polygon_array[start-1, 2] = len_last
#angle at start-1
vector1 = coords[-2] - coords[-1]
vector2 = coords[0] - coords[-1]
angle = calc_degree(vector1, vector2)
polygon_array[start-1, 1] = angle
#angle at start
vector1 = coords[-1] - coords[0]
vector2 = coords[1] - coords[0]
angle = calc_degree(vector1, vector2)
polygon_array[start, 1] = angle
return polygon_array
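# Worked example with hypothetical numbers (illustration only): for a pentagon (n_poly = 5) the
# interior angles sum to (5 - 2) * 180 = 540 degrees. If two angles of 100 and 110 degrees are
# given, each of the three unknown angles is assigned (540 - 210) / 3 = 110 degrees before the
# missing edge lengths are estimated from the mean length/angle ratio computed above.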
def feed_polyarr_to_structarray(struct_info_array, polygon_array, cycle, paired_node_list):
"""Write back calculated relative angles and domain lengths(if appropriate) to SIA"""
for poly_index in range(len(polygon_array[:,0])):
curr_dom_index = int(polygon_array[poly_index, 0])
poly_ang = polygon_array[poly_index, 1]
poly_len = polygon_array[poly_index, 2]
side = struct_info_array[curr_dom_index, 1]
cyc_index = cycle.index(curr_dom_index)
if curr_dom_index not in paired_node_list: #at the start of an unpaired there is always a polygon angle
struct_info_array[curr_dom_index, 2] = poly_len #if unpaired, the length we write over
if cycle[cyc_index-1] not in paired_node_list: #unpaired-unpaired
rel_angle = (poly_ang - 180) / side
struct_info_array[curr_dom_index, 0] = rel_angle
else: #unpaired - paired(prev)
rel_angle = (poly_ang - 90) / side
struct_info_array[curr_dom_index, 0] = rel_angle
else:
if cycle[cyc_index - 1] not in paired_node_list: #paired-unpaired
rel_angle = (poly_ang - 90) / side
struct_info_array[curr_dom_index, 0] = rel_angle
else: #it is always the right paired-paired, as we only put those in the table
rel_angle = poly_ang*side
struct_info_array[curr_dom_index, 0] = rel_angle
return struct_info_array
#MAIN LOOP
for cycle in cycle_list_G:
#treat hairpin and paired into paired separately ~ no polygon there
polygon_sides = 0
for item in cycle:
if item in paired_node_list:
polygon_sides += 0.5
else:
polygon_sides += 1
if polygon_sides == 2: #special cases
if len(cycle) == 3: #hairpin, nothing to do here
pass
elif len(cycle) == 4: #paired into paired, can fill in two 0 angles
cycle.sort() #after this the domains will always be the second and fourth in the cycle
struct_info_array[cycle[1], 0] = 0
struct_info_array[cycle[3], 0] = 0
elif polygon_sides >= 3: #has a polygon
polygon_array = initiate_polygon_table(cycle, struct_info_array, paired_dict, paired_node_list)
if indicate_polygon_case(polygon_array) == "case 1":
polygon_array = case1calc(polygon_array)
elif indicate_polygon_case(polygon_array) == "case 2":
pass
elif indicate_polygon_case(polygon_array) == "case 3":
pass
elif indicate_polygon_case(polygon_array) == "case 4":
pass
#add calculated info back to SIA
struct_info_array = feed_polyarr_to_structarray(struct_info_array, polygon_array, cycle, paired_node_list)
return struct_info_array
#determine order of traversal (order of domains to build up)
def determine_contstruct_order(skeleton_graph, paired_dict):
cycle_list_G = nx.cycle_basis(skeleton_graph)
paired_node_list = []
for node in paired_dict:
paired_node_list.append(node)
paired_node_list.append(paired_dict[node])
def find_cycle_where_index_belongs(dom_index, cycle_list_G): #now finds largest cycle
found = []
for cycle in cycle_list_G:
if dom_index in cycle and len(cycle) > len(found):
found = cycle
return found
def resort_list_to_start_by_spec_value(dom_index, list_with_value):
index = list_with_value.index(dom_index)
sorted_list = []
for i in range(len(list_with_value)):
sorted_list.append(list_with_value[index-len(list_with_value)+i])
return sorted_list
def give_pair_of_domain(dom_index, paired_dict):
if dom_index in paired_dict:
return paired_dict[dom_index]
elif dom_index in list(paired_dict.values()):
index_of_pair = list(paired_dict.keys())[list(paired_dict.values()).index(dom_index)]
return index_of_pair
else:
raise ValueError('not actually paired')
#
traverse_order = [0]
for node in traverse_order:
current_in_travord = traverse_order.index(node)
counter = 0 #helps with insert index
#pair primary
if node in paired_node_list:
pair_first = give_pair_of_domain(node, paired_dict) #pair first!
if pair_first not in traverse_order:
traverse_order.insert(current_in_travord + 1, pair_first)
counter += 1
#cycle secondary
its_cycle = find_cycle_where_index_belongs(node, cycle_list_G)
if len(its_cycle) != 0: #if in cycle
its_cycle.sort() #sort it first, bugfix
#resort so current is at first place
resorted_cycle = resort_list_to_start_by_spec_value(node, its_cycle)
for item in resorted_cycle:
if item not in traverse_order:
traverse_order.insert(current_in_travord + counter + 1, item)
counter += 1
#other neighbor tertiary
neighbors = skeleton_graph.neighbors(node)
for neighbor in neighbors: #other neighbor third!
if neighbor not in traverse_order:
traverse_order.append(neighbor)
return traverse_order
def stepwise_buildup(struct_info_array2, paired_dict, traverse_order, skeleton_graph):
"""
Taking the more-or-less filled SIA, we go along the traversing/drawing order, assume lengths and angles for
unpaired, out-of-loop domains and loop starters.
"""
paired_dist = 10
dom_count = len(traverse_order)
coordinate_array = np.zeros((dom_count, 4))
#add a new col for absolute angle values
struct_info_array2 = np.append(struct_info_array2, [[0] for i in range(len(struct_info_array2[:,0]))], axis=1)
#average over given lengths to get a default unpaired length
n_given_len = 0
sum_length = 0
for length in struct_info_array2[:,2]:
if length != 0:
n_given_len += 1
sum_length += length
if n_given_len == 0:
default_length = 30
else:
default_length = sum_length/n_given_len
#paired list as before
paired_node_list = []
for node in paired_dict:
paired_node_list.append(node)
paired_node_list.append(paired_dict[node])
def get_red_neigh(domain_index, skeleton_graph):
connections = [n for n in skeleton_graph.edges.data(nbunch=domain_index)]
red_neigh = []
for edge in connections:
if edge[2]['color'] == 'r':
red_neigh.append(edge[1])
return red_neigh
def give_pair_of_domain(dom_index, paired_dict):
if dom_index in paired_dict:
return paired_dict[dom_index]
elif dom_index in list(paired_dict.values()):
index_of_pair = list(paired_dict.keys())[list(paired_dict.values()).index(dom_index)]
return index_of_pair
else:
raise ValueError('not actually paired')
def get_preferred_angle(domain_index, paired_node_list, skeleton_graph, traverse_order): #would be better with name_final
neighbors = get_red_neigh(domain_index, skeleton_graph)
if min(neighbors) == domain_index-1: #connected to prev dom
if traverse_order.index(min(neighbors)) < traverse_order.index(domain_index):
if min(neighbors) in paired_node_list and domain_index in paired_node_list:
pref_angle = 60
elif min(neighbors) in paired_node_list and domain_index not in paired_node_list:
pref_angle = 90
elif min(neighbors) not in paired_node_list and domain_index in paired_node_list:
pref_angle = 90
elif min(neighbors) not in paired_node_list and domain_index not in paired_node_list:
pref_angle = 0
elif traverse_order.index(min(neighbors)) > traverse_order.index(domain_index): #if backwards (SB case)
if max(neighbors) in paired_node_list and domain_index in paired_node_list:
pref_angle = 60
elif max(neighbors) in paired_node_list and domain_index not in paired_node_list:
pref_angle = 45
elif max(neighbors) not in paired_node_list and domain_index in paired_node_list:
pref_angle = 45
elif max(neighbors) not in paired_node_list and domain_index not in paired_node_list:
pref_angle = 180
else: #not connected to prev dom, so only
(string) --The value for the resource tag.
:type KmsKeyId: string
:param KmsKeyId: The AWS Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster.
:type EnhancedVpcRouting: boolean
:param EnhancedVpcRouting: An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.
If this option is true , enhanced VPC routing is enabled.
Default: false
:type AdditionalInfo: string
:param AdditionalInfo: Reserved.
:type IamRoles: list
:param IamRoles: A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services. You must supply the IAM roles in their Amazon Resource Name (ARN) format. You can supply up to 10 IAM roles in a single request.
A cluster can have up to 10 IAM roles associated with it at any time.
(string) --
:rtype: dict
:return: {
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': '<PASSWORD>',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False
},
'ClusterVersion': 'string',
'AllowVersionUpgrade': True|False,
'NumberOfNodes': 123,
'PubliclyAccessible': True|False,
'Encrypted': True|False,
'RestoreStatus': {
'Status': 'string',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
},
'HsmStatus': {
'HsmClientCertificateIdentifier': 'string',
'HsmConfigurationIdentifier': 'string',
'Status': 'string'
},
'ClusterSnapshotCopyStatus': {
'DestinationRegion': 'string',
'RetentionPeriod': 123,
'SnapshotCopyGrantName': 'string'
},
'ClusterPublicKey': 'string',
'ClusterNodes': [
{
'NodeRole': 'string',
'PrivateIPAddress': 'string',
'PublicIPAddress': 'string'
},
],
'ElasticIpStatus': {
'ElasticIp': 'string',
'Status': 'string'
},
'ClusterRevisionNumber': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'KmsKeyId': 'string',
'EnhancedVpcRouting': True|False,
'IamRoles': [
{
'IamRoleArn': 'string',
'ApplyStatus': 'string'
},
]
}
}
:returns:
available
creating
deleting
final-snapshot
hardware-failure
incompatible-hsm
incompatible-network
incompatible-parameters
incompatible-restore
modifying
rebooting
renaming
resizing
rotating-keys
storage-full
updating-hsm
"""
pass
def create_cluster_parameter_group(ParameterGroupName=None, ParameterGroupFamily=None, Description=None, Tags=None):
"""
Creates an Amazon Redshift parameter group.
Creating parameter groups is independent of creating clusters. You can associate a cluster with a parameter group when you create the cluster. You can also associate an existing cluster with a parameter group after the cluster is created by using ModifyCluster .
Parameters in the parameter group define specific behavior that applies to the databases you create on the cluster. For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .
See also: AWS API Documentation
:example: response = client.create_cluster_parameter_group(
ParameterGroupName='string',
ParameterGroupFamily='string',
Description='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type ParameterGroupName: string
:param ParameterGroupName: [REQUIRED]
The name of the cluster parameter group.
Constraints:
Must be 1 to 255 alphanumeric characters or hyphens
First character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
Must be unique within your AWS account.
Note
This value is stored as a lower-case string.
:type ParameterGroupFamily: string
:param ParameterGroupFamily: [REQUIRED]
The Amazon Redshift engine version to which the cluster parameter group applies. The cluster engine version determines the set of parameters.
To get a list of valid parameter group family names, you can call DescribeClusterParameterGroups . By default, Amazon Redshift returns a list of all the parameter groups that are owned by your AWS account, including the default parameter groups for each Amazon Redshift engine version. The parameter group family names associated with the default parameter groups provide you the valid values. For example, a valid family name is 'redshift-1.0'.
:type Description: string
:param Description: [REQUIRED]
A description of the parameter group.
:type Tags: list
:param Tags: A list of tag instances.
(dict) --A tag consisting of a name/value pair for a resource.
Key (string) --The key, or name, for the resource tag.
Value (string) --The value for the resource tag.
:rtype: dict
:return: {
'ClusterParameterGroup': {
'ParameterGroupName': 'string',
'ParameterGroupFamily': 'string',
'Description': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
]
}
}
"""
pass
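# Hedged usage sketch (illustration only; assumes valid AWS credentials and the real boto3
# library, since the function above is only a documentation stub). The family name
# 'redshift-1.0' is the valid example given in the docstring:
#
#   import boto3
#   redshift = boto3.client('redshift')
#   redshift.create_cluster_parameter_group(
#       ParameterGroupName='my-parameter-group',
#       ParameterGroupFamily='redshift-1.0',
#       Description='Custom parameter group for an analytics cluster',
#   )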
def create_cluster_security_group(ClusterSecurityGroupName=None, Description=None, Tags=None):
"""
Creates a new Amazon Redshift security group. You use security groups to control access to non-VPC clusters.
For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide .
See also: AWS API Documentation
:example: response = client.create_cluster_security_group(
ClusterSecurityGroupName='string',
Description='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type ClusterSecurityGroupName: string
:param ClusterSecurityGroupName: [REQUIRED]
The name for the security group. Amazon Redshift stores the value as a lowercase string.
Constraints:
Must contain no more than 255 alphanumeric characters or hyphens.
Must not be 'Default'.
Must be unique for all security groups that are created by your AWS account.
Example: examplesecuritygroup
:type Description: string
:param Description: [REQUIRED]
A description for the security group.
:type Tags: list
:param Tags: A list of tag instances.
(dict) --A tag consisting of a name/value pair for a resource.
Key (string) --The key, or name, for the resource tag.
Value (string) --The value for the resource tag.
:rtype: dict
:return: {
'ClusterSecurityGroup': {
'ClusterSecurityGroupName': 'string',
'Description': 'string',
'EC2SecurityGroups': [
{
'Status': 'string',
'EC2SecurityGroupName': 'string',
'EC2SecurityGroupOwnerId': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
]
},
],
'IPRanges': [
{
'Status': 'string',
'CIDRIP': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
]
},
],
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
]
}
}
"""
pass
def create_cluster_snapshot(SnapshotIdentifier=None, ClusterIdentifier=None, Tags=None):
"""
Creates a manual snapshot of the specified cluster. The cluster must be in the available state.
For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide .
See also: AWS API Documentation
:example: response = client.create_cluster_snapshot(
SnapshotIdentifier='string',
ClusterIdentifier='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type SnapshotIdentifier: string
:param SnapshotIdentifier: [REQUIRED]
A unique identifier for the snapshot that you are requesting. This identifier must be unique for all snapshots within the AWS account.
Constraints:
Cannot be null, empty, or blank
Must contain from 1 to 255 alphanumeric characters or hyphens
First character must be a letter
Cannot end with a hyphen or contain two consecutive hyphens
Example: my-snapshot-id
:type ClusterIdentifier: string
:param ClusterIdentifier: [REQUIRED]
The cluster identifier for which you want a snapshot.
:type Tags: list
:param Tags: A list of tag instances.
(dict) --A tag consisting of a name/value pair for a resource.
Key (string) --The key, or name, for the resource tag.
Value (string) --The value for the resource tag.
:rtype: dict
:return: {
'Snapshot': {
'SnapshotIdentifier': 'string',
'ClusterIdentifier': 'string',
'SnapshotCreateTime': datetime(2015, 1, 1),
'Status': 'string',
'Port': 123,
'AvailabilityZone': 'string',
'ClusterCreateTime': datetime(2015, 1, 1),
'MasterUsername': 'string',
'ClusterVersion': 'string',
'SnapshotType': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'DBName': 'string',
'VpcId': 'string',
'Encrypted': True|False,
'KmsKeyId': 'string',
'EncryptedWithHSM': True|False,
'AccountsWithRestoreAccess': [
{
'AccountId': 'string',
'AccountAlias': 'string'
},
],
'OwnerAccount': 'string',
'TotalBackupSizeInMegaBytes': 123.0,
'ActualIncrementalBackupSizeInMegaBytes': 123.0,
'BackupProgressInMegaBytes': 123.0,
'CurrentBackupRateInMegaBytesPerSecond': 123.0,
'EstimatedSecondsToCompletion': 123,
'ElapsedTimeInSeconds': 123,
'SourceRegion': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'RestorableNodeTypes': [
'string',
],
'EnhancedVpcRouting': True|False
}
}
:returns:
CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating".
DescribeClusterSnapshots returns status as "creating", "available", "final snapshot", or "failed".
DeleteClusterSnapshot returns status as "deleted".
"""
pass
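# Hedged usage sketch (illustration only; assumes valid AWS credentials and the real boto3
# library, since the stub above only documents the request/response shape). 'my-snapshot-id'
# is the example identifier from the docstring; the cluster name is hypothetical:
#
#   import boto3
#   redshift = boto3.client('redshift')
#   response = redshift.create_cluster_snapshot(
#       SnapshotIdentifier='my-snapshot-id',
#       ClusterIdentifier='my-cluster',
#   )
#   print(response['Snapshot']['Status'])  # reported as "creating" while the snapshot is taken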
def create_cluster_subnet_group(ClusterSubnetGroupName=None, Description=None, SubnetIds=None, Tags=None):
"""
Creates a new Amazon Redshift subnet group.
self.assertEqual('abc', self.stack['AResource'].properties['Foo'])
updated_stack = stack.Stack(self.ctx, 'updated_stack',
template.Template(tmpl),
disable_rollback=True)
self.stack.update(updated_stack)
self.assertEqual((stack.Stack.UPDATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual(
'abc',
self.stack['AResource']._stored_properties_data['Foo'])
self.assertEqual(
'ID-AResource',
self.stack['BResource']._stored_properties_data['Foo'])
self.m.VerifyAll()
def test_create_bad_attribute(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'ResourceWithPropsType',
'Properties': {
'Foo': {'Fn::GetAtt': ['AResource',
'Foo']}}}}}
self.stack = stack.Stack(self.ctx, 'bad_attr_test_stack',
template.Template(tmpl),
disable_rollback=True)
self.m.StubOutWithMock(generic_rsrc.ResourceWithProps,
'_update_stored_properties')
generic_rsrc.ResourceWithProps._update_stored_properties().AndRaise(
exception.InvalidTemplateAttribute(resource='a', key='foo'))
self.m.ReplayAll()
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.FAILED),
self.stack.state)
self.assertEqual('Resource CREATE failed: The Referenced Attribute '
'(a foo) is incorrect.', self.stack.status_reason)
self.m.VerifyAll()
def test_stack_create_timeout(self):
self.m.StubOutWithMock(scheduler.DependencyTaskGroup, '__call__')
self.m.StubOutWithMock(timeutils, 'wallclock')
stk = stack.Stack(self.ctx, 's', self.tmpl)
def dummy_task():
while True:
yield
start_time = time.time()
timeutils.wallclock().AndReturn(start_time)
timeutils.wallclock().AndReturn(start_time + 1)
scheduler.DependencyTaskGroup.__call__().AndReturn(dummy_task())
timeutils.wallclock().AndReturn(start_time + stk.timeout_secs() + 1)
self.m.ReplayAll()
stk.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.FAILED), stk.state)
self.assertEqual('Create timed out', stk.status_reason)
self.m.VerifyAll()
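# Note (added explanation): the wallclock() stubs above simulate the passage of time; the final
# stubbed call returns a value beyond stk.timeout_secs(), so the create operation is declared
# timed out without the test ever sleeping.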
def test_stack_name_valid(self):
stk = stack.Stack(self.ctx, 's', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'stack123', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'test.stack', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'test_stack', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'TEST', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
stk = stack.Stack(self.ctx, 'test-stack', self.tmpl)
self.assertIsInstance(stk, stack.Stack)
def test_stack_name_invalid(self):
gt_255_chars = ('abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuv')
stack_names = ['_foo', '1bad', '.kcats', 'test stack', ' teststack',
'^-^', '"stack"', '1234', 'cat|dog', '$(foo)',
'test/stack', 'test\\stack', 'test::stack',
'test;stack', 'test~stack', '#test', gt_255_chars]
for stack_name in stack_names:
ex = self.assertRaises(
exception.StackValidationFailed, stack.Stack,
self.ctx, stack_name, self.tmpl)
self.assertIn("Invalid stack name %s must contain" % stack_name,
six.text_type(ex))
def test_stack_name_invalid_type(self):
stack_names = [{"bad": 123}, ["no", "lists"]]
for stack_name in stack_names:
ex = self.assertRaises(
exception.StackValidationFailed, stack.Stack,
self.ctx, stack_name, self.tmpl)
self.assertIn("Invalid stack name %s, must be a string"
% stack_name, six.text_type(ex))
def test_resource_state_get_att(self):
tmpl = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}},
'Outputs': {'TestOutput': {'Value': {
'Fn::GetAtt': ['AResource', 'Foo']}}
}
}
self.stack = stack.Stack(self.ctx, 'resource_state_get_att',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertIn('AResource', self.stack)
rsrc = self.stack['AResource']
rsrc.resource_id_set('aaaa')
self.assertEqual('AResource', rsrc.FnGetAtt('Foo'))
for action, status in (
(rsrc.CREATE, rsrc.IN_PROGRESS),
(rsrc.CREATE, rsrc.COMPLETE),
(rsrc.CREATE, rsrc.FAILED),
(rsrc.SUSPEND, rsrc.IN_PROGRESS),
(rsrc.SUSPEND, rsrc.COMPLETE),
(rsrc.RESUME, rsrc.IN_PROGRESS),
(rsrc.RESUME, rsrc.COMPLETE),
(rsrc.UPDATE, rsrc.IN_PROGRESS),
(rsrc.UPDATE, rsrc.FAILED),
(rsrc.UPDATE, rsrc.COMPLETE),
(rsrc.DELETE, rsrc.IN_PROGRESS),
(rsrc.DELETE, rsrc.FAILED),
(rsrc.DELETE, rsrc.COMPLETE)):
rsrc.state_set(action, status)
self.stack._update_all_resource_data(False, True)
self.assertEqual('AResource',
self.stack.outputs['TestOutput'].get_value())
def test_resource_required_by(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType',
'DependsOn': 'AResource'},
'CResource': {'Type': 'GenericResourceType',
'DependsOn': 'BResource'},
'DResource': {'Type': 'GenericResourceType',
'DependsOn': 'BResource'}}}
self.stack = stack.Stack(self.ctx, 'depends_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual(['BResource'],
self.stack['AResource'].required_by())
self.assertEqual([],
self.stack['CResource'].required_by())
required_by = self.stack['BResource'].required_by()
self.assertEqual(2, len(required_by))
for r in ['CResource', 'DResource']:
self.assertIn(r, required_by)
def test_resource_multi_required_by(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType'},
'CResource': {'Type': 'GenericResourceType'},
'DResource': {'Type': 'GenericResourceType',
'DependsOn': ['AResource',
'BResource',
'CResource']}}}
self.stack = stack.Stack(self.ctx, 'depends_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
for r in ['AResource', 'BResource', 'CResource']:
self.assertEqual(['DResource'],
self.stack[r].required_by())
def test_store_saves_owner(self):
"""owner_id attribute of Store is saved to the database when stored."""
self.stack = stack.Stack(self.ctx, 'owner_stack', self.tmpl)
stack_ownee = stack.Stack(self.ctx, 'ownee_stack', self.tmpl,
owner_id=self.stack.id)
stack_ownee.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, stack_ownee.id)
self.assertEqual(self.stack.id, db_stack.owner_id)
def test_init_user_creds_id(self):
ctx_init = utils.dummy_context(user='my_user',
password='<PASSWORD>')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_init', self.tmpl,
user_creds_id=creds.id)
self.stack.store()
self.assertEqual(creds.id, self.stack.user_creds_id)
ctx_expected = ctx_init.to_dict()
ctx_expected['auth_token'] = None
self.assertEqual(ctx_expected, self.stack.stored_context().to_dict())
def test_tags_property_get_set(self):
self.stack = stack.Stack(self.ctx, 'stack_tags', self.tmpl)
self.stack.store()
stack_id = self.stack.id
test_stack = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertIsNone(test_stack.tags)
self.stack = stack.Stack(self.ctx, 'stack_name', self.tmpl)
self.stack.tags = ['tag1', 'tag2']
self.assertEqual(['tag1', 'tag2'], self.stack._tags)
self.stack.store()
stack_id = self.stack.id
test_stack = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertIsNone(test_stack._tags)
self.assertEqual(['tag1', 'tag2'], test_stack.tags)
self.assertEqual(['tag1', 'tag2'], test_stack._tags)
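# Note (added explanation): the assertions above exercise the lazy loading of stack tags; after
# Stack.load the private _tags cache is still None, and the tags are only read from the
# database (and cached in _tags) on the first access to the .tags property.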
def test_load_reads_tags(self):
self.stack = stack.Stack(self.ctx, 'stack_tags', self.tmpl)
self.stack.store()
stack_id = self.stack.id
test_stack = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertIsNone(test_stack.tags)
self.stack = stack.Stack(self.ctx, 'stack_name', self.tmpl,
tags=['tag1', 'tag2'])
self.stack.store()
stack_id = self.stack.id
test_stack = stack.Stack.load(self.ctx, stack_id=stack_id)
self.assertEqual(['tag1', 'tag2'], test_stack.tags)
def test_store_saves_tags(self):
self.stack = stack.Stack(self.ctx, 'tags_stack', self.tmpl)
self.stack.store()
db_tags = stack_tag_object.StackTagList.get(self.stack.context,
self.stack.id)
self.assertIsNone(db_tags)
self.stack = stack.Stack(self.ctx, 'tags_stack', self.tmpl,
tags=['tag1', 'tag2'])
self.stack.store()
db_tags = stack_tag_object.StackTagList.get(self.stack.context,
self.stack.id)
self.assertEqual('tag1', db_tags[0].tag)
self.assertEqual('tag2', db_tags[1].tag)
def test_store_saves_creds(self):
"""A user_creds entry is created on first stack store."""
cfg.CONF.set_default('deferred_auth_method', 'password')
self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)
self.stack.store()
# The store should've created a user_creds row and set user_creds_id
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
user_creds_id = db_stack.user_creds_id
self.assertIsNotNone(user_creds_id)
# should've stored the username/password in the context
user_creds = ucreds_object.UserCreds.get_by_id(self.ctx, user_creds_id)
self.assertEqual(self.ctx.username, user_creds.get('username'))
self.assertEqual(self.ctx.password, user_creds.get('password'))
self.assertIsNone(user_creds.get('trust_id'))
self.assertIsNone(user_creds.get('trustor_user_id'))
# Check the stored_context is as expected
expected_context = context.RequestContext.from_dict(self.ctx.to_dict())
expected_context.auth_token = None
stored_context = self.stack.stored_context().to_dict()
self.assertEqual(expected_context.to_dict(), stored_context)
# Store again, ID should not change
self.stack.store()
self.assertEqual(user_creds_id, db_stack.user_creds_id)
def test_store_saves_creds_trust(self):
"""A user_creds entry is created on first stack store."""
cfg.CONF.set_override('deferred_auth_method', 'trusts')
self.m.StubOutWithMock(keystone.KeystoneClientPlugin, '_create')
keystone.KeystoneClientPlugin._create().AndReturn(
fake_ks.FakeKeystoneClient(user_id='auser123'))
keystone.KeystoneClientPlugin._create().AndReturn(
fake_ks.FakeKeystoneClient(user_id='auser123'))
self.m.ReplayAll()
self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)
self.stack.store()
# The store should've created a user_creds row and set user_creds_id
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
user_creds_id = db_stack.user_creds_id
self.assertIsNotNone(user_creds_id)
# should've stored the trust_id and trustor_user_id returned from
# FakeKeystoneClient.create_trust_context, username/password should
# not have been stored
user_creds = ucreds_object.UserCreds.get_by_id(self.ctx, user_creds_id)
self.assertIsNone(user_creds.get('username'))
self.assertIsNone(user_creds.get('password'))
self.assertEqual('atrust', user_creds.get('trust_id'))
self.assertEqual('auser123', user_creds.get('trustor_user_id'))
auth = self.patchobject(context.RequestContext,
'trusts_auth_plugin')
self.patchobject(auth, 'get_access',
return_value=fakes.FakeAccessInfo([], None, None))
# Check the stored_context is as expected
expected_context = context.RequestContext(
trust_id='atrust', trustor_user_id='auser123',
request_id=self.ctx.request_id, is_admin=False).to_dict()
stored_context = self.stack.stored_context().to_dict()
self.assertEqual(expected_context, stored_context)
# Store again, ID should not change
self.stack.store()
self.assertEqual(user_creds_id, db_stack.user_creds_id)
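# Note (added explanation): with deferred_auth_method set to 'trusts' the stored user_creds row
# contains only the trust_id and trustor_user_id returned by the (faked) Keystone client; no
# username or password is persisted, which is exactly what the assertions above verify.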
def test_backup_copies_user_creds_id(self):
ctx_init = utils.dummy_context(user='my_user',
password='<PASSWORD>')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_init', self.tmpl,
user_creds_id=creds.id)
self.stack.store()
self.assertEqual(creds.id, self.stack.user_creds_id)
backup = self.stack._backup_stack()
self.assertEqual(creds.id, backup.user_creds_id)
def test_stored_context_err(self):
"""Test stored_context error path."""
self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)
ex = self.assertRaises(exception.Error, self.stack.stored_context)
expected_err = 'Attempt to use stored_context with no user_creds'
self.assertEqual(expected_err, six.text_type(ex))
def test_store_gets_username_from_stack(self):
self.stack = stack.Stack(self.ctx, 'username_stack',
self.tmpl, username='foobar')
self.ctx.username = 'not foobar'
self.stack.store()
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertEqual('foobar', db_stack.username)
def test_store_backup_true(self):
self.stack = stack.Stack(self.ctx, 'username_stack',
self.tmpl, username='foobar')
self.ctx.username = 'not foobar'
self.stack.store(backup=True)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertTrue(db_stack.backup)
def test_store_backup_false(self):
self.stack = stack.Stack(self.ctx, 'username_stack',
self.tmpl, username='foobar')
self.ctx.username = 'not foobar'
self.stack.store(backup=False)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertFalse(db_stack.backup)
def test_init_stored_context_false(self):
ctx_init = utils.dummy_context(user='mystored_user',
password='<PASSWORD>')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_store1', self.tmpl,
user_creds_id=creds.id,
use_stored_context=False)
ctx_expected = self.ctx.to_dict()
self.assertEqual(ctx_expected, self.stack.context.to_dict())
self.stack.store()
self.assertEqual(ctx_expected, self.stack.context.to_dict())
def test_init_stored_context_true(self):
ctx_init = utils.dummy_context(user='mystored_user',
password='<PASSWORD>')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_store2', self.tmpl,
user_creds_id=creds.id,
use_stored_context=True)
ctx_expected = ctx_init.to_dict()
ctx_expected['auth_token'] = None
self.assertEqual(ctx_expected, self.stack.context.to_dict())
self.stack.store()
self.assertEqual(ctx_expected, self.stack.context.to_dict())
def test_load_stored_context_false(self):
ctx_init = utils.dummy_context(user='mystored_user',
password='<PASSWORD>')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_store3', self.tmpl,
user_creds_id=creds.id)
self.stack.store()
load_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id,
use_stored_context=False)
self.assertEqual(self.ctx.to_dict(), load_stack.context.to_dict())
def test_load_stored_context_true(self):
ctx_init = utils.dummy_context(user='mystored_user',
password='<PASSWORD>')
ctx_init.request_id = self.ctx.request_id
creds = ucreds_object.UserCreds.create(ctx_init)
self.stack = stack.Stack(self.ctx, 'creds_store4', self.tmpl,
user_creds_id=creds.id)
self.stack.store()
ctx_expected = ctx_init.to_dict()
ctx_expected['auth_token'] = None
load_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id,
use_stored_context=True)
self.assertEqual(ctx_expected, load_stack.context.to_dict())
def test_load_honors_owner(self):
"""Loading a stack from the database will set the owner_id.
Loading a stack from the database will set the owner_id of the
resultant stack appropriately.
"""
self.stack = stack.Stack(self.ctx, 'owner_stack', self.tmpl)
stack_ownee = stack.Stack(self.ctx, 'ownee_stack', self.tmpl,
owner_id=self.stack.id)
stack_ownee.store()
saved_stack = stack.Stack.load(self.ctx, stack_id=stack_ownee.id)
self.assertEqual(self.stack.id, saved_stack.owner_id)
def test_requires_deferred_auth(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType'},
'CResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=False)
self.assertFalse(self.stack.requires_deferred_auth())
self.stack['CResource'].requires_deferred_auth = True
self.assertTrue(self.stack.requires_deferred_auth())
def test_stack_user_project_id_default(self):
self.stack = stack.Stack(self.ctx, 'user_project_none', self.tmpl)
self.stack.store()
self.assertIsNone(self.stack.stack_user_project_id)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertIsNone(db_stack.stack_user_project_id)
def test_stack_user_project_id_constructor(self):
self.stub_keystoneclient()
self.m.ReplayAll()
self.stack = stack.Stack(self.ctx, 'user_project_init',
self.tmpl,
stack_user_project_id='aproject1234')
self.stack.store()
self.assertEqual('aproject1234', self.stack.stack_user_project_id)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertEqual('aproject1234', db_stack.stack_user_project_id)
self.stack.delete()
self.assertEqual((stack.Stack.DELETE, stack.Stack.COMPLETE),
self.stack.state)
self.m.VerifyAll()
def test_stack_user_project_id_setter(self):
self.stub_keystoneclient()
self.m.ReplayAll()
self.stack = stack.Stack(self.ctx, 'user_project_init', self.tmpl)
self.stack.store()
self.assertIsNone(self.stack.stack_user_project_id)
self.stack.set_stack_user_project_id(project_id='aproject456')
self.assertEqual('aproject456', self.stack.stack_user_project_id)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertEqual('aproject456', db_stack.stack_user_project_id)
self.stack.delete()
self.assertEqual((stack.Stack.DELETE, stack.Stack.COMPLETE),
self.stack.state)
self.m.VerifyAll()
def test_stack_user_project_id_create(self):
self.stub_keystoneclient()
self.m.ReplayAll()
self.stack = stack.Stack(self.ctx, 'user_project_init', self.tmpl)
self.stack.store()
self.assertIsNone(self.stack.stack_user_project_id)
self.stack.create_stack_user_project_id()
self.assertEqual('aprojectid', self.stack.stack_user_project_id)
db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
self.assertEqual('aprojectid', db_stack.stack_user_project_id)
self.stack.delete()
self.assertEqual((stack.Stack.DELETE, stack.Stack.COMPLETE),
self.stack.state)
self.m.VerifyAll()
def test_preview_resources_returns_list_of_resource_previews(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = stack.Stack(self.ctx, 'preview_stack',
template.Template(tmpl))
res = mock.Mock()
res.preview.return_value = 'foo'
self.stack._resources = {'r1': res}
resources = self.stack.preview_resources()
self.assertEqual(['foo'], resources)
def test_correct_outputs(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': 'abc'}},
'BResource': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': 'def'}}},
'Outputs': {
'Resource_attr': {
'Value': {
'Fn::GetAtt': ['AResource', 'Foo']}}}}
self.stack = stack.Stack(self.ctx, 'stack_with_correct_outputs',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
self.assertEqual('abc', self.stack['AResource'].properties['Foo'])
# According _resolve_attribute method in GenericResource output
# value will be equal with name AResource.
self.stack._update_all_resource_data(False, True)
self.assertEqual('AResource',
self.stack.outputs['Resource_attr'].get_value())
self.stack.delete()
self.assertEqual((self.stack.DELETE, self.stack.COMPLETE),
self.stack.state)
def test_incorrect_outputs(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'AResource': {'Type': 'ResourceWithPropsType',
'Properties': {'Foo': 'abc'}}},
'Outputs': {
'Resource_attr': {
'Value': {
'Fn::GetAtt': ['AResource', 'Bar']}}}}
self.stack = stack.Stack(self.ctx, 'stack_with_incorrect_outputs',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
self.stack.state)
ex = self.assertRaises(exception.InvalidTemplateAttribute,
self.stack.outputs['Resource_attr'].get_value)
self.assertIn('The Referenced Attribute (AResource
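# The preview test above pins down the contract of preview_resources(): it returns
# a list with each resource's preview() result. A minimal sketch of an object
# satisfying that contract (an illustrative assumption, not the actual
# heat.engine.stack.Stack implementation):
class PreviewStackSketch:
    def __init__(self, resources):
        # resources: dict mapping resource name -> resource object
        self._resources = resources

    def preview_resources(self):
        # collect the preview of every resource in the stack
        return [res.preview() for res in self._resources.values()]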
<reponame>jonassoenen/noise_robust_cobras
import copy
import gc
import itertools
import logging
from enum import Enum
from typing import Union
import numpy as np
from noise_robust_cobras.cluster import Cluster
from noise_robust_cobras.clustering import Clustering
from noise_robust_cobras.clustering_algorithms.clustering_algorithms import (
KMeansClusterAlgorithm,
ClusterAlgorithm,
)
from noise_robust_cobras.cobras_logger import ClusteringLogger
from noise_robust_cobras.strategies.splitlevel_estimation import (
StandardSplitLevelEstimationStrategy,
)
from noise_robust_cobras.strategies.superinstance_selection import (
SuperinstanceSelectionHeuristic,
MostInstancesSelectionHeuristic,
LeastInstancesSelectionHeuristic,
)
from noise_robust_cobras.superinstance import SuperInstance, SuperInstanceBuilder
from noise_robust_cobras.superinstance_kmeans import KMeans_SuperinstanceBuilder
from noise_robust_cobras.noise_robust.datastructures.certainty_constraint_set import (
NewCertaintyConstraintSet,
)
from noise_robust_cobras.noise_robust.datastructures.constraint import Constraint
from noise_robust_cobras.noise_robust.datastructures.constraint_index import (
ConstraintIndex,
)
from noise_robust_cobras.noise_robust.noise_robust_possible_worlds import (
gather_extra_evidence,
)
from noise_robust_cobras.querier.querier import MaximumQueriesExceeded
class SplitResult(Enum):
SUCCESS = 1
NO_SPLIT_POSSIBLE = 2
SPLIT_FAILED = 3
class COBRAS:
certainty_constraint_set: NewCertaintyConstraintSet
clustering: Union[Clustering, None]
def __init__(
self,
cluster_algo: ClusterAlgorithm = KMeansClusterAlgorithm(),
superinstance_builder: SuperInstanceBuilder = KMeans_SuperinstanceBuilder(),
split_superinstance_selection_heur: SuperinstanceSelectionHeuristic = None,
splitlevel_strategy=None,
noise_probability=0.10,
minimum_approximation_order=2,
maximum_approximation_order=3,
certainty_threshold=0.95,
seed=None,
correct_noise=True,
logger=None,
cobras_logger=None,
):
self.seed = seed
# init data, querier, max_questions, train_indices and store_intermediate results
# these are already initialised here so the object's attribute set does not change during execution
# which allows Python to optimise attribute access
self.data = None
self.querier = None
self.train_indices = None
# init cobras_cluster_algo
self.cluster_algo = cluster_algo
self.superinstance_builder = superinstance_builder
# init split superinstance selection heuristic
if split_superinstance_selection_heur is None:
self.split_superinstance_selection_heur = MostInstancesSelectionHeuristic()
else:
self.split_superinstance_selection_heur = split_superinstance_selection_heur
# init splitlevel_heuristic
if splitlevel_strategy is None:
self.splitlevel_strategy = StandardSplitLevelEstimationStrategy(
LeastInstancesSelectionHeuristic()
)
else:
self.splitlevel_strategy = splitlevel_strategy
# variables used during execution
self.clustering_to_store = None
self.clustering = None
self.random_generator = None
# logging
self._log = logging.getLogger(__name__) if logger is None else logger
self._cobras_log = (
ClusteringLogger() if cobras_logger is None else cobras_logger
)
# certainty_constraint_set
if correct_noise:
self.certainty_constraint_set: NewCertaintyConstraintSet = NewCertaintyConstraintSet(
minimum_approximation_order,
maximum_approximation_order,
noise_probability,
self._cobras_log,
)
self.constraint_index = self.certainty_constraint_set.constraint_index
else:
self.certainty_constraint_set = None
self.constraint_index = ConstraintIndex()
self.certainty_threshold = certainty_threshold
self.correct_noise = correct_noise
@property
def clustering_logger(self):
return self._cobras_log
def fit(self, X, nb_clusters, train_indices, querier):
"""
Perform clustering.
The number of clusters (nb_clusters) is not used in COBRAS but is added as a parameter to have a consistent
interface over all clustering algorithms
:param X: numpy array where each row is an instance
:param nb_clusters: IGNORED, COBRAS determines the number of clusters dynamically
:param train_indices: the indices for which COBRAS is allowed to ask constraints; if there is no separate training set, use None
:param querier: a Querier object that can answer queries about the data X
:return: a tuple(all_clusters, runtimes, ml, cl) where all_clusters are the intermediate clusterings (for each query there is an intermediate clustering stored)
runtimes is the time the algorithm has been executing after each query
ml and cl are both lists of tuples representing the must-link and cannot-link constraints
note: these are the constraints that we got from the user! So there might be noisy constraints in these lists!
"""
self.random_generator = np.random.default_rng(self.seed)
self._cobras_log.log_start_clustering()
self.data = X
self.train_indices = (
train_indices if train_indices is not None else range(len(X))
)
self.split_superinstance_selection_heur.set_clusterer(self)
self.splitlevel_strategy.set_clusterer(self)
self.querier = querier
# initial clustering: all instances in one superinstance in one cluster
initial_superinstance = self.create_superinstance(
list(range(self.data.shape[0]))
)
initial_clustering = Clustering([Cluster([initial_superinstance])])
self.clustering = initial_clustering
# last valid clustering keeps the last completely merged clustering
last_valid_clustering = None
while not self.querier.query_limit_reached():
# during this iteration store the current clustering
self._cobras_log.update_clustering_to_store(self.clustering)
self.clustering_to_store = self.clustering.construct_cluster_labeling()
# splitting phase
self._cobras_log.log_entering_phase("splitting")
statuscode = self.split_next_superinstance()
if statuscode == SplitResult.NO_SPLIT_POSSIBLE:
# there is no split left to be done
# we have produced the best clustering
break
elif statuscode == SplitResult.SPLIT_FAILED:
# tried to split a superinstance but failed to split it
# this is recorded in the superinstance
# we will split another superinstance in the next iteration
continue
# merging phase
self._cobras_log.log_entering_phase("merging")
if self.correct_noise:
# make a copy of the current clustering and perform the merging phase on it
clustering_copy = copy.deepcopy(self.clustering)
fully_merged, new_user_constraints = self.merge_containing_clusters(
clustering_copy
)
corrected_clustering = None
if fully_merged:
# if we fully merged we can confirm and correct the clustering
# if not the query limit is reached so we have to stop
try:
fully_merged, corrected_clustering = self.confirm_and_correct(
new_user_constraints, clustering_copy
)
except MaximumQueriesExceeded:
# if during the confirm and correct the query limit is reached fully_merged is false
fully_merged = False
self.clustering = corrected_clustering
# explicit call to garbage collector to avoid memory problems
gc.collect()
else:
fully_merged, _ = self.merge_containing_clusters(self.clustering)
# correctly log intermediate results
if fully_merged:
self._cobras_log.update_last_intermediate_result(self.clustering)
# fill in the last_valid_clustering whenever appropriate
# after initialisation or after that the current clustering is fully merged
if fully_merged or last_valid_clustering is None:
last_valid_clustering = copy.deepcopy(self.clustering)
self.clustering = last_valid_clustering
self._cobras_log.log_end_clustering()
# collect results and return
all_clusters = self._cobras_log.get_all_clusterings()
runtimes = self._cobras_log.get_runtimes()
ml, cl = self._cobras_log.get_ml_cl_constraint_lists()
return all_clusters, runtimes, ml, cl
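# Hypothetical usage sketch (names such as `X` and `my_querier` are assumptions,
# not defined in this module): construct COBRAS, then call fit() with the data
# matrix, an ignored cluster count, optional train indices and a querier:
#
#     clusterer = COBRAS(noise_probability=0.05, certainty_threshold=0.95)
#     clusterings, runtimes, ml, cl = clusterer.fit(
#         X, nb_clusters=None, train_indices=None, querier=my_querier
#     )
#
# The returned ml/cl lists contain the raw (possibly noisy) user constraints.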
###########################
# SPLITTING #
###########################
def split_next_superinstance(self):
"""
Execute the splitting phase:
1) select the next super-instance to split
2) split the super-instance into multiple smaller super-instances
:return:
"""
# identify the next superinstance to split
to_split, originating_cluster = self.identify_superinstance_to_split()
if to_split is None:
return SplitResult.NO_SPLIT_POSSIBLE
# remove to_split from the clustering
originating_cluster.super_instances.remove(to_split)
if len(originating_cluster.super_instances) == 0:
self.clustering.clusters.remove(originating_cluster)
# split to_split into new clusters
split_level = self.determine_split_level(to_split)
new_super_instances = self.split_superinstance(to_split, split_level)
self._log.info(
f"Split super-instance {to_split.representative_idx} into {split_level} new super-instances {list(si.representative_idx for si in new_super_instances)}"
)
new_clusters = self.add_new_clusters_from_split(new_super_instances)
if not new_clusters:
# it is possible that splitting a super-instance does not lead to a new cluster:
# e.g. a super-instance contains 2 points, of which one is in the test set
# in this case, the super-instance can be split into two new ones, but these will be joined
# again immediately, as we cannot have super-instances containing only test points (these cannot be
# queried)
# this branch handles that case: we simply add the super-instance back to its originating cluster,
# and set the tried_splitting flag to make sure we do not keep trying to split this super-instance
self._log.info("Split failed! restoring original state")
originating_cluster.super_instances.append(to_split)
to_split.tried_splitting = True
to_split.children = None
if originating_cluster not in self.clustering.clusters:
self.clustering.clusters.append(originating_cluster)
return SplitResult.SPLIT_FAILED
else:
self.clustering.clusters.extend(new_clusters)
return SplitResult.SUCCESS
def identify_superinstance_to_split(self):
"""
Identify the next super-instance that needs to be split using the split superinstance selection heuristic
:return: (the super instance to split, the cluster from which the super instance originates)
"""
# if there is only one superinstance return that superinstance as superinstance to split
if (
len(self.clustering.clusters) == 1
and len(self.clustering.clusters[0].super_instances) == 1
):
return (
self.clustering.clusters[0].super_instances[0],
self.clustering.clusters[0],
)
options = []
for cluster in self.clustering.clusters:
if cluster.is_pure:
continue
if cluster.is_finished:
continue
for superinstance in cluster.super_instances:
if superinstance.tried_splitting:
continue
if len(superinstance.indices) == 1:
continue
if len(superinstance.train_indices) < 2:
continue
else:
options.append(superinstance)
if len(options) == 0:
return None, None
superinstance_to_split = self.split_superinstance_selection_heur.choose_superinstance(
options
)
if superinstance_to_split is None:
return None, None
originating_cluster = [
cluster
for cluster in self.clustering.clusters
if superinstance_to_split in cluster.super_instances
][0]
return superinstance_to_split, originating_cluster
def determine_split_level(self, superinstance):
"""
Determine the splitting level to split the given super-instance
"""
return self.splitlevel_strategy.estimate_splitting_level(superinstance)
def split_superinstance(self, si, k):
"""
Actually split the given super-instance si into k (the split level) new super-instances
note: if splitting with self.cluster_algo results in a super-instance that has no training_instances,
this super-instance is merged with another super-instance that does still have training instances
:param si: the super-instance to be split
:param k: the splitlevel to be used
:return: A list with the resulting super-instances
:rtype List[Superinstance]
"""
# cluster the instances of the superinstance
clusters = self.cluster_algo.cluster(
self.data, si.indices, k, [], [], seed=self.random_generator.integers(1,1000000)
)
# based on the resulting clusters make new superinstances
# superinstances with no training instances are assigned to the closest superinstance with training instances
training = []
no_training = []
for new_si_idx in set(clusters):
cur_indices = [
si.indices[idx] for idx, c in enumerate(clusters) if c == new_si_idx
]
si_train_indices = [x for x in cur_indices if x in self.train_indices]
if len(si_train_indices) != 0:
training.append(self.create_superinstance(cur_indices, si))
else:
no_training.append(
(cur_indices, np.mean(self.data[cur_indices, :], axis=0))
)
for indices, centroid in no_training:
closest_train = min(
training,
key=lambda x: np.linalg.norm(
self.data[x.representative_idx, :] - centroid
),
)
closest_train.indices.extend(indices)
si.children = training
return training
@staticmethod
def add_new_clusters_from_split(si):
"""
small helper function: wraps each of the given new super-instances in its own cluster, to be added to the current clustering by the caller
"""
new_clusters = []
for x in si:
new_clusters.append(Cluster([x]))
if len(new_clusters) == 1:
return
NUM},
"<cjt>|<card>|NUM|M|P|@N<": {POS: NUM},
"<cjt>|<card>|NUM|M|P|@P<": {POS: NUM},
"<cjt>|<card>|NUM|M|S|@>N": {POS: NUM},
"<cjt>|<card>|NUM|M|S|@N<": {POS: NUM},
"<cjt>|<card>|NUM|M|S|@N<PRED": {POS: NUM},
"<cjt>|<card>|NUM|M|S|@P<": {POS: NUM},
"<cjt>|<card>|NUM|M|S|@PRED>": {POS: NUM},
"<cjt>|<co-prparg>|<np-idf>|N|M|P|@P<": {POS: NOUN},
"<cjt>|<co-vfin>|KC|@<ADVL": {POS: CCONJ},
"<cjt>|<com>|PRP|@<ADVL": {POS: ADP},
"<cjt>|<com>|PRP|@PRED>": {POS: ADP},
"<cjt>|<dem>|<first-cjt>|INDP|M|S|@KOMP<": {POS: PRON},
"<cjt>|<dem>|<np-idf>|DET|F|P|@<SC": {POS: PRON},
"<cjt>|<dem>|<np-idf>|DET|F|S|@N<PRED": {POS: DET},
"<cjt>|<dem>|<np-idf>|DET|M|P|@<SC": {POS: PRON},
"<cjt>|<dem>|DET|F|P|@<SC": {POS: PRON},
"<cjt>|<dem>|DET|F|P|@<SUBJ": {POS: PRON},
"<cjt>|<dem>|DET|F|P|@P<": {POS: PRON},
"<cjt>|<dem>|DET|F|P|@SUBJ>": {POS: PRON},
"<cjt>|<dem>|DET|F|S|@APP": {POS: DET},
"<cjt>|<dem>|DET|M|P|@<ACC": {POS: PRON},
"<cjt>|<dem>|DET|M|P|@P<": {POS: PRON},
"<cjt>|<dem>|DET|M|P|@SUBJ>": {POS: PRON},
"<cjt>|<dem>|DET|M|S|@<ACC": {POS: PRON},
"<cjt>|<dem>|DET|M|S|@APP": {POS: DET},
"<cjt>|<dem>|DET|M|S|@P<": {POS: PRON},
"<cjt>|<dem>|DET|M|S|@SUBJ>": {POS: PRON},
"<cjt>|<diff>|<KOMP>|<np-def>|DET|F|S|@SUBJ>": {POS: PRON},
"<cjt>|<diff>|<KOMP>|DET|F|P|@P<": {POS: PRON},
"<cjt>|<diff>|<KOMP>|DET|F|S|@P<": {POS: PRON},
"<cjt>|<diff>|<KOMP>|DET|M|P|@P<": {POS: PRON},
"<cjt>|<diff>|DET|F|S|@P<": {POS: PRON},
"<cjt>|<diff>|DET|M|S|@N<PRED": {POS: DET},
"<cjt>|<diff>|DET|M|S|@SUBJ>": {POS: PRON},
"<cjt>|<first-cjt>|<COMP>|ADJ|M|S|@N<": {POS: ADJ},
"<cjt>|<first-cjt>|<NUM-ord>|<np-def>|ADJ|M|S|@<ADVL": {POS: ADJ},
"<cjt>|<first-cjt>|<n>|<np-def>|ADJ|M|S|@P<": {POS: ADJ},
"<cjt>|<first-cjt>|<np-def>|N|F|P|@P<": {POS: NOUN},
"<cjt>|<first-cjt>|<np-def>|N|F|S|@P<": {POS: NOUN},
"<cjt>|<first-cjt>|<np-def>|N|F|S|@SUBJ>": {POS: NOUN},
"<cjt>|<first-cjt>|<np-def>|N|M|P|@<ACC": {POS: NOUN},
"<cjt>|<first-cjt>|<np-def>|N|M|P|@<SUBJ": {POS: NOUN},
"<cjt>|<first-cjt>|<np-def>|N|M|S|@<SUBJ": {POS: NOUN},
"<cjt>|<first-cjt>|<np-idf>|N|F|P|@P<": {POS: NOUN},
"<cjt>|<first-cjt>|<np-idf>|N|F|S|@<ACC": {POS: NOUN},
"<cjt>|<first-cjt>|<np-idf>|N|F|S|@APP": {POS: NOUN},
"<cjt>|<first-cjt>|<np-idf>|N|F|S|@N<PRED": {POS: NOUN},
"<cjt>|<first-cjt>|<np-idf>|N|F|S|@P<": {POS: NOUN},
"<cjt>|<first-cjt>|<np-idf>|N|M|P|@<ACC": {POS: NOUN},
"<cjt>|<first-cjt>|<np-idf>|N|M|P|@APP": {POS: NOUN},
"<cjt>|<first-cjt>|<np-idf>|N|M|P|@P<": {POS: NOUN},
"<cjt>|<first-cjt>|<np-idf>|N|M|S|@APP": {POS: NOUN},
"<cjt>|<first-cjt>|<np-idf>|N|M|S|@N<PRED": {POS: NOUN},
"<cjt>|<first-cjt>|<np-idf>|N|M|S|@P<": {POS: NOUN},
"<cjt>|<first-cjt>|<sam->|PRP|@N<": {POS: ADP},
"<cjt>|<first-cjt>|ADJ|F|S|@N<PRED": {POS: ADJ},
"<cjt>|<first-cjt>|ADJ|F|S|@NPHR": {POS: ADJ},
"<cjt>|<first-cjt>|ADJ|M|P|@FS-STA": {POS: ADJ},
"<cjt>|<first-cjt>|ADJ|M|S|@N<PRED": {POS: ADJ},
"<cjt>|<first-cjt>|ADV|@ADVL": {POS: ADV},
"<cjt>|<first-cjt>|PERS|F|3S|NOM|@N<PRED": {POS: PRON},
"<cjt>|<first-cjt>|PROP|F|S|@APP": {POS: PROPN},
"<cjt>|<first-cjt>|PROP|F|S|@NPHR": {POS: PROPN},
"<cjt>|<first-cjt>|PROP|M|S|@<ACC": {POS: PROPN},
"<cjt>|<first-cjt>|PROP|M|S|@<SC": {POS: PROPN},
"<cjt>|<first-cjt>|PROP|M|S|@APP": {POS: PROPN},
"<cjt>|<first-cjt>|PROP|M|S|@N<PRED": {POS: PROPN},
"<cjt>|<first-cjt>|PROP|M|S|@SUBJ>": {POS: PROPN},
"<cjt>|<first-cjt>|PRP|@<ADVL": {POS: ADP},
"<cjt>|<first-cjt>|PRP|@<PIV": {POS: ADP},
"<cjt>|<first-cjt>|PRP|@N<": {POS: ADP},
"<cjt>|<interr>|ADV|@<ADVL": {POS: ADV},
"<cjt>|<interr>|ADV|@ADVL>": {POS: ADV},
"<cjt>|<kc>|<first-cjt>|ADV|@ADVL>": {POS: ADV},
"<cjt>|<meta>|<first-cjt>|<np-idf>|N|F|S|@<ACC": {POS: NOUN},
"<cjt>|<meta>|<first-cjt>|<np-idf>|N|M|S|@P<": {POS: NOUN},
"<cjt>|<mv>|<se-passive>|V|FUT|3S|SUBJ|@FS-<ADVL": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|IMPF|3P|IND|@FS-N<": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|IMPF|3P|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|IMPF|3S|IND|@FS-N<PRED": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|IMPF|3S|IND|@ICL-N<": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|IMPF|3S|SUBJ|@FS-P<": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|INF|3P|@ICL-P<": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|MQP|3S|IND|@FS-N<PRED": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|PR|3P|IND|@FS-<ADVL": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|PR|3P|IND|@FS-N<": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|PR|3P|IND|@FS-N<PRED": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|PR|3P|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|PR|3P|SUBJ|@FS-<ACC": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|PR|3P|SUBJ|@FS-<SUBJ": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|PR|3S|IND|@FS-N<": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|PR|3S|IND|@FS-N<PRED": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|PR|3S|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|PS/MQP|3P|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|PS|3P|IND|@FS-N<": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|PS|3P|IND|@FS-QUE": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|PS|3S|IND|@FS-<ACC": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|PS|3S|IND|@FS-<SUBJ": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|PS|3S|IND|@FS-N<": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|PS|3S|IND|@FS-N<PRED": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|PS|3S|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|<se-passive>|V|PS|3S|IND|@N<PRED": {POS: VERB},
"<cjt>|<mv>|V|COND|1P|@FS-<ACC": {POS: VERB},
"<cjt>|<mv>|V|COND|1S|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|COND|3P|@FS-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|COND|3P|@FS-P<": {POS: VERB},
"<cjt>|<mv>|V|COND|3S|@FS-<ACC": {POS: VERB},
"<cjt>|<mv>|V|COND|3S|@FS-ACC>": {POS: AUX},
"<cjt>|<mv>|V|COND|3S|@FS-P<": {POS: VERB},
"<cjt>|<mv>|V|COND|3S|@FS-QUE": {POS: VERB},
"<cjt>|<mv>|V|COND|3S|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|COND|3S|@N<ARG": {POS: AUX},
"<cjt>|<mv>|V|FUT|1S|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|FUT|3P|IND|@FS-<SC": {POS: AUX},
"<cjt>|<mv>|V|FUT|3P|IND|@FS-<SUBJ": {POS: AUX},
"<cjt>|<mv>|V|FUT|3P|IND|@FS-N<": {POS: VERB},
"<cjt>|<mv>|V|FUT|3P|IND|@FS-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|FUT|3P|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|FUT|3P|IND|@N<": {POS: VERB},
"<cjt>|<mv>|V|FUT|3P|SUBJ|@FS-<ADVL": {POS: VERB},
"<cjt>|<mv>|V|FUT|3P|SUBJ|@FS-ADVL>": {POS: VERB},
"<cjt>|<mv>|V|FUT|3S|IND|@FS-<ACC": {POS: VERB},
"<cjt>|<mv>|V|FUT|3S|IND|@FS-<SUBJ": {POS: AUX},
"<cjt>|<mv>|V|FUT|3S|IND|@FS-N<": {POS: VERB},
"<cjt>|<mv>|V|FUT|3S|IND|@FS-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|FUT|3S|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|FUT|3S|IND|@FS-UTT": {POS: VERB},
"<cjt>|<mv>|V|FUT|3S|IND|@N<ARG": {POS: VERB},
"<cjt>|<mv>|V|FUT|3S|SUBJ|@FS-<ADVL": {POS: VERB},
"<cjt>|<mv>|V|FUT|3S|SUBJ|@FS-ADVL>": {POS: VERB},
"<cjt>|<mv>|V|FUT|3S|SUBJ|@ICL-P<": {POS: VERB},
"<cjt>|<mv>|V|GER|@ADVL>": {POS: VERB},
"<cjt>|<mv>|V|GER|@ICL-<ADVL": {POS: VERB},
"<cjt>|<mv>|V|GER|@ICL-ADVL>": {POS: VERB},
"<cjt>|<mv>|V|GER|@ICL-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|GER|@N<": {POS: VERB},
"<cjt>|<mv>|V|IMPF|1P|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|IMPF|1S|IND|@FS-A<": {POS: VERB},
"<cjt>|<mv>|V|IMPF|1S|IND|@FS-ACC>": {POS: VERB},
"<cjt>|<mv>|V|IMPF|1S|IND|@FS-KOMP<": {POS: VERB},
"<cjt>|<mv>|V|IMPF|1S|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|IMPF|3P|IND|@ADVL>": {POS: AUX},
"<cjt>|<mv>|V|IMPF|3P|IND|@FS-<ACC": {POS: AUX},
"<cjt>|<mv>|V|IMPF|3P|IND|@FS-<ADVL": {POS: VERB},
"<cjt>|<mv>|V|IMPF|3P|IND|@FS-ADVL>": {POS: VERB},
"<cjt>|<mv>|V|IMPF|3P|IND|@FS-KOMP<": {POS: VERB},
"<cjt>|<mv>|V|IMPF|3P|IND|@FS-N<": {POS: VERB},
"<cjt>|<mv>|V|IMPF|3P|IND|@FS-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|IMPF|3P|IND|@FS-P<": {POS: VERB},
"<cjt>|<mv>|V|IMPF|3P|IND|@FS-QUE": {POS: AUX},
"<cjt>|<mv>|V|IMPF|3P|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|IMPF|3P|IND|@ICL-STA": {POS: VERB},
"<cjt>|<mv>|V|IMPF|3P|SUBJ|@FS-<ACC": {POS: VERB},
"<cjt>|<mv>|V|IMPF|3S|IND|@FS-<ACC": {POS: VERB},
"<cjt>|<mv>|V|IMPF|3S|IND|@FS-<ADVL": {POS: VERB},
"<cjt>|<mv>|V|IMPF|3S|IND|@FS-ACC>": {POS: AUX},
"<cjt>|<mv>|V|IMPF|3S|IND|@FS-N<": {POS: VERB},
"<cjt>|<mv>|V|IMPF|3S|IND|@FS-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|IMPF|3S|IND|@FS-P<": {POS: VERB},
"<cjt>|<mv>|V|IMPF|3S|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|IMPF|3S|IND|@FS-SUBJ>": {POS: VERB},
"<cjt>|<mv>|V|IMPF|3S|IND|@ICL-N<": {POS: VERB},
"<cjt>|<mv>|V|IMPF|3S|IND|@N<": {POS: VERB},
"<cjt>|<mv>|V|IMPF|3S|SUBJ|@FS-<ADVL": {POS: VERB},
"<cjt>|<mv>|V|INF|1P|@ICL-<SUBJ": {POS: VERB},
"<cjt>|<mv>|V|INF|3P|@ICL-<SUBJ": {POS: VERB},
"<cjt>|<mv>|V|INF|3P|@ICL-P<": {POS: VERB},
"<cjt>|<mv>|V|INF|3S|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|INF|3S|@ICL-<SUBJ": {POS: VERB},
"<cjt>|<mv>|V|INF|3S|@ICL-P<": {POS: VERB},
"<cjt>|<mv>|V|INF|@FS-QUE": {POS: VERB},
"<cjt>|<mv>|V|INF|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|INF|@ICL-<ACC": {POS: VERB},
"<cjt>|<mv>|V|INF|@ICL-<SC": {POS: VERB},
"<cjt>|<mv>|V|INF|@ICL-AUX<": {POS: VERB},
"<cjt>|<mv>|V|INF|@ICL-KOMP<": {POS: VERB},
"<cjt>|<mv>|V|INF|@ICL-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|INF|@ICL-P<": {POS: VERB},
"<cjt>|<mv>|V|INF|@P<": {POS: VERB},
"<cjt>|<mv>|V|MQP|3S|IND|@FS-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|MQP|3S|IND|@FS-P<": {POS: VERB},
"<cjt>|<mv>|V|MQP|3S|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|PCP|@ICL-AUX<": {POS: VERB},
"<cjt>|<mv>|V|PCP|F|P|@ICL-AUX<": {POS: VERB},
"<cjt>|<mv>|V|PCP|F|P|@ICL-N<": {POS: VERB},
"<cjt>|<mv>|V|PCP|F|P|@ICL-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|PCP|F|P|@N<": {POS: ADJ},
"<cjt>|<mv>|V|PCP|F|S|@ICL-<ADVL": {POS: VERB},
"<cjt>|<mv>|V|PCP|F|S|@ICL-<SC": {POS: ADJ},
"<cjt>|<mv>|V|PCP|F|S|@ICL-AUX<": {POS: VERB},
"<cjt>|<mv>|V|PCP|F|S|@ICL-N<": {POS: VERB},
"<cjt>|<mv>|V|PCP|F|S|@ICL-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|PCP|F|S|@ICL-PRED>": {POS: VERB},
"<cjt>|<mv>|V|PCP|F|S|@N<": {POS: VERB},
"<cjt>|<mv>|V|PCP|F|S|@PRED>": {POS: VERB},
"<cjt>|<mv>|V|PCP|M|P|@FS-ACC>": {POS: VERB},
"<cjt>|<mv>|V|PCP|M|P|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|PCP|M|P|@ICL-<ADVL": {POS: VERB},
"<cjt>|<mv>|V|PCP|M|P|@ICL-AUX<": {POS: VERB},
"<cjt>|<mv>|V|PCP|M|P|@ICL-N<": {POS: VERB},
"<cjt>|<mv>|V|PCP|M|P|@ICL-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|PCP|M|P|@ICL-P<": {POS: VERB},
"<cjt>|<mv>|V|PCP|M|P|@N<": {POS: VERB},
"<cjt>|<mv>|V|PCP|M|S|@FS-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|PCP|M|S|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|PCP|M|S|@ICL-<ADVL": {POS: VERB},
"<cjt>|<mv>|V|PCP|M|S|@ICL-<PRED": {POS: VERB},
"<cjt>|<mv>|V|PCP|M|S|@ICL-<SC": {POS: VERB},
"<cjt>|<mv>|V|PCP|M|S|@ICL-AUX<": {POS: VERB},
"<cjt>|<mv>|V|PCP|M|S|@ICL-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|PCP|M|S|@ICL-PRED>": {POS: VERB},
"<cjt>|<mv>|V|PCP|M|S|@N<": {POS: VERB},
"<cjt>|<mv>|V|PCP|M|S|@N<PRED": {POS: VERB},
"<cjt>|<mv>|V|PR|1P|IND|@FS-ACC>": {POS: VERB},
"<cjt>|<mv>|V|PR|1P|IND|@FS-ADVL>": {POS: VERB},
"<cjt>|<mv>|V|PR|1P|IND|@FS-EXC": {POS: VERB},
"<cjt>|<mv>|V|PR|1P|IND|@FS-N<": {POS: VERB},
"<cjt>|<mv>|V|PR|1P|IND|@FS-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|PR|1P|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|PR|1P|SUBJ|@FS-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|PR|1S|IND|@FS-<ADVL": {POS: VERB},
"<cjt>|<mv>|V|PR|1S|IND|@FS-ACC>": {POS: VERB},
"<cjt>|<mv>|V|PR|1S|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|PR|3P|IND|@<SC": {POS: VERB},
"<cjt>|<mv>|V|PR|3P|IND|@FS-<ACC": {POS: VERB},
"<cjt>|<mv>|V|PR|3P|IND|@FS-<ADVL": {POS: VERB},
"<cjt>|<mv>|V|PR|3P|IND|@FS-<SUBJ": {POS: VERB},
"<cjt>|<mv>|V|PR|3P|IND|@FS-ACC>": {POS: VERB},
"<cjt>|<mv>|V|PR|3P|IND|@FS-ADVL>": {POS: VERB},
"<cjt>|<mv>|V|PR|3P|IND|@FS-APP": {POS: VERB},
"<cjt>|<mv>|V|PR|3P|IND|@FS-KOMP<": {POS: VERB},
"<cjt>|<mv>|V|PR|3P|IND|@FS-N<": {POS: VERB},
"<cjt>|<mv>|V|PR|3P|IND|@FS-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|PR|3P|IND|@FS-P<": {POS: VERB},
"<cjt>|<mv>|V|PR|3P|IND|@FS-QUE": {POS: AUX},
"<cjt>|<mv>|V|PR|3P|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|PR|3P|IND|@N<": {POS: VERB},
"<cjt>|<mv>|V|PR|3P|IND|@NPHR": {POS: AUX},
"<cjt>|<mv>|V|PR|3P|SUBJ|@FS-<ACC": {POS: VERB},
"<cjt>|<mv>|V|PR|3P|SUBJ|@FS-<ADVL": {POS: VERB},
"<cjt>|<mv>|V|PR|3P|SUBJ|@FS-N<": {POS: VERB},
"<cjt>|<mv>|V|PR|3P|SUBJ|@FS-P<": {POS: VERB},
"<cjt>|<mv>|V|PR|3P|SUBJ|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@FS-<ACC": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@FS-<ADVL": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@FS-<PIV": {POS: AUX},
"<cjt>|<mv>|V|PR|3S|IND|@FS-<SC": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@FS-<SUBJ": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@FS-A<": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@FS-ACC>": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@FS-ADVL>": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@FS-APP": {POS: AUX},
"<cjt>|<mv>|V|PR|3S|IND|@FS-N<": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@FS-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@FS-P<": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@FS-QUE": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@FS-S<": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@FS-SUBJ>": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@ICL-ADVL>": {POS: AUX},
"<cjt>|<mv>|V|PR|3S|IND|@ICL-AUX<": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@N<ARG": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@N<PRED": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@NPHR": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@P<": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|IND|@STA": {POS: AUX},
"<cjt>|<mv>|V|PR|3S|SUBJ|@FS-<ACC": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|SUBJ|@FS-<ADVL": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|SUBJ|@FS-<SC": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|SUBJ|@FS-<SUBJ": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|SUBJ|@FS-COM<": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|SUBJ|@FS-N<": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|SUBJ|@FS-P<": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|SUBJ|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|SUBJ|@ICL-N<": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|SUBJ|@ICL-P<": {POS: VERB},
"<cjt>|<mv>|V|PR|3S|SUBJ|@N<": {POS: VERB},
"<cjt>|<mv>|V|PS/MQP|3P|IND|@FS-<ACC": {POS: VERB},
"<cjt>|<mv>|V|PS/MQP|3P|IND|@FS-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|PS/MQP|3P|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|PS/MQP|3P|IND|@ICL-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|PS/MQP|3P|IND|@N<PRED": {POS: VERB},
"<cjt>|<mv>|V|PS|1P|IND|@FS-<ACC": {POS: VERB},
"<cjt>|<mv>|V|PS|1P|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|PS|1S|IND|@FS-<ACC": {POS: VERB},
"<cjt>|<mv>|V|PS|1S|IND|@FS-<ADVL": {POS: VERB},
"<cjt>|<mv>|V|PS|1S|IND|@FS-ACC>": {POS: VERB},
"<cjt>|<mv>|V|PS|1S|IND|@FS-ADVL>": {POS: VERB},
"<cjt>|<mv>|V|PS|1S|IND|@FS-N<": {POS: VERB},
"<cjt>|<mv>|V|PS|1S|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|PS|3P|IND|@FS-<ACC": {POS: VERB},
"<cjt>|<mv>|V|PS|3P|IND|@FS-<ADVL": {POS: VERB},
"<cjt>|<mv>|V|PS|3P|IND|@FS-ACC>": {POS: VERB},
"<cjt>|<mv>|V|PS|3P|IND|@FS-N<": {POS: VERB},
"<cjt>|<mv>|V|PS|3P|IND|@FS-QUE": {POS: AUX},
"<cjt>|<mv>|V|PS|3P|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|PS|3S|IND|@<ADVL": {POS: VERB},
"<cjt>|<mv>|V|PS|3S|IND|@FS-<ACC": {POS: VERB},
"<cjt>|<mv>|V|PS|3S|IND|@FS-<ADVL": {POS: VERB},
"<cjt>|<mv>|V|PS|3S|IND|@FS-ACC>": {POS: VERB},
"<cjt>|<mv>|V|PS|3S|IND|@FS-ADVL>": {POS: VERB},
"<cjt>|<mv>|V|PS|3S|IND|@FS-KOMP<": {POS: VERB},
"<cjt>|<mv>|V|PS|3S|IND|@FS-N<": {POS: VERB},
"<cjt>|<mv>|V|PS|3S|IND|@FS-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|PS|3S|IND|@FS-P<": {POS: VERB},
"<cjt>|<mv>|V|PS|3S|IND|@FS-S<": {POS: VERB},
"<cjt>|<mv>|V|PS|3S|IND|@FS-STA": {POS: VERB},
"<cjt>|<mv>|V|PS|3S|IND|@FS-UTT": {POS: VERB},
"<cjt>|<mv>|V|PS|3S|IND|@ICL-N<": {POS: VERB},
"<cjt>|<mv>|V|PS|3S|IND|@ICL-N<PRED": {POS: VERB},
"<cjt>|<mv>|V|PS|3S|IND|@ICL-QUE": {POS: VERB},
"<cjt>|<mv>|V|PS|3S|IND|@ICL-STA": {POS: VERB},
"<cjt>|<mv>|V|PS|3S|IND|@N<PRED": {POS: VERB},
"<cjt>|<n>|<NUM-ord>|ADJ|M|S|@P<": {POS: ADJ},
"<cjt>|<n>|<np-def>|ADJ|F|P|@<SUBJ": {POS: ADJ},
"<cjt>|<n>|<np-def>|ADJ|F|S|@<ACC": {POS: ADJ},
"<cjt>|<n>|<np-def>|ADJ|M|P|@<ACC": {POS: ADJ},
"<cjt>|<n>|<np-def>|ADJ|M|P|@<SUBJ": {POS: ADJ},
"<cjt>|<n>|<np-def>|ADJ|M|P|@ACC>": {POS: ADJ},
"<cjt>|<n>|<np-def>|ADJ|M|P|@N<PRED": {POS: ADJ},
"<cjt>|<n>|<np-def>|ADJ|M|P|@P<": {POS: ADJ},
"<cjt>|<n>|<np-def>|ADJ|M|S|@<ACC": {POS: ADJ},
"<cjt>|<n>|<np-def>|ADJ|M|S|@<SC": {POS: ADJ},
"<cjt>|<n>|<np-def>|ADJ|M|S|@APP": {POS: ADJ},
"<cjt>|<n>|<np-def>|ADJ|M|S|@NPHR": {POS: ADJ},
"<cjt>|<n>|<np-def>|ADJ|M|S|@P<": {POS: ADJ},
"<cjt>|<n>|<np-idf>|ADJ|F|S|@N<PRED": {POS: ADJ},
"<cjt>|<n>|<np-idf>|ADJ|M|S|@SC>": {POS: ADJ},
"<cjt>|<n>|ADJ|F|P|@SUBJ>": {POS: ADJ},
"<cjt>|<n>|ADJ|F|S|@N<PRED": {POS: ADJ},
"<cjt>|<n>|ADJ|F|S|@P<": {POS: ADJ},
"<cjt>|<n>|ADJ|M/F|P|@SUBJ>": {POS: ADJ},
"<cjt>|<n>|ADJ|M|P|@<SUBJ": {POS: ADJ},
"<cjt>|<n>|ADJ|M|P|@APP": {POS: ADJ},
"<cjt>|<n>|ADJ|M|P|@P<": {POS: ADJ},
"<cjt>|<n>|ADJ|M|P|@SUBJ>": {POS: ADJ},
"<cjt>|<n>|ADJ|M|S|@P<": {POS: ADJ},
"<cjt>|<n>|ADJ|M|S|@SUBJ>": {POS: ADJ},
"<cjt>|<n>|V|PCP|M|P|@<SUBJ": {POS: VERB},
"<cjt>|<n>|V|PCP|M|P|@N<PRED": {POS: VERB},
"<cjt>|<n>|V|PCP|M|P|@P<": {POS: VERB},
"<cjt>|<n>|V|PCP|M|P|@SUBJ>": {POS: VERB},
"<cjt>|<n>|V|PCP|M|S|@<SC": {POS: ADJ},
"<cjt>|<np-def>|ADJ|M|S|@N<PRED": {POS: ADJ},
"<cjt>|<np-def>|N|F|P|@<ACC": {POS: NOUN},
"<cjt>|<np-def>|N|F|P|@<SC": {POS: NOUN},
"<cjt>|<np-def>|N|F|P|@<SUBJ": {POS: NOUN},
"<cjt>|<np-def>|N|F|P|@ACC>": {POS: NOUN},
"<cjt>|<np-def>|N|F|P|@APP": {POS: NOUN},
"<cjt>|<np-def>|N|F|P|@N<PRED": {POS: NOUN},
"<cjt>|<np-def>|N|F|P|@P<": {POS: NOUN},
"<cjt>|<np-def>|N|F|P|@SUBJ>": {POS: NOUN},
"<cjt>|<np-def>|N|F|S|@<ACC": {POS: NOUN},
"<cjt>|<np-def>|N|F|S|@<ADVL": {POS: NOUN},
"<cjt>|<np-def>|N|F|S|@<OC": {POS: NOUN},
"<cjt>|<np-def>|N|F|S|@<SC": {POS: NOUN},
"<cjt>|<np-def>|N|F|S|@<SUBJ": {POS: NOUN},
"<cjt>|<np-def>|N|F|S|@ACC>": {POS: NOUN},
"<cjt>|<np-def>|N|F|S|@APP": {POS: NOUN},
"<cjt>|<np-def>|N|F|S|@ICL-APP": {POS: NOUN},
"<cjt>|<np-def>|N|F|S|@N<": {POS: NOUN},
"<cjt>|<np-def>|N|F|S|@N<PRED": {POS: NOUN},
"<cjt>|<np-def>|N|F|S|@P<": {POS: NOUN},
"<cjt>|<np-def>|N|F|S|@SUBJ>": {POS: NOUN},
"<cjt>|<np-def>|N|M|P|@<ACC": {POS: SYM},
"<cjt>|<np-def>|N|M|P|@<ADVL": {POS: NOUN},
"<cjt>|<np-def>|N|M|P|@<SC": {POS: NOUN},
"<cjt>|<np-def>|N|M|P|@<SUBJ": {POS: NOUN},
"<cjt>|<np-def>|N|M|P|@APP": {POS: NOUN},
"<cjt>|<np-def>|N|M|P|@FS-N<": {POS: NOUN},
"<cjt>|<np-def>|N|M|P|@N<": {POS: NOUN},
"<cjt>|<np-def>|N|M|P|@N<PRED": {POS: SYM},
"<cjt>|<np-def>|N|M|P|@P<": {POS: SYM},
"<cjt>|<np-def>|N|M|P|@SUBJ>": {POS: NOUN},
"<cjt>|<np-def>|N|M|S|@<ACC": {POS: NOUN},
"<cjt>|<np-def>|N|M|S|@<ADVL": {POS: NOUN},
"<cjt>|<np-def>|N|M|S|@<SC": {POS: NOUN},
"<cjt>|<np-def>|N|M|S|@<SUBJ": {POS: NOUN},
"<cjt>|<np-def>|N|M|S|@ACC>": {POS: NOUN},
"<cjt>|<np-def>|N|M|S|@APP": {POS: NOUN},
"<cjt>|<np-def>|N|M|S|@FS-STA": {POS: NOUN},
"<cjt>|<np-def>|N|M|S|@ICL-<ACC": {POS: NOUN},
"<cjt>|<np-def>|N|M|S|@ICL-PRED>": {POS: NOUN},
"<cjt>|<np-def>|N|M|S|@KOMP<": {POS: NOUN},
"<cjt>|<np-def>|N|M|S|@N<PRED": {POS: NOUN},
"<cjt>|<np-def>|N|M|S|@NPHR": {POS: NOUN},
"<cjt>|<np-def>|N|M|S|@P<": {POS: NOUN},
"<cjt>|<np-def>|N|M|S|@SC>": {POS: NOUN},
"<cjt>|<np-def>|N|M|S|@SUBJ>": {POS: NOUN},
"<cjt>|<np-idf>|ADJ|M|P|@<ADVL": {POS: ADJ},
"<cjt>|<np-idf>|ADJ|M|S|@N<PRED": {POS: ADJ},
"<cjt>|<np-idf>|N|F|P|@<ACC": {POS: NOUN},
"<cjt>|<np-idf>|N|F|P|@<ADVL": {POS: NOUN},
"<cjt>|<np-idf>|N|F|P|@<OC": {POS: NOUN},
"<cjt>|<np-idf>|N|F|P|@<SC": {POS: NOUN},
"<cjt>|<np-idf>|N|F|P|@<SUBJ": {POS: NOUN},
"<cjt>|<np-idf>|N|F|P|@ACC>": {POS: NOUN},
"<cjt>|<np-idf>|N|F|P|@APP": {POS: NOUN},
"<cjt>|<np-idf>|N|F|P|@N<": {POS: NOUN},
"<cjt>|<np-idf>|N|F|P|@N<PRED": {POS: NOUN},
"<cjt>|<np-idf>|N|F|P|@NPHR": {POS: NOUN},
"<cjt>|<np-idf>|N|F|P|@P<": {POS: NOUN},
"<cjt>|<np-idf>|N|F|P|@PASS": {POS: NOUN},
"<cjt>|<np-idf>|N|F|P|@SUBJ>": {POS: NOUN},
"<cjt>|<np-idf>|N|F|S|@<ACC": {POS: NOUN},
"<cjt>|<np-idf>|N|F|S|@<ADVL": {POS: NOUN},
"<cjt>|<np-idf>|N|F|S|@<OC": {POS: NOUN},
"<cjt>|<np-idf>|N|F|S|@<SC": {POS: NOUN},
"<cjt>|<np-idf>|N|F|S|@<SUBJ": {POS: NOUN},
"<cjt>|<np-idf>|N|F|S|@ACC>": {POS: NOUN},
"<cjt>|<np-idf>|N|F|S|@ADVL": {POS: NOUN},
"<cjt>|<np-idf>|N|F|S|@APP": {POS: NOUN},
"<cjt>|<np-idf>|N|F|S|@FS-S<": {POS: NOUN},
"<cjt>|<np-idf>|N|F|S|@ICL-APP": {POS: NOUN},
"<cjt>|<np-idf>|N|F|S|@N<": {POS: NOUN},
"<cjt>|<np-idf>|N|F|S|@N<PRED": {POS: NOUN},
"<cjt>|<np-idf>|N|F|S|@NPHR": {POS: NOUN},
"<cjt>|<np-idf>|N|F|S|@P<": {POS: NOUN},
"<cjt>|<np-idf>|N|F|S|@PRED>": {POS: NOUN},
"<cjt>|<np-idf>|N|F|S|@SC>": {POS: NOUN},
"<cjt>|<np-idf>|N|F|S|@SUBJ>": {POS: NOUN},
"<cjt>|<np-idf>|N|F|S|@VOK": {POS: NOUN},
"<cjt>|<np-idf>|N|M/F|P|@P<": {POS: NOUN},
"<cjt>|<np-idf>|N|M|P|@<ACC": {POS: NOUN},
"<cjt>|<np-idf>|N|M|P|@<OC": {POS: NOUN},
"<cjt>|<np-idf>|N|M|P|@<SC": {POS: NOUN},
"<cjt>|<np-idf>|N|M|P|@<SUBJ": | |
as input
Returns:
TimeDataset
"""
return time_dataset.GlobalTimeDataset(
df_dict,
predict_mode=predict_mode,
n_lags=self.n_lags,
n_forecasts=self.n_forecasts,
season_config=self.season_config,
events_config=self.events_config,
country_holidays_config=self.country_holidays_config,
covar_config=self.config_covar,
regressors_config=self.regressors_config,
)
def __handle_missing_data(self, df, freq, predicting):
"""Checks, auto-imputes and normalizes new data
Args:
df (pd.DataFrame): raw data with columns 'ds' and 'y'
freq (str): data frequency
predicting (bool): when there are no lags, allow NA values in 'y' of the forecast series, or allow 'y' to be missing completely
Returns:
pre-processed df
"""
if self.n_lags == 0 and not predicting:
# we can drop rows with NA in y
sum_na = sum(df["y"].isna())
if sum_na > 0:
df = df[df["y"].notna()]
log.info("Dropped {} NaN rows in 'y'.".format(sum_na))
# add missing dates for autoregression modelling
if self.n_lags > 0:
df, missing_dates = df_utils.add_missing_dates_nan(df, freq=freq)
if missing_dates > 0:
if self.impute_missing:
log.info("{} missing dates added.".format(missing_dates))
else:
raise ValueError(
"{} missing dates found. Please preprocess data manually or set impute_missing to True.".format(
missing_dates
)
)
if self.regressors_config is not None:
# if future regressors, check that they are not nan at end, else drop
# we ignore missing events, as those will be filled in with zeros.
reg_nan_at_end = 0
for col in self.regressors_config.keys():
col_nan_at_end = 0
while len(df) > col_nan_at_end and df[col].isnull().iloc[-(1 + col_nan_at_end)]:
col_nan_at_end += 1
reg_nan_at_end = max(reg_nan_at_end, col_nan_at_end)
if reg_nan_at_end > 0:
# drop rows at end due to missing future regressors
df = df[:-reg_nan_at_end]
log.info("Dropped {} rows at end due to missing future regressor values.".format(reg_nan_at_end))
df_end_to_append = None
nan_at_end = 0
while len(df) > nan_at_end and df["y"].isnull().iloc[-(1 + nan_at_end)]:
nan_at_end += 1
if nan_at_end > 0:
if predicting:
# allow nans at end - will re-add at end
if self.n_forecasts > 1 and self.n_forecasts < nan_at_end:
# check that there are not more than n_forecasts trailing nans, else drop the surplus
dropped_surplus = nan_at_end - self.n_forecasts
df = df[:-dropped_surplus]
log.info(
"Detected y to have more NaN values than n_forecasts can predict. "
"Dropped {} rows at end.".format(dropped_surplus)
)
# correct new length:
nan_at_end = self.n_forecasts
df_end_to_append = df[-nan_at_end:]
df = df[:-nan_at_end]
else:
# training - drop nans at end
df = df[:-nan_at_end]
log.info(
"Dropped {} consecutive nans at end. "
"Training data can only be imputed up to last observation.".format(nan_at_end)
)
# impute missing values
data_columns = []
if self.n_lags > 0:
data_columns.append("y")
if self.config_covar is not None:
data_columns.extend(self.config_covar.keys())
if self.regressors_config is not None:
data_columns.extend(self.regressors_config.keys())
if self.events_config is not None:
data_columns.extend(self.events_config.keys())
for column in data_columns:
sum_na = sum(df[column].isnull())
if sum_na > 0:
if self.impute_missing:
# use 0 substitution for holidays and events missing values
if self.events_config is not None and column in self.events_config.keys():
df[column].fillna(0, inplace=True)
remaining_na = 0
else:
df.loc[:, column], remaining_na = df_utils.fill_linear_then_rolling_avg(
df[column],
limit_linear=self.impute_limit_linear,
rolling=self.impute_rolling,
)
log.info("{} NaN values in column {} were auto-imputed.".format(sum_na - remaining_na, column))
if remaining_na > 0:
raise ValueError(
"More than {} consecutive missing values encountered in column {}. "
"{} NA remain. Please preprocess data manually.".format(
2 * self.impute_limit_linear + self.impute_rolling, column, remaining_na
)
)
else: # fail because set to not impute missing
raise ValueError(
"Missing values found. Please preprocess data manually or set impute_missing to True."
)
if df_end_to_append is not None:
df = df.append(df_end_to_append)
return df
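# A hedged sketch (an assumption, not the actual df_utils implementation) of what
# the fill_linear_then_rolling_avg helper used above could look like in plain
# pandas: linear interpolation limited to `limit_linear` consecutive gaps, then a
# centred rolling-average fill of window `rolling`, returning the filled series
# and the number of values still missing:
#
#     def fill_linear_then_rolling_avg(series, limit_linear, rolling):
#         filled = series.interpolate(method="linear", limit=limit_linear,
#                                     limit_area="inside")
#         rolling_avg = filled.rolling(rolling, min_periods=1, center=True).mean()
#         filled = filled.fillna(rolling_avg)
#         return filled, int(filled.isna().sum())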
def _handle_missing_data(self, df, freq, predicting=False):
"""Checks, auto-imputes and normalizes new data
Args:
df (dict, pd.DataFrame): dict of dataframes (or a single dataframe) containing columns 'ds', 'y' with all data
freq (str): data frequency
predicting (bool): when there are no lags, allow NA values in 'y' of the forecast series, or allow 'y' to be missing completely
Returns:
pre-processed df
"""
df_is_dict = True
if isinstance(df, pd.DataFrame):
df_is_dict = False
df = {"__df__": df}
elif not isinstance(df, dict):
raise ValueError("Please provide a valid df type (pd.DataFrame or dict).")
df_handled_missing_dict = {}
for key in df:
df_handled_missing_dict[key] = self.__handle_missing_data(df[key], freq, predicting)
if not df_is_dict:
df_handled_missing_dict = df_handled_missing_dict["__df__"]
return df_handled_missing_dict
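# Usage note: _handle_missing_data accepts either a single DataFrame or a dict of
# named DataFrames; a bare DataFrame is wrapped under the key "__df__", processed,
# and unwrapped again, so callers get back the same shape they passed in, e.g.
# (freq "D" is just an illustrative value):
#
#     df_clean = self._handle_missing_data(df, freq="D")
#     dfs_clean = self._handle_missing_data({"a": df_a, "b": df_b}, freq="D")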
def _check_dataframe(self, df, check_y=True, exogenous=True):
"""Performs basic data sanity checks and ordering
Prepare dataframe for fitting or predicting.
Args:
df (pd.DataFrame, dict): dataframe or dict of dataframes containing column 'ds'
check_y (bool): if df must have series values
set to True if training or predicting with autoregression
exogenous (bool): whether to check covariates, regressors and events column names
Returns:
pd.DataFrame or dict of pd.DataFrame
"""
df_is_dict = True
if isinstance(df, pd.DataFrame):
df_is_dict = False
df = {"__df__": df}
elif not isinstance(df, dict):
raise ValueError("Please provide a valid df type (pd.DataFrame or dict).")
checked_df = {}
for key, df_i in df.items():
checked_df[key] = df_utils.check_single_dataframe(
df=df_i,
check_y=check_y,
covariates=self.config_covar if exogenous else None,
regressors=self.regressors_config if exogenous else None,
events=self.events_config if exogenous else None,
)
if not df_is_dict:
checked_df = checked_df["__df__"]
return checked_df
def _validate_column_name(self, name, events=True, seasons=True, regressors=True, covariates=True):
"""Validates the name of a seasonality, event, or regressor.
Args:
name (str):
events (bool): check if name already used for event
seasons (bool): check if name already used for seasonality
regressors (bool): check if name already used for regressor
"""
reserved_names = [
"trend",
"additive_terms",
"daily",
"weekly",
"yearly",
"events",
"holidays",
"zeros",
"extra_regressors_additive",
"yhat",
"extra_regressors_multiplicative",
"multiplicative_terms",
]
rn_l = [n + "_lower" for n in reserved_names]
rn_u = [n + "_upper" for n in reserved_names]
reserved_names.extend(rn_l)
reserved_names.extend(rn_u)
reserved_names.extend(["ds", "y", "cap", "floor", "y_scaled", "cap_scaled"])
if name in reserved_names:
raise ValueError("Name {name!r} is reserved.".format(name=name))
if events and self.events_config is not None:
if name in self.events_config.keys():
raise ValueError("Name {name!r} already used for an event.".format(name=name))
if events and self.country_holidays_config is not None:
if name in self.country_holidays_config.holiday_names:
raise ValueError(
"Name {name!r} is a holiday name in {country_holidays}.".format(
name=name, country_holidays=self.country_holidays_config.country
)
)
if seasons and self.season_config is not None:
if name in self.season_config.periods:
raise ValueError("Name {name!r} already used for a seasonality.".format(name=name))
if covariates and self.config_covar is not None:
if name in self.config_covar:
raise ValueError("Name {name!r} already used for an added covariate.".format(name=name))
if regressors and self.regressors_config is not None:
if name in self.regressors_config.keys():
raise ValueError("Name {name!r} already used for an added regressor.".format(name=name))
def _normalize(self, df_dict):
"""Apply data scales.
Applies data scaling factors to df using data_params.
Args:
df_dict (dict): dict of pd.Dataframes each df with columns 'ds', 'y', (and potentially more regressors)
Returns:
df_dict: dict of pd.DataFrame, normalized
"""
for df_name, df_i in df_dict.items():
data_params = self.config_normalization.get_data_params(df_name)
df_dict[df_name] = df_utils.normalize(df_i, data_params)
return df_dict
def _init_train_loader(self, df_dict):
"""Executes data preparation steps and initiates training procedure.
Args:
df_dict (dict): dict of pd.DataFrame containing column 'ds', 'y' with training data
Returns:
torch DataLoader
"""
if not isinstance(df_dict, dict):
raise ValueError("df_dict must be a dict of pd.DataFrames.")
# if not self.fitted:
self.config_normalization.init_data_params(
df_dict=df_dict,
covariates_config=self.config_covar,
regressor_config=self.regressors_config,
events_config=self.events_config,
)
df_dict = self._normalize(df_dict)
# if not self.fitted:
if self.config_trend.changepoints is not None:
# scale user-specified changepoint times
self.config_trend.changepoints = self._normalize(
{"__df__": pd.DataFrame({"ds": pd.Series(self.config_trend.changepoints)})}
)["__df__"]["t"].values
df_merged, _ = df_utils.join_dataframes(df_dict)
df_merged = df_merged.sort_values("ds")
df_merged.drop_duplicates(inplace=True, keep="first", subset=["ds"])
self.season_config = utils.set_auto_seasonalities(df_merged, season_config=self.season_config)
if self.country_holidays_config is not None:
self.country_holidays_config.init_holidays(df_merged)
dataset = self._create_dataset(df_dict, predict_mode=False) # needs to be called after set_auto_seasonalities
self.config_train.set_auto_batch_epoch(n_data=len(dataset))
loader = DataLoader(dataset, batch_size=self.config_train.batch_size, shuffle=True)
# if not self.fitted:
self.model = self._init_model() # needs to be called after set_auto_seasonalities
if self.config_train.learning_rate is None:
self.config_train.learning_rate = self.config_train.find_learning_rate(self.model, dataset)
log.info("lr-range-test selected learning rate: {:.2E}".format(self.config_train.learning_rate))
self.optimizer = self.config_train.get_optimizer(self.model.parameters())
self.scheduler = self.config_train.get_scheduler(self.optimizer, steps_per_epoch=len(loader))
return loader
def _init_val_loader(self, df_dict):
"""Executes data preparation steps and initiates evaluation procedure.
Args:
df_dict (dict): dict of pd.DataFrame containing column 'ds', 'y' with validation data
Returns:
torch DataLoader
"""
df_dict = self._normalize(df_dict)
dataset = self._create_dataset(df_dict, predict_mode=False)
loader = DataLoader(dataset, batch_size=min(1024, len(dataset)), shuffle=False, drop_last=False)
return loader
def _get_time_based_sample_weight(self, t):
weight = torch.ones_like(t)
if self.config_train.newer_samples_weight > 1.0:
end_w = self.config_train.newer_samples_weight
start_t = self.config_train.newer_samples_start
time = (t.detach() - start_t) / (1.0 - start_t)
time = torch.maximum(torch.zeros_like(time), time)
time = torch.minimum(torch.ones_like(time), time) # time = 0 to 1
time = torch.pi * (time - 1.0) # time = -pi to 0
time = 0.5 * torch.cos(time) + 0.5 # time = 0 to 1
# scales the weights so that the newest samples weigh end_w times more
# than the oldest ones, with the newest weight being 1.0
weight = (1.0 + time * (end_w - 1.0)) / end_w
return weight
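# Worked example (parameter values assumed for illustration): with
# newer_samples_weight = 4.0 and newer_samples_start = 0.5, samples in the older
# half of the data (t <= 0.5) get weight 1/4 = 0.25, the weight then rises along a
# half-cosine ramp, and the newest samples (t = 1.0) get weight 1.0, i.e. the
# newest samples count four times as much as the oldest ones.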
def _train_epoch(self, e, loader):
"""Make one complete iteration over all
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 The github-release-retry Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a GitHub release and uploads files."""
import argparse
import base64
import json
import os
import sys
import time
import traceback
import typing
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional
import requests
if typing.TYPE_CHECKING:
from dataclasses_json.api import DataClassJsonMixin
else:
from dataclasses_json import DataClassJsonMixin
def remove_none_fields(dic: Any) -> Any:
return {k: v for k, v in dic.items() if v is not None}
def to_dict(obj: DataClassJsonMixin) -> Any: # pylint: disable=used-before-assignment;
return remove_none_fields(obj.to_dict())
def log(message: str) -> None:
print(message, file=sys.stderr) # noqa: T001
def log_exception(message: str) -> None:
log(message)
traceback.print_exc(file=sys.stderr)
log("")
def log_response(response: requests.Response) -> None:
log(f"status_code: {response.status_code}")
if response.content:
try:
content = response.content.decode(encoding="utf-8", errors="ignore")
log(f"content: {content}")
except Exception: # pylint: disable=broad-except;
log(f"content: {response.content!r}")
log("")
def release_asset_node_id_to_asset_id(node_id: str) -> str:
"""
Extracts and returns the asset id from the given Release Asset |node_id|.
The "id" returned from the GraphQL v4 API is called the "node_id" in the REST API v3.
We can get back to the REST "id" by decoding the "node_id" (it is base64 encoded)
and extracting the id number at the end, but this is undocumented and may change.
:param node_id: The Release Asset node_id.
:return: The extracted REST API v3 asset id.
"""
# There is a new format and an old format.
if node_id.startswith("RA_"):
# New format: "RA_[base64 encoded bytes]".
# The last four bytes (big-endian, unsigned) of the base64 encoded bytes are the node id.
# Strip off the "RA_".
base64_string = node_id[3:]
asset_id = str(int.from_bytes(base64.b64decode(base64_string)[-4:], "big"))
else:
# Old format: just a base64 encoded string.
# Once decoded, the format is similar to "012:ReleaseAsset18381577". # noqa: SC100
# The asset id part is 18381577.
node_id_decoded: str = base64.b64decode(node_id).decode(
encoding="utf-8", errors="ignore"
)
if "ReleaseAsset" not in node_id_decoded:
raise AssertionError(
f"Unrecognized node_id format: {node_id}. Decoded (base64) string: {node_id_decoded}."
)
asset_id = node_id_decoded.split("ReleaseAsset")[1]
return asset_id
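# Worked example for the old format (the same node id appears in the sample
# GraphQL response further below): base64 "MDEyOlJlbGVhc2VBc3NldDE4MzgxNTc3"
# decodes to "012:ReleaseAsset18381577", so release_asset_node_id_to_asset_id
# returns the asset id "18381577".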
@dataclass
class GithubResourceError(DataClassJsonMixin):
resource: Optional[str] = None
field: Optional[str] = None
code: Optional[str] = None
@dataclass
class GithubClientError(DataClassJsonMixin):
message: Optional[str] = None
errors: Optional[List[GithubResourceError]] = None
@dataclass
class Asset(DataClassJsonMixin):
url: Optional[str] = None
browser_download_url: Optional[str] = None
id: Optional[str] = None # noqa: VNE003, A003
name: Optional[str] = None
label: Optional[str] = None
state: Optional[str] = None
content_type: Optional[str] = None
size: Optional[int] = None
@dataclass
class Release(DataClassJsonMixin):
upload_url: Optional[str] = None
id: Optional[str] = None # noqa: VNE003, A003
tag_name: Optional[str] = None
target_commitish: Optional[str] = None
name: Optional[str] = None
body: Optional[str] = None
draft: Optional[bool] = None
prerelease: Optional[bool] = None
assets: Optional[List[Asset]] = None
@dataclass
class GithubApi(DataClassJsonMixin):
github_api_url: str
user: str
repo: str
token: str
retry_limit: int
def _headers_v3(self) -> Dict[str, str]:
return {
"Accept": "application/vnd.github.v3.text-match+json",
"Authorization": f"token {self.token}",
"User-Agent": f"{self.user} {self.repo}",
}
def _headers_v4(self) -> Dict[str, str]:
return {
"Authorization": f"bearer {self.token}",
"User-Agent": f"{self.user} {self.repo}",
}
@staticmethod
def _wait() -> None:
# Don't make too many requests per second.
# We are unlikely to reach official rate limits, BUT repeated polling can look like abuse.
# TODO: revisit this if needed.
time.sleep(1)
def create_release(self, release: Release) -> requests.Response:
self._wait()
return requests.post(
url=f"{self.github_api_url}/repos/{self.user}/{self.repo}/releases",
json=to_dict(release),
headers=self._headers_v3(),
)
def get_release_by_tag(self, tag_name: str) -> requests.Response:
self._wait()
return requests.get(
url=f"{self.github_api_url}/repos/{self.user}/{self.repo}/releases/tags/{tag_name}",
headers=self._headers_v3(),
)
def get_asset_by_id(self, asset_id: str) -> requests.Response:
self._wait()
return requests.get(
url=f"{self.github_api_url}/repos/{self.user}/{self.repo}/releases/assets/{asset_id}",
headers=self._headers_v3(),
)
def delete_asset(self, asset_id: str) -> requests.Response:
self._wait()
return requests.delete(
url=f"{self.github_api_url}/repos/{self.user}/{self.repo}/releases/assets/{asset_id}",
headers={**self._headers_v3(), "Content-type": "application/json"},
)
def upload_asset(self, file_path: Path, release: Release) -> requests.Response:
if not release.upload_url:
raise AssertionError("Need release object with upload_url.")
# Upload URL looks like:
# https://uploads.github.com/repos/octocat/Hello-World/releases/1/assets{?name,label}
# We want the part before {.
upload_url = release.upload_url.split("{")[0]
# Then we add the name.
upload_url = f"{upload_url}?name={file_path.name}"
self._wait()
with file_path.open(mode="rb") as f:
return requests.post(
url=upload_url,
headers={
**self._headers_v3(),
"Content-Type": "application/octet-stream",
},
data=f,
)
def graphql_query(self, query: str) -> requests.Response:
self._wait()
return requests.post(
f"{self.github_api_url}/graphql",
headers=self._headers_v4(),
json={"query": query},
)
def find_asset_id_by_file_name(
self, file_name: str, release: Release
) -> Optional[str]:
"""
Returns the asset id.
This relies on undocumented behavior; see release_asset_node_id_to_asset_id.
:returns the asset id or None if the asset was not found.
"""
if not release.tag_name:
raise AssertionError("Expected tag_name")
# We get the asset id via GitHub's v4 GraphQL API, as this seems to be more reliable.
# But most other operations still require using the REST v3 API.
log(f"Finding asset id using v4 API of {file_name}.")
query = f"""
query {{
repository(owner:"{self.user}", name:"{self.repo}") {{
release(tagName:"{release.tag_name}") {{
releaseAssets(first: 1, name:"{file_name}") {{
nodes {{
id
}}
}}
}}
}}
}}
"""
response = self.graphql_query(query)
# Even on errors, the response should be "OK".
if response.status_code != requests.codes.ok:
raise UnexpectedResponseError(response)
try:
response_json = json.loads(response.content)
except json.JSONDecodeError:
raise UnexpectedResponseError(response)
# The response should look a bit like this:
# {
# "data": {
# "repository": {
# "release": {
# "releaseAssets": {
# "nodes": [
# {
# "id": "MDEyOlJlbGVhc2VBc3NldDE4MzgxNTc3" # noqa: SC100
# }
# ]
# }
# }
# }
# }
# }
try:
assets = response_json["data"]["repository"]["release"]["releaseAssets"][
"nodes"
]
except KeyError:
raise UnexpectedResponseError(response)
if not assets:
# Asset not found.
return None
try:
node_id: str = assets[0]["id"]
except KeyError:
raise UnexpectedResponseError(response)
return release_asset_node_id_to_asset_id(node_id)
def verify_asset_size_and_state_via_v3_api(
self, file_name: str, file_size: int, tag_name: str, release: Optional[Release]
) -> bool:
if not release:
log("Getting the current release again to check asset status.")
response = self.get_release_by_tag(tag_name)
if response.status_code != requests.codes.ok:
raise UnexpectedResponseError(response)
log("Decoding release info.")
try:
release = Release.from_json(response.content)
except json.JSONDecodeError:
raise UnexpectedResponseError(response)
if release.assets:
for asset in release.assets:
if (
asset.name == file_name
and asset.size == file_size
and asset.state == "uploaded"
):
log("The asset has the correct size and state. Asset done.\n")
return True
return False
class MissingTokenError(Exception):
pass
class MissingFilesError(Exception):
def __init__(self, missing_paths: List[Path]):
self.missing_paths = missing_paths
missing_paths_str = [str(p) for p in missing_paths]
missing_paths_joined = "\n" + "\n".join(missing_paths_str) + "\n"
super().__init__(f"Missing: {missing_paths_joined}")
class UnexpectedResponseError(Exception):
def __init__(self, response: requests.Response):
self.response = response
super().__init__(f"Unexpected response: {response.__dict__}")
class ReachedRetryLimitError(Exception):
pass
def upload_file( # pylint: disable=too-many-branches,too-many-nested-blocks,too-many-statements;
g: GithubApi, release: Release, file_path: Path # noqa: VNE001
) -> None:
log(f"\nUpload: {file_path.name}")
file_size = file_path.stat().st_size
retry_count = 0
wait_time = 2
if not release.tag_name:
raise AssertionError("Expected tag_name")
# Optimization:
# The v3 API does not always show assets that are in a bad state, but if the asset *does* exist with the correct
# size and state, then we can assume the asset was successfully uploaded.
# We use the existing |release| object, which means we might be able to skip making any further remote API calls.
try:
if g.verify_asset_size_and_state_via_v3_api(
file_name=file_path.name,
file_size=file_size,
tag_name=release.tag_name,
release=release,
):
return
except Exception: # pylint: disable=broad-except;
log_exception(
"Ignoring exception that occurred when trying to check asset status with the v3 API."
)
# Only exit the loop if we manage to verify that the asset has the expected size and state, or if we reach the retry
# limit.
while True:
# We use try-except liberally so that we always at least try to blindly upload the asset (towards the end of the
# loop), because this may well succeed and then the asset checking code may also be more likely to succeed on
# subsequent iterations.
# Optimization:
# The v3 API does not always show assets that are in a bad state, but if the asset *does* exist with the
# correct size and state, then we can assume the asset was successfully uploaded, without relying on
# undocumented behavior.
# We pass release=None, which forces a fresh fetch of the Release object.
try:
if g.verify_asset_size_and_state_via_v3_api(
file_name=file_path.name,
file_size=file_size,
tag_name=release.tag_name,
release=None,
):
return
except Exception: # pylint: disable=broad-except;
log_exception(
"Ignoring exception that occurred when trying to check asset status with the v3 API."
)
<filename>Voice Assistant/Jarvis.py
import datetime
import pickle
import smtplib
import subprocess
from GoogleNews import GoogleNews
import pywhatkit as kit
import tkinter as tk
from bs4 import BeautifulSoup
from getpass import getpass
import pytz
import time as tm
import speech_recognition as sr # importing speech recognition package from google api
import requests
import playsound # to play saved mp3 file
import wikipedia
from gtts import gTTS # google text to speech
import os # to save/open files
import wolframalpha # to calculate strings into formula, its a website which provides api, 100 times per day
from selenium import webdriver # to control browser operations
from selenium.webdriver.common.keys import Keys
from io import BytesIO
from io import StringIO
import webbrowser
# from webdriver_manager.chrome import ChromeDriverManager
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import pyautogui
import psutil
import pyjokes
import numpy as np
# import cv2
# import face_recognition
# from face_rec_test import *
# name = ''
#
# video_capture = cv2.VideoCapture(0)
#
# aryan_image = face_recognition.load_image_file('known/aryan.jpg')
#
# aryan_face_encoding = face_recognition.face_encodings(aryan_image)[0]
#
# known_face_encodings = [aryan_face_encoding]
# known_face_names = ['Aryan']
driver = webdriver.Chrome(
executable_path=r"C:\Users\Aryan\.wdm\drivers\chromedriver\chromedriver.exe")
SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']
MONTHS = ["january", "february", "march", "april", "may", "june", "july", "august", "september", "october", "november",
"december"]
DAYS = ["monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"]
DAY_EXT = ["rd", "th", "st", "nd"]
WAKE = "jarvis"
num = 1
webbrowser.Mozilla()
date = datetime.datetime.today().date().strftime('%d/%m/%Y')
googlenews = GoogleNews(lang='en', period='d', start=date, end=date)
root = tk.Tk()
pyautogui.hotkey('winleft', 'down')
def speak(output):
global num
num += 1
print("JARVIS: ", output)
toSpeak = gTTS(text=output, lang='en-US', slow=False)
file = str(num) + ".mp3"
toSpeak.save(file)
playsound.playsound(file, True)
os.remove(file)
def get_audio():
r = sr.Recognizer()
audio = ''
with sr.Microphone() as source:
print("Speak...")
audio = r.listen(source, phrase_time_limit=20)
print("Stop.")
try:
text = r.recognize_google(audio, language='en-in')
print("You: ", text)
return text.lower()
except:
# speak("Could not understand your audio, PLease try again!")
return '0'
def get_alarm_audio():
r = sr.Recognizer()
audio = ''
with sr.Microphone() as source:
print("Speak...")
audio = r.listen(source, phrase_time_limit=1)
print("Stop.")
try:
text = r.recognize_google(audio, language='en-in')
print("You: ", text)
return text.lower()
except:
# speak("Could not understand your audio, PLease try again!")
return '0'
def send_mail():
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.ehlo()
speak('What is the content?')
body = input('What is the content?')
server.login('email', 'wbjcfgttltoghllk')
subject = 'From JARVIS'
speak('To whom are you sending this? Please write below.')
receiver = input('To whom are you sending this?')
msg = f"Subject: {subject}\n\n{body}"
server.sendmail(
'JARVIS',
receiver,
msg
)
def search_web(input):
""" driver = webdriver.Edge()
driver.implicitly_wait(1)"""
global indx
if 'youtube' in input.lower():
speak("Opening in youtube")
indx = input.lower().split().index('youtube')
query = input.split()[indx + 1:]
if query[0] == 'for':
del query[0]
else:
pass
driver.get(url="http://www.youtube.com/results?search_query=" + '+'.join(query))
""" speak('What do you want to search?')
search_que = get_audio()
search_box = driver.find_element_by_xpath(
'//*[@id="search"]')
search_box.send_keys(search_que)
search_button = driver.find_element_by_xpath(
'//*[@id="search-icon-legacy"]')
search_button.click()"""
return
elif 'wikipedia' in input.lower() or 'who is' in input.lower() or 'what is' in input.lower():
speak("searching in wikipedia")
query = input.replace("wikipedia", "")
query = input.replace("who is", "")
query = input.replace("what is", "")
results = wikipedia.summary(query, sentences=2)
speak("According to wikipedia")
speak(results)
return
else:
if 'google' in input or 'internet' in input or 'web' in input:
try:
indx = input.lower().split().index('google')
query = input.split()[indx + 1:]
if query[0] == 'for':
del query[0]
else:
pass
webbrowser.open(url="https://www.google.com/search?q=" + '+'.join(query))
except ValueError:
webbrowser.open(url="https://www.google.com/search?q=" + '+'.join(input.split()))
elif 'search' in input:
try:
indx = input.lower().split().index('google')
except ValueError:
webbrowser.open(url="https://www.google.com/search?q=" + '+'.join(input.split()))
return
query = input.split()[indx + 1:]
if query[0] == 'for':
del query[0]
else:
pass
webbrowser.open(url="https://www.google.com/search?q=" + '+'.join(query))
else:
webbrowser.open(url="https://www.google.com/search?q=" + '+'.join(input.split()))
return
def note(txr):
da = datetime.datetime.now()
file_name = 'D:\\Voice Assistant\\notes\\' + str(da).replace(":", "-") + "-note.txt"
with open(file_name, "w") as f:
f.write(txr)
subprocess.Popen(["notepad.exe", file_name])
def authenticate_google():
"""Shows basic usage of the Google Calendar API.
Prints the start and name of the next 10 events on the user's calendar.
"""
creds = None
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('calendar', 'v3', credentials=creds)
return service
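# Illustrative usage (not part of the original script; assumes 'credentials.json' sits next to
# this file, as required by InstalledAppFlow above):
# service = authenticate_google()
# get_events(datetime.date.today(), service)  # speaks today's calendar events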
def get_events(day, service):
# Call the Calendar API
date = datetime.datetime.combine(day, datetime.datetime.min.time())
end_date = datetime.datetime.combine(day, datetime.datetime.max.time())
utc = pytz.UTC
date = date.astimezone(utc)
end_date = end_date.astimezone(utc)
events_result = service.events().list(calendarId='primary', timeMin=date.isoformat(), timeMax=end_date.isoformat(),
singleEvents=True,
orderBy='startTime').execute()
events = events_result.get('items', [])
if not events:
speak('No upcoming events found.')
else:
speak(f"You have {len(events)} events on this day.")
for event in events:
start = event['start'].get('dateTime', event['start'].get('date'))
print(start, event['summary'])
start_time = str(start.split("T")[1].split("-")[0])
if int(start_time.split(":")[0]) < 12:
start_time = start_time + "am"
else:
start_time = str(int(start_time.split(":")[0]) - 12) + ":" + start_time.split(":")[1]
start_time = start_time + "pm"
speak(event["summary"] + " at " + start_time)
def get_date(txr):
txr = txr.lower()
today = datetime.date.today()
if txr.count("today") > 0:
return today
day = -1
day_of_week = -1
month = -1
year = today.year
for word in txr.split():
if word in MONTHS:
month = MONTHS.index(word) + 1
elif word in DAYS:
day_of_week = DAYS.index(word)
elif word.isdigit():
day = int(word)
else:
for ext in DAY_EXT:
found = word.find(ext)
if found > 0:
try:
day = int(word[:found])
except Exception as e:
print(e)
pass
# THE NEW PART STARTS HERE
if month < today.month and month != -1: # if the month mentioned is before the current month set the year to the
# next
year = year + 1
# This is slightly different from the video, but it is the correct version
if month == -1 and day != -1: # if we didn't find a month, but we have a day
if day < today.day:
month = today.month + 1
else:
month = today.month
# if we only found a day of the week
if month == -1 and day == -1 and day_of_week != -1:
current_day_of_week = today.weekday()
dif = day_of_week - current_day_of_week
if dif < 0:
dif += 7
if txr.count("next") >= 1:
dif += 7
return today + datetime.timedelta(dif)
if day != -1: # FIXED FROM VIDEO
return datetime.date(month=month, day=day, year=year)
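# Illustrative examples (not part of the original script; exact results depend on today's date):
# get_date("today")                       # -> datetime.date.today()
# get_date("what do i have on june 5th")  # -> date(year, 6, 5), rolling to next year if June has passed
# get_date("what do i have next monday")  # -> the Monday after the coming one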
def open_application(inp):
if "chrome" in inp:
speak("Google Chrome")
os.startfile(r'C:\Program Files (x86)\Google\Chrome\Application\chrome.exe')
return
elif "firefox" in inp or "mozilla" in inp:
speak("Opening Mozilla Firefox")
os.startfile(r'C:\Program Files\Mozilla Firefox\\firefox.exe')
return
elif "word" in inp:
speak("Opening Microsoft Word")
os.startfile(r'C:\ProgramData\Microsoft\Windows\Start Menu\Programs\\Word')
return
elif "excel" in inp:
speak("Opening Microsoft Excel")
os.startfile(r'C:\ProgramData\Microsoft\Windows\Start Menu\Programs\\Excel')
return
else:
speak("Application not available")
return
def open_link(url):
driver.get(url)
"""def get_news(news):
googlenews.clear()
googlenews.search(str(news))
googlenews.getpage(1)
result = googlenews.result()
art = dict()
root.geometry('400x400')
for i in range(len(result)):
title = result[i]['title']
desc = result[i]['desc']
url = result[i]['link']
source = result[i]['media']
a_date = result[i]['date']
art[title] = title, desc, source, a_date, url
list_temp = [title, desc, a_date, source]
tk.Button(root, text='\n'.join(list_temp), command=lambda: open_link(url=url)).pack()
tk.Scrollbar(root).pack(side='right', fill='y')
root.mainloop()
root.title('News')"""
def time():
ct = [int(datetime.datetime.now().strftime('%I')), int(datetime.datetime.now().strftime('%M')),
datetime.datetime.now().strftime('%p')]
speak('The current time is: ')
speak(' '.join(map(str, ct)))
def date():
dt = [int(datetime.datetime.now().day), int(datetime.datetime.now().month), int(datetime.datetime.now().year)]
speak('The current date is: ')
speak('/'.join(map(str, dt)))
def play_song(song_name):
print('d')
song_name = song_name.lower()
print('q')
song = song_name.replace('play', '')
print(song)
kit.playonyt(song)
print('r')
def satw(path, number):
tm.sleep(6)
webbrowser.open(f'https://web.whatsapp.com/send?phone={number}')
pyautogui.hotkey('winleft', 'up')
pyautogui.click(2839, 173)
pyautogui.click(2858, 281)
pyautogui.typewrite(['*', '.', '*', 'enter'])
pyautogui.typewrite(path)
pyautogui.typewrite(['enter'])
tm.sleep(2)
pyautogui.click(2836, 1869)
def wish_me(name):
hour = datetime.datetime.now().hour
if 6 <= hour < 12:
speak(f'Good Morning! Welcome back, {name}!')
elif 12 <= hour < 18:
speak(f'Good Afternoon! Welcome back, {name}!')
elif 18 <= hour < 22:
speak(f'Good Evening! Welcome Back, {name}!')
else:
speak('Good Night! Welcome Back!')
speak('I am Jarvis your Personal Assistant.')
def screenshot():
da = datetime.datetime.now()
file_name = 'D:\\Voice Assistant\\photos\\' + str(da).replace(":", "-") + "-screenshot.png"
img = pyautogui.screenshot()
img.save(file_name)
def cpu():
usage = str(psutil.cpu_percent())
speak('CPU is at ' + usage + ' percent.')
battery = str(psutil.sensors_battery().percent)
if psutil.sensors_battery().power_plugged:
speak('Battery is at ' + battery + ' percent and is plugged in.')
else:
speak('Battery is at ' + battery + ' percent and is not plugged in.')
def jokes():
speak(pyjokes.get_joke())
def process_text(inp):
service = authenticate_google()
inp = inp.lower()
try:
if "who are you" in inp or "define yourself" in inp:
te = '''Hello, I am Jarvis. Your personal Assistant.
I am here to make your life easier.
You can command me to perform various tasks such as calculating sums or opening applications, et cetera.'''
speak(te)
return
elif "who made you" in inp or "created you" in inp:
te = "I have been created by Aryan."
speak(te)
return
elif "crazy" in inp:
te = """Well, there are many mental asylums in the world."""
speak(te)
return
elif 'news' in inp:
speak("I didn't quite understand what I should search for. | |
<filename>reference/parson/eg_mutagen_from_js.py<gh_stars>1-10
"""
Translate from Zarf's Javascript (the subset used) to my Mutagen
grammar syntax.
http://www.eblong.com/zarf/mutagen/mutagen.js
"""
import re
from parson import Grammar
def translate(grammar):
for x, y in reversed(g.grammar(grammar)):
print x, '=', y
g = Grammar(r"""
grammar = _ defn*.
defn : var '='_ exp ';'_ :hug.
exp : 'Choice' args :mk_choice
| 'Fixed' args :mk_fixed
| 'Sequence' args :mk_sequence
| 'Shuffle' args :mk_shuffle
| 'Weighted' args :mk_weighted
| /Period\b/_ :'.'
| /Comma\b/_ :','
| /Semicolon\b/_ :';'
| /Dash\b/_ :'--'
| /AAn\b/_ :'-a-an-'
| /Concat\b/_ :'-adjoining-'
| /null\b/_ :'()'
| var
| string
| int.
args : '('_ exps? ')'_.
exps : exp (','_ exps)*.
var : /([A-Za-z_]\w*)/_ :mk_var.
int : /(\d+)/ :int.
string : '"' qchar* '"'_ :join.
qchar : !/["\\]/ /(.)/.
_ : (space | comment)*.
space : /\s+/.
comment : '/*' (!'*/' anyone)* '*/'.
anyone : /./ | /\n/. # Ugh.
""")(mk_var = lambda s: '-'+'-'.join(parse_camel(s))+'-',
mk_choice = lambda *xs: ' / '.join(xs),
mk_fixed = lambda tag, choice: '%s{ %s }' % (tag, choice),
mk_sequence = lambda *xs: ' '.join(map(wrap, xs)),
mk_shuffle = lambda *xs: '{ %s }' % (' / '.join(xs)),
mk_weighted = lambda *spairs: ' / '.join('[%s] %s' % (w, wrap(x))
for w, x in zip(spairs[0::2],
spairs[1::2])),
)
def wrap(x):
return '(%s)' % x if ' / ' in x else x
def parse_camel(s):
assert re.match(r'([A-Z][a-z]*)*$', s)
return [part.lower() for part in re.findall(r'[A-Z][a-z]*', s)]
## parse_camel('HiThere')
#. ['hi', 'there']
## g.grammar('A = "hi";')
#. (('-a-', 'hi'),)
## g.grammar('A = Choice();')
#. (('-a-', ''),)
## g.grammar('A = Choice("hi");')
#. (('-a-', 'hi'),)
## g.grammar('A = Choice("hi", "there"); B = A;')
#. (('-a-', 'hi / there'), ('-b-', '-a-'))
## goreyfate = open('mutagen/goreyfate.js').read()
## translate(goreyfate)
#. -gorey-fate- = [2] -person-description- -action- -time- / [2] -time- -time-comma- -person-description- -action- / [1] it was -time- that -person-description- -action-
#. -action- = -passive-action- / -active-action-
#. -active-action- = -active-action-word- -active-action-prep- -a-an- ([1] -target-air- / [2] ()) ([1] -target-age- / [2] ()) -active-action-target-
#. -target-age- = old / moldering / aged / antiquated
#. -target-air- = disreputable / peculiar / mysterious / banal
#. -active-action-target- = altitude{ -active-action-target-hi- / -active-action-target-lo- }
#. -active-action-target-lo- = well / hole / cave / oubliette / cellar / pit
#. -active-action-target-hi- = tower / cliff / ruin / pillar / treehouse / garret
#. -active-action-prep- = altitude{ -active-action-prep-hi- / -active-action-prep-lo- }
#. -active-action-prep-lo- = down / into
#. -active-action-prep-hi- = down from / off / from
#. -active-action-word- = fell / tumbled / disappeared / plummeted / vanished / dropped
#. -passive-action- = -passive-action-word- ([2] -passive-action-qualifier- / [3] ())
#. -passive-action-qualifier- = away / at sea / without a trace / unexpectedly / mysteriously / into -action-result- / away into -action-result-
#. -action-result- = -dest-noun- / -dest-modifier- -dest-noun- / -a-an- -dest-noun- / -a-an- -dest-modifier- -dest-noun- / -a-an- -dest-form- of -dest-noun- / -a-an- -dest-form- of -dest-modifier- -dest-noun- / -a-an- -dest-modifier- -dest-form- of -dest-noun-
#. -dest-form- = solidity{ puddle / bucket / vat / heap / cloud / waft }
#. -dest-modifier- = noisome / pearlescent / foul / fetid / glittering / dark / briny / glistening / cloying
#. -dest-noun- = solidity{ slime / stew / secretion / mist / smoke / dust / vapor }
#. -passive-action-word- = exploded / vaporized / melted / sublimated / evaporated / transformed / calcified / vanished / faded / disappeared / shrivelled / bloated / liquefied / was lost / was misplaced / was bartered
#. -time-comma- = longtime{ -maybe-comma- / , / -maybe-comma- / -maybe-comma- / -maybe-comma- / -maybe-comma- / -maybe-comma- / , }
#. -maybe-comma- = [2] () / [1] ,
#. -time- = longtime{ one -day-weather- -day-part- / one -day-weather- -day-part- last -time-unit- / last -day-of-week- / last -time-unit- / -a-an- -time-unit- ago / on -holiday- / last -holiday- / -a-an- -time-unit- ago -holiday- / -two-to-six- -time-unit- -adjoining- s ago / -travel-time- }
#. -travel-time- = ([2] while / [1] whilst) (on safari to / exploring / on an expedition to / hunting in / on sabbatical in) -travel-place-
#. -travel-place- = Mozambique / Uganda / the Seychelles / the Vatican / Peoria / Borneo / Antarctica / Somerville / Northumberland / Saxony / Brugges / Gondwanaland
#. -holiday- = Christmas / Boxing Day / St. Swithin's Day
#. -day-of-week- = Monday / Tuesday / Wednesday / Thursday / Friday / Saturday
#. -day-part- = day / afternoon / morning / evening
#. -time-unit- = week / month / season
#. -day-weather- = [1] (rainy / foggy / blistering / blustery / gloomy / dank) / [2] ()
#. -two-to-six- = two / three / four / five / six / some
#. -person-description- = -name- ([2] -comma-description-phrase- / [1] ())
#. -comma-description-phrase- = , -a-an- ([1] -person-adjective- / [1] ()) -descriptor- ([1] -descriptor-modifier- / [2] ()) ,
#. -descriptor-modifier- = of -intensifier- (perspicacity / fortitude / passion / wit / perception / presence of mind)
#. -descriptor- = [1] -neutral-descriptor- / [1] (gender{ -male-descriptor- / -female-descriptor- })
#. -female-descriptor- = young miss / girl / maiden / flapper
#. -male-descriptor- = stalwart / gentleman / boy / youth
#. -neutral-descriptor- = toddler / aesthete / writer / artist
#. -intensifier- = great / some / considerable / not inconsiderable / distinct / impressive / unique / notable
#. -person-adjective- = precocious / unflappable / energetic / forceful / inimitable / daring / mild / intense / jaded
#. -he-she- = gender{ he / she }
#. -name- = gender{ -male-name- / -female-name- }
#. -male-name- = Bernard / Joseph / Emmett / Ogden / Eugene / Xerxes / Joshua / Lemuel / Etienne
#. -female-name- = Emmalissa / Chloe / Tiffani / Eunice / Zoe / Jennifer / Imelda / Yvette / Melantha
## wake = open('mutagen/wake.js').read()
## translate(wake)
#. -wake-root- = -drinking-sentence- / -river-march-sentence-
#. -drinking-sentence- = -drinking-sub-sentence- ([1] () / [1] -action-gloss-)
#. -drinking-sub-sentence- = -people- -drink-action- / -people- -side-action- and -drink-action- / -people- -side-action- , -side-action- , and -drink-action-
#. -river-march-sentence- = -people- -march-verb- (along / up / down) -river-road- ([1] () / [1] -action-gloss-) / (along / up / down) -river-road- ([1] () / [1] -locale-gloss-) -march-verb- -people- ([1] () / [1] -action-gloss-)
#. -action-gloss- = , ((spreading / sowing / leaving) (chaos / disaster / wrack and disaster) in their wake / (turning / transfiguring / transforming) (the world / all around / everything) into -a-an- (circus / carnival) (() / () / of their own making / of their own design) / (undertaking / preparing / investing) their best (effort / determination / and their most impassioned / () / ()) / as if (the world were their -bivalve- / nothing could be finer / the world had been made for them / they had been created for this day) / (at almost / almost at / at nearly / nearly at / approaching / having achieved) the end of -a-journey- / making the best of -a-journey- / (refusing / denying / disavowing / repudiating) (any / all / the slightest) thought of consequence (() / or causality) / making -a-an- (() / () / holy / unholy / disreputable) (spectacle / display / curiosity) of themselves (() / and their cause / and everyone around them)) ,
#. -side-action- = -side-verb- / -side-verb- / -side-verb- / -side-verb- with (great / some / () / () / ()) (vigor / vim / energy / gusto / enthusiasm) / -tone-adv- -side-verb- / -side-verb- -tone-adv- / -side-verb- (most / quite) -tone-adv-
#. -side-verb- = { stood / congregated / argued / fought / expostulated / shoved / prayed / sang / danced / lectured / remonstrated / pounded the table / banged their mugs / insulted each other / told stories / remembered old times / demanded satisfaction }
#. -drink-action- = -drank- -whiskey- / (drank / imbibed) prodigiously / -drank- prodigious quantities of -whiskey-
#. -drank- = drank / consumed / imbibed / partook of
#. -whiskey- = their whiskey / whiskey / poteen / moonshine / spirits / alcoholic spirits / strong drink / liquor / strong liquor
#. -a-journey- = a journey / a difficult journey / a long and difficult journey / their travels / their travails / (a / a sort of) journey (I need not describe / that | |
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.db import connection
from django.db import IntegrityError
from django.utils.text import slugify
from django.http import HttpResponse, JsonResponse
from decimal import Decimal
from build.management.commands.base_build import Command as BaseBuild
from build.management.commands.build_ligand_functions import *
from common.tools import fetch_from_cache, save_to_cache, fetch_from_web_api
from common.models import WebLink, WebResource, Publication
from protein.models import Protein, ProteinCouplings
from ligand.models import Ligand, LigandType, Endogenous_GTP, BiasedData, BalancedLigands
from ligand.functions import OnTheFly
import logging
import math
import pandas as pd
import numpy as np
import os
import traceback
import time
import requests
import timeit
# The pEC50 is defined as the negative logarithm of the EC50
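# Worked example (illustrative): pEC50 = 7.5 corresponds to EC50 = 10**(-7.5) M ≈ 3.16e-8 M ≈ 31.6 nM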
MISSING_PROTEINS = {}
SKIPPED = 0
#Copying code structure from Alibek upload_excel_bias data script
class Command(BaseBuild):
mylog = logging.getLogger(__name__)
mylog.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
file_handler = logging.FileHandler('biasDataTest.log')
file_handler.setLevel(logging.ERROR)
file_handler.setFormatter(formatter)
mylog.addHandler(file_handler)
help = 'Reads bias data and imports it'
structure_data_dir = os.sep.join([settings.DATA_DIR, 'ligand_data', 'bias_data'])
cell_data_dir = os.sep.join([settings.DATA_DIR, 'ligand_data', 'cell_line'])
publication_cache = {}
cell_cache = {}
ligand_cache = {}
data_all = []
def add_arguments(self, parser):
parser.add_argument('-p', '--proc',
type=int,
action='store',
dest='proc',
default=1,
help='Number of processes to run')
parser.add_argument('-f', '--filename',
action='append',
dest='filename',
help='Filename to import. Can be used multiple times')
parser.add_argument('-u', '--purge',
action='store_true',
dest='purge',
default=False,
help='Purge existing bias records')
parser.add_argument('--test_run', action='store_true', help='Skip this during a test run',
default=False)
def handle(self, *args, **options):
if options['test_run']:
print('Skipping in test run')
return
# delete any existing structure data
if options['purge']:
try:
print('Started purging bias and balanced data')
Command.purge_bias_data()
print('Ended purging bias and balanced data')
except Exception as msg:
print(msg)
self.logger.error(msg)
# import the structure data
Command.prepare_all_data()
@staticmethod
def purge_bias_data():
delete_bias_excel = BiasedData.objects.all() #New Model Biased Data
delete_balanced_ligands = BalancedLigands.objects.all() #New Model Balanced Ligands
delete_bias_excel.delete()
delete_balanced_ligands.delete()
@staticmethod
def prepare_all_data():
start = timeit.default_timer()
print('**** Stage #1: reading Excel ****')
bias_data = Command.read_excel_pandas(Command.structure_data_dir, 'Biased_ligand_single_pathway_data.xlsx')
cell_data = Command.read_excel_pandas(Command.cell_data_dir, 'Cell_lines.xlsx')
print('**** Stage #2: parsing Excel ****')
df_from_excel = Command.convert_df_to_dict(bias_data)
print('**** Stage #3: processing & uploading data ****')
Command.main_process(df_from_excel, cell_data)
print('**** Stage #4: updating physiology biased ligands')
Command.update_biased_columns()
print('**** Stage #5: updating physiology biased (subtypes) ligands')
Command.update_biased_columns(subtype=True)
print('**** Stage #6: updating pathway preference')
Command.update_biased_columns(pathway=True)
print('**** Stage #7: creating Balanced Ligand database')
Command.model_assemble()
print('**** Stage #8: updating biased ligands using balanced reference')
Command.update_biased_columns(balanced=True)
print('**** Stage #9: updating biased ligands using balanced reference (subtype)')
Command.update_biased_columns(subtype=True, balanced=True)
stop = timeit.default_timer()
print('Total Time:', stop - start)
@staticmethod
def read_excel_pandas(datadir, filename):
source_file_path = os.sep.join([datadir, filename]).replace('//', '/')
xls = pd.ExcelFile(source_file_path)
df = pd.read_excel(xls, 'Data', dtype=str)
return df
@staticmethod
def convert_df_to_dict(df):
#Remove excel rows that have no Emax value, an EC50 with an “=” sign, and unit “nM” (38 rows)
df.drop(df[(df['Alt 1)\nQuantitative efficacy'].isna()) & (df['>\n<\n=\n~'] == '=') & (df['Unit'] == 'nM')].index, inplace=True)
#cast everything to str
df = df.astype(str)
#cast NaN to none
for column in df:
df[column] = df[column].replace({'nan':None})
#convert pandas df into list of dicts
return_list_of_dicts = df.to_dict('records')
return return_list_of_dicts
@staticmethod
def main_process(df_from_excel, cell):
prot_dict = {}
gprot_dict = {}
lig_dict = {}
for d in df_from_excel:
#checking data values: float, string and low_activity checks
d = Command.data_checks(d)
#converting pEC50 to EC50. Normalizing to Molar
d['Alt 1)\nQuantitative activity'], d['Measure type'], d['>\n<\n=\n~'] = Command.fetch_measurements(potency=d['Alt 1)\nQuantitative activity'], p_type= d['Measure type'], unit = d['Unit'], sign = d['>\n<\n=\n~'])
#fetch protein name - check for empty lines
if d['Receptor\nUniProt entry name or code'] == None:
continue
else:
prot_code = d['Receptor\nUniProt entry name or code'].lower()
if prot_code not in prot_dict:
prot_dict[prot_code] = Command.fetch_protein(prot_code)
protein = prot_dict[prot_code]
if protein == None:
continue # Skip if protein not found
#re-labeling "G protein" and "Gq/11 or Gi/o" based on data from the GProtein Couplings db
if d['Primary effector family'] == 'G protein':
if protein.id not in gprot_dict:
gprot_dict[protein.id] = Command.fetch_g_protein(protein.id)
d['Primary effector family'] = gprot_dict[protein.id]
#fetching publication info
pub = Command.fetch_publication(d['Reference\nDOI or PMID'])
#fetching the tissue and specie info from the excel sheet
species, tissue = Command.fetch_cell_line_info(d['Cell line'], cell)
#fetching ligand information
types = {"PubChem CID":"pubchem", "SMILES": "smiles", "IUPHAR/BPS Guide to pharmacology": "gtoplig"}
if d['ID'] != None:
if d['ID type'] != None:
key = d['ID'] + "|" + d['ID type']
else:
key = d['ID'] + "|None"
if key in lig_dict:
l = lig_dict[key]
else:
ids = {}
if d['ID type'] in types:
if isinstance(d['ID'], list):
ids[types[d['ID type']]] = d['ID'][0]
else:
ids[types[d['ID type']]] = d['ID']
elif d['ID type'] == "PubChem SID":
# Try to resolve SID to CID
cid = resolve_pubchem_SID(d['ID'])
if cid != None:
ids["pubchem"] = cid
l = get_or_create_ligand(d['Ligand tested for bias or func. Sel.\nName'], ids, "small-molecule", False, True)
lig_dict[key] = l
# What about the other ligand => use as reference?
# if d['ID.1'] != None:
# ids = {}
# if d['ID type.1'] in types:
# ids[types[d['ID type.1']]] = d['ID.1']
# ligand = get_or_create_ligand(d['Emax reference ligand\nName'], ids)
#Fetching from the endogenous excel datasheet (will need to be updated with GtoP data)
#also need to be added some parameter to check onto (not all our data has PubChem CIDs)
#translate SMILES into CID?
# if d['ID type'] == 'PubChem CID':
endogenous_status = Command.fetch_endogenous(protein.id, l.id)
signalling_protein = d['Primary effector subtype']
try:
signalling_protein = signalling_protein.strip().replace('α','a').replace('β','B').replace('g','G').lower()
except:
signalling_protein = None
#assessing EC50 value (No value in case of IC50):
EC50 = None
if d['Measure type'] != 'IC50':
EC50 = d['Alt 1)\nQuantitative activity']
experiment_data= BiasedData(
ligand = l,
publication = pub,
experiment = d['Fig./table No. '], #to be added by Kasper (TBA)
endogenous_status = endogenous_status, #should come from the endogenous ligand browser; currently fetched from the excel datasheet
receptor = protein,
receptor_isoform = d['UniProt identifier code (isoform)'],
active_receptor_complex = d['GtoP receptor name'],
cell_line = d['Cell line'],
tissue = tissue,
species = species,
primary_effector_family = d['Primary effector family'],
primary_effector_subtype = signalling_protein,
molecule_1 = d['Measured molecule 1'],
molecule_2 = d['Measured molecule 2'],
measured_process = d['Measured process'],
pathway_level = d['Pathway level'],
assay_type = d['Assay type'],
EC50 = EC50,
EC50_sign = d['>\n<\n=\n~'],
qualitative_activity=d['Alt 2)\nQualitative activity'],
Emax = d['Alt 1)\nQuantitative efficacy'],
Emax_sign = d['>\n<\n=\n~.1'],
Tau_KA=d['Transduction Coefficient [log(τ/KA)]'],
delta_Tau_KA=d['Relative Transduction Coefficient [Δlog(τ/KA)]'],
time_resolved=d['Time resolved'],
)
experiment_data.save()
@staticmethod
def data_checks(data):
#floats check
floaters = ['Alt 1)\nQuantitative activity', 'Alt 1)\nQuantitative efficacy', 'Transduction Coefficient [log(τ/KA)]', 'Relative Transduction Coefficient [Δlog(τ/KA)]']
for key in floaters:
try:
data[key] = float(data[key])
except (TypeError, ValueError):
data[key] = None
try: ## should be 166 rows
if data['Alt 1)\nQuantitative activity'] < 5 and data['Measure type'] == 'pEC50' and data['Alt 1)\nQuantitative efficacy'] > 0.0:
data['Alt 1)\nQuantitative activity'] = 4.9
except TypeError:
pass
try: ## relabeling qualitative activity when EC50 but no Emax/log(Tau/KA)
if data['Measure type'] == 'EC50' or data['Measure type'] == 'pEC50':
if (data['Alt 1)\nQuantitative efficacy'] == None) and (data['Transduction Coefficient [log(τ/KA)]'] == None) and (data['Alt 1)\nQuantitative activity'] is not None):
data['Alt 2)\nQualitative activity'] = 'No activity'
except TypeError:
pass
try: ## relabeling qualitative activity when IC50 or pIC50
if data['Measure type'] == 'pIC50' or data['Measure type'] == 'IC50':
data['Alt 2)\nQualitative activity'] = 'No activity'
except TypeError:
pass
#low activity check
try:
if data['Alt 2)\nQualitative activity'].lower() == 'low activity': ###131 total rows
if data['Alt 1)\nQuantitative efficacy'] == None or data['Alt 1)\nQuantitative efficacy'] == 0.0: ##9 rows
data['Alt 1)\nQuantitative activity'] = 4.9
data['Measure type'] = 'pEC50'
data['Alt 1)\nQuantitative efficacy'] = 20
data['Alt 2)\nQualitative activity'] = None
else: ##122 rows, changing a lot
data['Alt 1)\nQuantitative activity'] = 4.9
data['Measure type'] = 'pEC50'
data['Alt 2)\nQualitative activity'] = None
except AttributeError:
pass
#string check
try:
data['Unit'] = str(data['Unit'])
except ValueError:
pass
return data
@staticmethod
def fetch_measurements(potency, p_type, unit, sign):
if potency is not None:
if p_type.lower() == 'pec50':
potency = 10**(potency*(-1))
p_type = 'EC50'
if sign == '<':
sign = '>'
elif sign == '>':
sign = '<'
elif p_type.lower() == 'logec50':
potency = 10**(potency)
p_type = 'EC50'
elif p_type.lower() == 'pic50':
potency = 10**(potency*(-1))
p_type = 'IC50'
elif p_type.lower() == 'logic50':
potency = 10**(potency)
p_type = 'IC50'
if potency is not None:
if unit:
if p_type.lower() == 'ec50':
if unit.lower() == 'nm':
potency = potency* 10**(-9)
elif unit.lower() == 'µm':
potency = potency* 10**(-6)
elif unit.lower() == 'pm':
potency = potency* 10**(-12)
elif unit.lower() == 'mm':
potency = potency* 10**(-3)
if p_type.lower() == 'ic50':
if unit.lower() == 'nm':
potency = potency* 10**(-9)
elif unit.lower() == 'µm':
potency = potency* 10**(-6)
elif unit.lower() == 'pm':
potency | |
import torch
from torch.nn.modules import Module
import cutlassconv_cuda
import int8pool_cuda
import int8mm_cuda
import int8conv_cuda
import torch.nn.functional as F
import time
from options import args
class UpdateWeight(Module):
"""
The parameter update step is the same for both TiConv2d and TiLinear,
so one base class holds the common function
"""
def weight_update(self):
p = self.weight
""" vanilla SGD """
self.grad, grad_shift = grad_calc(self.grad_int32acc, GRAD_BITWIDTH)
self.grad_exp = self.err_exp + grad_shift + self.act_in_exp
p.data = int8_clip(p.type(torch.int16)-self.grad.type(torch.int16))
def RoundShift(input, shift):
'''
Shift the input using
round-to-nearest (not stochastic) rounding
'''
round_temp = input//(2**shift)
prob = input - round_temp * (2**shift)
round_decision = prob//(2**(shift-1))
return int8_clip(round_temp + round_decision)
def StoShift(input, shift):
'''
Shift the input using
stochastic rounding
'''
tensor_type = input.dtype
round_temp = input//(2**shift)
prob = torch.abs(input - round_temp * (2**shift))
rand_num = torch.randint(low = 0, high=2**shift,size=prob.size(), dtype = tensor_type, device='cuda')
round_decision = torch.where(prob <= rand_num,
torch.tensor(0,dtype=tensor_type,device='cuda'),
torch.tensor(1,dtype=tensor_type,device='cuda'))
round_decision = round_decision * torch.sign(input)
return int8_clip(round_temp + round_decision)
def PstoShift(input, shift):
'''
Shift the input using our
new pseudo stochastic rounding
'''
round_temp = input//(2**shift)
# stochastic rounding
# but use the extra precision as pseudo random number
prob = torch.abs(input - round_temp * (2**shift))
quantized_prob = prob//(2**(shift//2))
pseudo_rand_num = prob - quantized_prob*(2**(shift//2))
# if shift is odd, need to make sure
# qprob and prand have same bit width
if shift % 2 == 1:
pseudo_rand_num = pseudo_rand_num*2
round_decision = torch.where(quantized_prob <= pseudo_rand_num,
torch.tensor(0,dtype=torch.int32,device='cuda'),
torch.tensor(1,dtype=torch.int32,device='cuda'))
round_decision = round_decision * torch.sign(input)
return int8_clip(round_temp + round_decision)
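# Worked example (illustrative values, not from the original tests): for an element equal to 300
# with shift = 4: round_temp = 300 // 16 = 18, prob = 300 - 288 = 12, quantized_prob = 12 // 4 = 3,
# pseudo_rand_num = 12 - 3*4 = 0; since 3 <= 0 is False, round_decision = 1 and the element
# becomes int8_clip(18 + 1) = 19.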
def int8_clip(input, clip_val=127):
return torch.clamp(input,-clip_val, clip_val).type(torch.int8)
def Int8Tensor(val):
return torch.tensor(val,dtype=torch.int8).to(GPU)
def UniformWeights(size):
if len(size) == 4:
temp = torch.zeros(size).permute(0,3,1,2).contiguous()
torch.nn.init.xavier_uniform_(temp)
return weight_quant(temp.permute(0,2,3,1).contiguous())
else:
temp = torch.zeros(size)
torch.nn.init.xavier_uniform_(temp)
return weight_quant(temp)
def NormalWeight(size):
if len(size) == 4:
temp = torch.zeros(size).permute(0,3,1,2).contiguous()
torch.nn.init.xavier_normal_(temp)
return weight_quant(temp.permute(0,2,3,1).contiguous())
else:
temp = torch.zeros(size)
torch.nn.init.xavier_normal_(temp)
return weight_quant(temp)
def Int8zeros(size):
return torch.zeros(size=size, dtype=torch.int8, device=GPU)
def act_calc(int32_acc, exp_in):
'''
calculate the exponent value of accumulation results
when shifting the int32 back to int8
'''
int32_bitwidth = RangeEstimate(int32_acc)
shift = int32_bitwidth-BITWIDTH
if shift > 0:
exp_out = exp_in+shift
temp = ACT_ROUND_METHOD(int32_acc, shift)
else:
exp_out=exp_in
temp = int32_acc.type(torch.int8)
return temp, exp_out
def err_calc(int32_acc):
'''
calculate the exponent value of accumulation results
when shifting the int32 back to int8
'''
int32_bitwidth = RangeEstimate(int32_acc)
shift = int32_bitwidth-BITWIDTH
if shift > 0:
temp =ERROR_ROUND_METHOD(int32_acc, shift)
exp_out = shift
else:
temp = int32_acc.type(torch.int8)
exp_out= 0
return temp, exp_out
def grad_calc(int32_acc, mu):
'''
calculate the exponent value of accumulation results
when shifting the int32 back to int8
'''
int32_bitwidth = RangeEstimate(int32_acc)
shift = int32_bitwidth-mu
if int32_bitwidth == 0:
return Int8zeros(int32_acc.size()), 0
elif shift < 1:
return int32_acc.type(torch.int8), 0
else:
return GRAD_ROUND_METHOD(int32_acc,int32_bitwidth-mu), shift
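# Illustrative example (assumption: mu is the target gradient bitwidth, e.g. GRAD_BITWIDTH = 8):
# if the int32 accumulator spans 12 bits, shift = 12 - 8 = 4, so the gradient is rescaled by
# GRAD_ROUND_METHOD with a 4-bit shift and the returned shift of 4 is later added to grad_exp.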
def roundoff4(size):
return (size+3) // 4 * 4
def int8mm(lhs,rhs): # the cuda extension only supports n,k as a multiple of 4
# use torch.nn.functional.pad to pad with 0 if these dimensions don't satisfy the
# requirement
k = roundoff4(lhs.size(1))
n = roundoff4(rhs.size(1))
k_diff = k - lhs.size(1)
n_diff = n - rhs.size(1)
if k!=lhs.size(1):
A = F.pad(lhs, (0, k_diff, 0, 0), "constant", 0)
else:
A = lhs
if k!=lhs.size(1) or n!=rhs.size(1):
B = F.pad(rhs, (0, n_diff, 0, k_diff), "constant", 0)
else:
B = rhs
temp = int8mm_cuda.int8_mm(A, B)
if n!=rhs.size(1):
temp = temp [:lhs.size(0),:rhs.size(1)]
return temp.contiguous()
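# Illustrative usage (tensors below are examples only, not part of the original module):
# lhs = torch.randint(-128, 127, (32, 10), dtype=torch.int8, device='cuda')
# rhs = torch.randint(-128, 127, (10, 6), dtype=torch.int8, device='cuda')
# out = int8mm(lhs, rhs)  # k=10 is padded to 12 and n=6 to 8; the result is cropped back to (32, 6)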
def conv2d_int8(input, weight, stride=1, padding=1):
""" only input channel(tensor.size(3)) is a multiple of 16"""
if input.size(3) % 16 != 0:
padding_channels = 16 - input.size(3) % 16
input_padded = F.pad(input, (0, padding_channels),"constant", 0)
weight_padded = F.pad(weight, (0, padding_channels),"constant", 0)
else:
input_padded = input
weight_padded = weight
if weight.size(1) <= 32:
temp = cutlassconv_cuda.sp_conv_optimized(input_padded, weight_padded, stride, padding)
else:
temp = cutlassconv_cuda.sp_conv(input_padded, weight_padded, stride, padding)
return temp
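# Illustrative usage (assumption: NHWC int8 tensors on the GPU, shapes chosen for clarity):
# x = torch.randint(-128, 127, (8, 32, 32, 3), dtype=torch.int8, device='cuda')  # N,H,W,C
# w = torch.randint(-128, 127, (16, 3, 3, 3), dtype=torch.int8, device='cuda')   # O,kH,kW,C
# y = conv2d_int8(x, w)  # the channel dim is zero-padded from 3 to 16 before the CUDA kernel runs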
def conv2d_weight_int8(input, weight_size, grad_output, stride=1, padding=0, dilation=1):
in_channels = input.shape[3]
out_channels = grad_output.shape[3]
min_batch = input.shape[0]
grad_output = grad_output.permute(0,3,1,2)
grad_output = grad_output.contiguous().repeat(1, in_channels, 1, 1)
grad_output = grad_output.contiguous().reshape(grad_output.shape[0] * grad_output.shape[1],
1,
grad_output.shape[2],
grad_output.shape[3])
grad_output = grad_output.permute(0,2,3,1).contiguous()
input = input.permute(0,3,1,2)
input= input.contiguous().reshape(1,
input.shape[0] * input.shape[1],
input.shape[2],
input.shape[3])
input = input.permute(0,2,3,1).contiguous()
grad_weight = int8conv_cuda.group_conv(input,
grad_output,
dilation,
padding,
stride,
in_channels * min_batch)
grad_weight = grad_weight.permute(0,3,1,2)
grad_weight = grad_weight.view(
min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2],
grad_weight.shape[3])
grad_weight = grad_weight.sum(dim=0).view(
in_channels, out_channels,
grad_weight.shape[2], grad_weight.shape[3]).transpose(0, 1).narrow(
2, 0, weight_size[1]).narrow(3, 0, weight_size[2])
grad_weight = grad_weight.permute(0,2,3,1).contiguous()
return grad_weight
class TiLinear(UpdateWeight):
def __init__(self,in_features,out_features, last_layer = False):
super(TiLinear, self).__init__()
self.weight, self.weight_exp = WEIGHT_INIT_METHOD(torch.Size([out_features,in_features]))
self.last_layer = last_layer
def forward(self,input):
""" save activation for backwards """
self.act_in = input
act_in, exp_in = input
temp = int8mm(act_in, self.weight.transpose(0,1).contiguous())
# if self.last_layer:
# act_out, exp_out = temp, exp_in+self.weight_exp
# else:
act_out, exp_out = act_calc(temp,exp_in+self.weight_exp)
return act_out, exp_out
def backward(self, input):
err_in, self.err_exp = input
act, self.act_in_exp = self.act_in
err_out_int32 = int8mm(err_in, self.weight)
err_out, shift_bits = err_calc(err_out_int32)
self.err_exp += (shift_bits + self.weight_exp)
self.grad_int32acc = int8mm(err_in.transpose(0,1).contiguous(), act)
self.weight_update()
return err_out, self.err_exp
class TiReLU(Module):
'''
Integer ReLU layer
'''
def forward(self, input):
'''
Compare the input integer tensor with its zero point
'''
self.act_in = input
act_in, exp_in = input
act_out = torch.max(act_in,Int8Tensor([0]))
return act_out, exp_in
def backward(self, input):
'''
Backward pass for ReLU
if the saved input activation is bigger than 0, propagate the error input
else propagate 0
'''
err_in, err_exp = input
act, _ = self.act_in
err_out = torch.where(act > Int8Tensor([0]), err_in, Int8Tensor([0]))
return err_out, err_exp
class TiMaxpool2d(Module):
'''
Integer Max Pooling 2d Layer
'''
def __init__(self, kernel_size, stride, padding=0):
super(TiMaxpool2d, self).__init__()
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
def forward(self,input):
self.act_in, exp_in = input
self.act_out = int8pool_cuda.int8_pool(self.act_in, self.kernel_size, self.stride, self.padding)
return self.act_out, exp_in
def backward(self, input):
""" cudnn pool backward function doesn't support int8, use half instead"""
err_in, err_exp = input
err_out = int8pool_cuda.int8_pool_backward(self.act_out.half(), err_in.half(), self.act_in.half(),
self.kernel_size, self.stride, self.padding).type(torch.int8)
return err_out, err_exp
class TiFlat(Module):
''' Flatten the input integer tensor except for the batch dimension '''
def forward(self, input):
self.act_in = input
act_in, exp_in = input
act_out = act_in.view(-1,act_in.nelement()//act_in.size(0))
return act_out, exp_in
def backward(self,input):
'''
Convert the flat error back to the shape before flattening
'''
err_in, err_exp = input
act, _ = self.act_in
return err_in.view_as(act), err_exp
def weight_quant(input):
input_range = torch.max(torch.abs(input))
input_bitwidth=torch.ceil(torch.log2(input_range))
act_exp = input_bitwidth - 7
round_val = torch.round(input/input_range*(2**7-1)).type(torch.int8).to(GPU)
return round_val, act_exp
def TiFloatToInt8(input):
'''
Convert float tensors to integer tensors, used to
1. feed in data in forward pass
2. feed in loss in backwards
'''
input_range = torch.max(torch.abs(input))
input_bitwidth = torch.ceil(torch.log2(input_range))
act_exp = input_bitwidth - BITWIDTH
""" old quantization function, may introduce a small scale to the quantized value"""
# round_val = torch.round(input/input_range*(2**BITWIDTH-1)).type(torch.int8).to(GPU)
norm_val = input*(2**(BITWIDTH-input_bitwidth))
round_val = torch.round(norm_val)
clamp_val = torch.clamp(round_val,-128, 127).type(torch.int8).to('cuda')
# return quantised int8 and exponent
return clamp_val, act_exp
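# Worked example (illustrative, assuming BITWIDTH = 7): max |input| = 6.0 gives
# input_bitwidth = ceil(log2(6.0)) = 3 and act_exp = 3 - 7 = -4, so 6.0 is scaled by
# 2**(7-3) = 16 to the int8 value 96; TiInt8ToFloat below recovers 96 * 2**(-4) = 6.0.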
def TiInt8ToFloat(input):
'''
Convert int tensors to float tensors
Could be used to verify the functionality of integer NN
'''
act_in, exp_in = input
return act_in.float() * (2**(exp_in.float()))
def TensorBitwidth(val):
return int(torch.ceil(torch.log2(val.float())))
def RangeEstimate(input):
'''
Determine the activation range
'''
range= torch.max(torch.abs(input).float())
if range ==0:
return 0
else:
return TensorBitwidth(range)
class TiConv2d(UpdateWeight):
'''
conv 3x3 with dilation 1, stride 1, padding 1
NHWC format
'''
def __init__(self, in_channels, out_channels, kernel_size = 3, stride =1, padding =1, first_layer=False):
super(TiConv2d, self).__init__()
self.weight, self.weight_exp = WEIGHT_INIT_METHOD(torch.Size([out_channels, kernel_size, kernel_size, in_channels]))
self.first_layer = first_layer
self.stride = stride
self.padding = padding
def forward(self, input):
self.act, self.act_in_exp = input
temp = conv2d_int8(self.act, self.weight, self.stride, self.padding)
act_out, exp_out = act_calc(temp, self.act_in_exp + self.weight_exp)
return act_out, exp_out
def backward(self, input):
err_in, self.err_exp = input
""" first layer doesn't need to back prop error """
if not self.first_layer:
weight_transposed = torch.flip(self.weight,[1,2]).transpose(0,3).contiguous()
err_out_int32 = conv2d_int8(err_in, weight_transposed, stride=self.stride, padding=self.padding)
err_out, shift_bits = err_calc(err_out_int32)
self.err_exp += (shift_bits + self.weight_exp)
""" calculate weight gradient
the first layer uses a group conv trick for acceleration, like the pytorch code in torch.nn.grad.conv2d_weight
"""
if self.first_layer:
if self.act.size(0) % 4 == 0 and self.stride == 1:
self.grad_int32acc = conv2d_weight_int8(input = self.act,
weight_size = self.weight.shape,
grad_output = err_in,
stride = self.stride,
padding = self.padding)
else:
""" the last batch may have odd size, current cuda extension doesn't support it"""
self.grad_int32acc = torch.nn.grad.conv2d_weight(input = self.act.permute(0,3,1,2).contiguous().float(),
weight_size = torch.Size([self.weight.size(0),
self.weight.size(3),
self.weight.size(1),
self.weight.size(2)]),
grad_output = err_in.permute(0,3,1,2).contiguous().float(),
stride = self.stride,
padding = self.padding).permute(0,2,3,1).contiguous()
else:
act_transposed = self.act.transpose(0,3).contiguous()
err_in_transposed = err_in.transpose(0,3).contiguous()
self.grad_int32acc = conv2d_int8(act_transposed, err_in_transposed, stride=self.stride, padding=self.padding).transpose(0,3).contiguous()
self.weight_update()
if not self.first_layer:
return err_out, self.err_exp
class TiDropout(Module):
'''
Integer Dropout layer
'''
def forward(self, | |
"vibrational")
def __init__(
self,
input_dir: str = ".",
output_dir: str = ".",
wanted_files: Optional[Iterable[Union[str, Path]]] = None,
quantum_software: str = "gaussian",
):
"""
Parameters
----------
input_dir : str or path-like object, optional
Path to directory containing files for extraction, defaults to current
working directory.
output_dir : str or path-like object, optional
Path to directory for output files, defaults to current working directory.
wanted_files : list of str or list of Path, optional
List of files or filenames representing wanted files. If not given, all
files are considered wanted. File extensions are ignored.
quantum_software : str
A name of the quantum chemical computations software used to obtain data.
Used by ``tesliper`` to figure out, which parser to use, if custom parsers
are available.
"""
self.conformers = gw.Conformers()
self.wanted_files = wanted_files
self.input_dir = input_dir
self.output_dir = output_dir
self.spectra = dict()
self.averaged = dict()
self.experimental = dict()
self.parameters = self.standard_parameters
self.quantum_software = quantum_software.lower()
if self.quantum_software not in ex.parser_base._PARSERS:
logger.warning(
f"Unsupported quantum chemistry software: {quantum_software}. "
"Automatic data extraction will not be available."
)
def __getitem__(self, item: str) -> gw.conformers.AnyArray:
try:
return self.conformers.arrayed(item)
except ValueError:
raise KeyError(f"Unknown genre '{item}'.")
def clear(self):
"""Remove all data from the instance."""
self.conformers.clear()
self.wanted_files = []
self.input_dir = ""
self.output_dir = ""
self.spectra = dict()
self.averaged = dict()
self.experimental = dict()
self.parameters = self.standard_parameters
@property
def temperature(self) -> float:
"""Temperature of the system expressed in Kelvin units.
Value of this parameter is passed to :term:`data array`\\s created with the
:meth:`.Conformers.arrayed` method, provided that the target data array class
supports a parameter named *t* in its constructor.
.. versionadded:: 0.9.1
Raises
------
ValueError
if set to a value lower than zero.
Notes
-----
It's actually just a proxy to :meth:`self.conformers.temperature
<.Conformers.temperature>`.
"""
return self.conformers.temperature
@temperature.setter
def temperature(self, value):
self.conformers.temperature = value
@property
def energies(self) -> Dict[str, gw.Energies]:
"""Data for each energies' genre as :class:`.Energies` data array. Returned
dictionary is of form {"genre": :class:`.Energies`} for each of the genres:
"scf", "zpe", "ten", "ent", and "gib". If no values are available for a specific
genre, an empty :class:`.Energies` array is produced as corresponding dictionary
value.
>>> tslr = Tesliper()
>>> tslr.energies
{
"scf": Energies(genre="scf", ...),
"zpe": Energies(genre="zpe", ...),
"ten": Energies(genre="ten", ...),
"ent": Energies(genre="ent", ...),
"gib": Energies(genre="gib", ...),
}
Returns
-------
dict
Dictionary with genre names as keys
and :class:`.Energies` data arrays as values.
"""
keys = gw.Energies.associated_genres
return {k: self[k] for k in keys}
@property
def activities(self) -> Dict[str, _activities_types]:
"""Data for default activities used to calculate spectra as appropriate
:class:`.SpectralActivities` subclass. Returned dictionary is of form {"genre":
:class:`.SpectralActivities`} for each of the genres: "dip", "rot", "vosc",
"vrot", "raman1", and "roa1". If no values are available for a specific genre,
an empty data array is produced as corresponding dictionary value.
>>> tslr = Tesliper()
>>> tslr.activities
{
"dip": VibrationalActivities(genre="dip", ...),
"rot": VibrationalActivities(genre="rot", ...),
"vosc": ElectronicActivities(genre="vosc", ...),
"vrot": ElectronicActivities(genre="vrot", ...),
"raman1": ScatteringActivities(genre="raman1", ...),
"roa1": ScatteringActivities(genre="roa1", ...),
}
Returns
-------
dict
Dictionary with genre names as keys and
:class:`.SpectralActivities` data arrays as values.
"""
keys = dw.DEFAULT_ACTIVITIES.values()
return {k: self[k] for k in keys}
@property
def wanted_files(self) -> Optional[Set[str]]:
"""Set of files that are desired for data extraction, stored as filenames
without an extension. Any iterable of strings or Path objects is transformed
to this form.
>>> tslr = Tesliper()
>>> tslr.wanted_files = [Path("./dir/file_one.out"), Path("./dir/file_two.out")]
>>> tslr.wanted_files
{"file_one", "file_two"}
May also be set to ``None`` or other "falsy" value, in such case it is ignored.
"""
return self._wanted_files
@wanted_files.setter
def wanted_files(self, files: Optional[Iterable[Union[str, Path]]]):
self._wanted_files = None if not files else {Path(f).stem for f in files}
@property
def standard_parameters(self) -> Dict[str, Dict[str, Union[int, float, Callable]]]:
"""Default parameters for spectra calculation for each spectra genre
(ir, vcd, uv, ecd, raman, roa). This returns a dictionary,
but in fact it is a convenience, read-only attribute,
modifying it will have no persisting effect.
"""
return {key: params.copy() for key, params in self._standard_parameters.items()}
def update(self, other: Optional[Dict[str, dict]] = None, **kwargs):
"""Update stored conformers with given data.
Works like ``dict.update``, but if key is already present, it updates
dictionary associated with given key rather than assigning new value.
Keys of dictionary passed as positional parameter (or additional keyword
arguments given) should be conformers' identifiers and its values should be
dictionaries of ``{"genre": values}`` for those conformers.
Please note, that values of status genres like 'optimization_completed'
and 'normal_termination' will be updated as well for such key,
if are present in given new values.
>>> tslr.conformers
Conformers([('one', {'scf': -100, 'stoichiometry': 'CH4'})])
>>> tslr.update(
... {'one': {'scf': 97}, 'two': {'scf': 82, 'stoichiometry': 'CH4'}}
... )
>>> tslr.conformers
Conformers([
('one', {'scf': 97, 'stoichiometry': 'CH4'}),
('two', {'scf': 82, 'stoichiometry': 'CH4'}),
])
"""
self.conformers.update(other, **kwargs)
@property
def input_dir(self) -> Path:
"""Directory, from which files should be read."""
return self.__input_dir
@input_dir.setter
def input_dir(self, path: Union[Path, str] = "."):
path = Path(path).resolve()
if not path.is_dir():
raise FileNotFoundError(
"Invalid path or directory not found: {}".format(path)
)
logger.info("Current working directory is: {}".format(path))
self.__input_dir = path
@property
def output_dir(self) -> Path:
"""Directory, to which generated files should be written."""
return self.__output_dir
@output_dir.setter
def output_dir(self, path: Union[Path, str] = "."):
path = Path(path).resolve()
path.mkdir(exist_ok=True)
logger.info("Current output directory is: {}".format(path))
self.__output_dir = path
def extract_iterate(
self,
path: Optional[Union[str, Path]] = None,
wanted_files: Optional[Iterable[str]] = None,
extension: Optional[str] = None,
recursive: bool = False,
) -> Generator[Tuple[str, dict], None, None]:
"""Extracts data from chosen Gaussian output files present in given directory
and yields data for each conformer found.
Uses :attr:`Tesliper.input_dir` as source directory and
:attr:`Tesliper.wanted_files` list of chosen files if these are not explicitly
given as 'path' and 'wanted_files' parameters.
Parameters
----------
path : str or pathlib.Path, optional
Path to directory, from which Gaussian files should be read.
If not given or is ``None``, :attr:`Tesliper.input_dir` will be used.
wanted_files : list of str, optional
Filenames (without a file extension) of conformers that should be extracted.
If not given or is ``None``, :attr:`Tesliper.wanted_files` will be used. If
:attr:`Tesliper.wanted_files` is also ``None``, all found Gaussian output
files will be parsed.
extension : str, optional
Only files with given extension will be parsed. If omitted, Tesliper will
try to guess the extension from contents of input directory.
recursive : bool
If ``True``, also subdirectories are searched for files to parse, otherwise
subdirectories are ignored. Defaults to ``False``.
Yields
------
tuple
Two item tuple with name of parsed file as first and extracted
data as second item, for each Gaussian output file parsed.
"""
soxhlet = ex.Soxhlet(
path=path or self.input_dir,
purpose=self.quantum_software,
wanted_files=wanted_files or self.wanted_files,
extension=extension,
recursive=recursive,
)
for file, data in soxhlet.extract_iter():
self.update(((file, data),))
yield file, data
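# Illustrative usage (path, extension and printed genres are examples only):
# tslr = Tesliper(input_dir="./calculations")
# for filename, data in tslr.extract_iterate(extension="log"):
#     print(filename, list(data.keys()))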
def extract(
self,
path: Optional[Union[str, Path]] = None,
wanted_files: Optional[Iterable[str]] = None,
extension: Optional[str] = None,
recursive: bool = False,
):
"""Extracts data from chosen Gaussian output files present in given directory.
Uses :attr:`Tesliper.input_dir` as source directory and
:attr:`Tesliper.wanted_files` list of chosen files if these are not explicitly
given as *path* and *wanted_files* parameters.
Parameters
----------
path : str or pathlib.Path, optional
Path to directory, from which Gaussian files should be read.
If not given or is ``None``, :attr:`Tesliper.input_dir` will be used.
wanted_files : list of str, optional
Filenames (without a file extension) of conformers that should be extracted.
If not given or is ``None``, :attr:`Tesliper.wanted_files` will be used.
extension : str, optional
Only files with given extension will be parsed. If omitted, Tesliper will
try to guess the extension from contents of input directory.
recursive : bool
If ``True``, also subdirectories are searched for files to parse, otherwise
subdirectories are ignored. Defaults to ``False``.
"""
for f, d in self.extract_iterate(path, wanted_files, extension, recursive):
_ = f, d
def load_parameters(
self,
path: Union[str, Path],
spectra_genre: Optional[str] = None,
) -> dict:
"""Load calculation parameters from a file.
Parameters
----------
path : str or pathlib.Path, optional
Path to the file with desired parameters specification.
spectra_genre : str, optional
Genre of | |
quantumgates
from utilitiesquantumgates import utilities
from tensorboardutilities import tensorboardutilities
from datetime import datetime
import time
#%% datatypes
npdatatype=np.complex64
tfdatatype=tf.complex64
tfrealdatatype=tf.float32 # to use double precision, switch the types above to complex128 and this one to float64
#%% number of training points
ntrain=1
nvalid=1
#%% learning rate
learning_rate=0.01
#%% threshold for stopping iterations in validation cost
threshold_valid=inputaccuracy
#%% set the tensorboard utilities
tensorboarddir = tensorboardutilities.getdirname();
#%% random seed
timestamp = int(time.mktime(datetime.now().timetuple()))
RANDOM_SEED=timestamp
if verbose>1:
print('Random seed = ' + repr(timestamp))
#%% define graph
tf.reset_default_graph()
#%% summaries for tensorflow
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.scalar('norm', tf.norm(var))
tf.summary.histogram('histogram', var)
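# Illustrative usage (mirrors the calls made under the "equation" name scope further below):
# variable_summaries(Wreal)  # logs mean/stddev/min/max/norm and a histogram of the real weights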
#%% seed random number generation
tf.set_random_seed(RANDOM_SEED)
np.random.seed(seed=RANDOM_SEED)
#%% Extract N and M in input
N=X_np.shape[0]
M=U_np.shape[0]
if M<N:
print("Error: embedding dimension M cannot be smaller then N")
return
#%% unitary rigging of X
RXT_np=quantumgates.riggunitary(X_np,M)
RXT=tf.constant(RXT_np,dtype=tfdatatype)
print(RXT)
#%% unitary rigging of X
# XT=tf.constant(X_np)
#%% random unitary matrix
U=tf.constant(U_np,dtype=tfdatatype)
#%% generate the training matrix
W0=tf.random_uniform([M,M],dtype=tfrealdatatype)
WC=tf.complex(tf.random.uniform([M,M],dtype=tfrealdatatype),tf.random.uniform([M,M],dtype=tfrealdatatype))
Wreal=tf.get_variable("Wr",initializer=W0,dtype=tfrealdatatype)
Wimag=tf.get_variable("Wi",initializer=W0,dtype=tfrealdatatype)
W=tf.get_variable("W",initializer=WC,dtype=tfdatatype,trainable=False)
#%% transfer matrix
transfer_matrix=tf.get_variable("transfer_matrix",initializer=WC,trainable=False)
#%% place holder
x=tf.placeholder(dtype=tfdatatype,shape=(M,1),name="x")
#%% generate training set
xtrains=np.zeros((M,ntrain),dtype=npdatatype)
for j in range(ntrain):
for i in range(M):
xtrains[i,j]=1.0
#%% normalize training set
xtrains=tf.keras.utils.normalize(xtrains,axis=0,order=2)
#%% generate validation set
xvalids=np.zeros((M,ntrain),dtype=npdatatype)
for j in range(nvalid):
for i in range(M):
xvalids[i,j]=1.0
#%% normalize validation set
xvalids=tf.keras.utils.normalize(xvalids,axis=0,order=2)
#%% projector that extracts the first N rows from a vector of size M
#project=tf.constant(quantumgates.projector(N,M,npdatatype),dtype=tfdatatype)
#%% equation
with tf.name_scope("equation") as scope:
with tf.name_scope("Wreal") as scope:
variable_summaries(Wreal)
with tf.name_scope("Wimag") as scope:
variable_summaries(Wimag)
yt=tf.matmul(RXT,x)
#clipping the weights
Wreal=tf.clip_by_value(Wreal,realMIN,realMAX)
Wimag=tf.clip_by_value(Wimag,imagMIN,imagMAX)
# quantize
Wreal=tf.quantization.quantize_and_dequantize(Wreal,realMIN,realMAX,signed_input=False,num_bits=quantizedbits)
Wimag=tf.quantization.quantize_and_dequantize(Wimag,imagMIN,imagMAX,signed_input=False,num_bits=quantizedbits)
# build the matrices (phase only modulator)
#W=tf.complex(cWreal,cWimag)
W=tf.complex(tf.cos(Wreal),tf.sin(Wreal))
transfer_matrix=tf.matmul(U,W)
equation=tf.matmul(transfer_matrix,x)-yt
eqreal=tf.math.real(equation)
eqimag=tf.math.imag(equation)
cost_function=tf.reduce_mean(tf.square(eqreal)+
tf.square(eqimag))
tf.summary.scalar('cost_function',cost_function)
#%%TO DO : TRY OTHER MINIMIZER
with tf.name_scope("train") as scope:
# global_step=tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
# optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
cost_function)
#%% message
if verbose>0:
print('Running with M ' + repr(M) +
' N ' + repr(N) +
' ntrain ' + repr(ntrain) +
' nvalid ' + repr(nvalid))
#%% writer
train_writer=tf.summary.FileWriter(tensorboarddir)
merged=tf.summary.merge_all()
#%%
xtmp=np.zeros((M,1),dtype=npdatatype)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
train_writer.add_graph(sess.graph)
Tinitial=transfer_matrix.eval()
for epoch in range(epochs):
avg_cost=0.
for i in range(ntrain):
xtmp=np.reshape(xtrains[0:M,i],(M,1))
sess.run(optimizer,feed_dict={x: xtmp})
avg_cost+=sess.run(cost_function, feed_dict={x: xtmp})
summary=sess.run(merged, feed_dict={x: xtmp})
train_writer.add_summary(summary,i+epoch*epochs)
avg_cost=avg_cost/ntrain
# messages
if epoch % display_steps == 0:
# evaluate the validation error
avg_cost_valid=0.
for i in range(nvalid):
xtmp_valid=np.reshape(xvalids[0:M,i],(M,1))
avg_cost_valid+=sess.run(cost_function, feed_dict=
{x: xtmp_valid})
avg_cost_valid=avg_cost_valid/nvalid
if verbose>1:
print('epoch '+repr(epoch))
print('cost '+repr(avg_cost))
print('valid cost '+repr(avg_cost_valid))
# check the validation cost and if needed exit the iteration
if avg_cost_valid < threshold_valid:
if verbose:
print('Convergence in validation reached at epoch '
+ repr(epoch))
break
if epoch>=epochs-1:
if verbose>0:
print('No convergence, maximal epochs reached '
+repr(epochs))
Tfinal=transfer_matrix.eval()
rWfinal=Wreal.eval()
iWfinal=Wimag.eval()
Wfinal=W.eval()
TVV=tf.matmul(W,W,adjoint_a=True).eval()
# print('Determinant Structure matrix ' + repr(np.linalg.det(dataU_np)))
#%%
if verbose>2:
print("Final Wreal")
utilities.printonscreennp(rWfinal)
print("Final Wimag")
utilities.printonscreennp(iWfinal)
print("Final Sinput=W")
utilities.printonscreennp(Wfinal)
print("Final TV V for unitarity ")
utilities.printonscreennp(TVV)
print("Initial T")
utilities.printonscreennp(Tinitial)
print("Final T")
utilities.printonscreennp(Tfinal)
#%%
sess.close()
#%% set the output dictionary of parameters
out=dict();
out['accuracy']=threshold_valid
out['epoch']=epoch
out['ntrain']=ntrain
out['nvalid']=nvalid
out['N']=X_np.shape[0]
out['M']=M
out['X']=X_np
return out, Wfinal, Tfinal, Tinitial
def complexqtzd(X_np,U_np,
verbose=2,
inputaccuracy=1e-4,
epochs=10,display_steps=100,
realMIN=-1.0, realMAX=1.0,
imagMIN=0.0, imagMAX=0.0,
nbits=8):
#%% Train a single input SLM with complex matrix
# Given a gate with size N, generate a random unitary matrix and
# use an NN to train an input gate to act as the input unitary class
#
# Input:
# X_np, gate as numpy matrix
# U_np, unitary matrix for medium
# verbose, 0 no output, 1 minimal, 2 steps, 3 all
#
# Use single input SLM with complex matrix
#
# WrealMAX, WrealMIN, maximal and minimal value for Wreal
#
# WimagMAX, WimagMIN, maximal and minimal value for Wimag
# If WimagMAX=WimagMIN=0 it is an amplitude modulator
#%%
from utilitiesquantumgates import quantumgates
from utilitiesquantumgates import utilities
from tensorboardutilities import tensorboardutilities
from datetime import datetime
import time
# datatypes
npdatatype=np.complex64
tfdatatype=tf.complex64
tfrealdatatype=tf.float32 # to use double precision, switch the types above to complex128
#%% number of training points
ntrain=1
nvalid=1
#%% learning rate
learning_rate=0.01
#%% threshold for stopping iterations in validation cost
threshold_valid=inputaccuracy
#%% set the tensorboard utilities
tensorboarddir = tensorboardutilities.getdirname();
#%% random seed
timestamp = int(time.mktime(datetime.now().timetuple()))
RANDOM_SEED=timestamp
if verbose>1:
print('Random seed = ' + repr(timestamp))
#%% define graph
tf.reset_default_graph()
#%% summaries for tensorflow
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.scalar('norm', tf.norm(var))
tf.summary.histogram('histogram', var)
#%% seed random number generation
tf.set_random_seed(RANDOM_SEED)
np.random.seed(seed=RANDOM_SEED)
#%% Extract N and M in input
N=X_np.shape[0]
M=U_np.shape[0]
if M<N:
print("Error: embedding dimension M cannot be smaller then N")
return
#%% unitary rigging of X
RXT_np=quantumgates.riggunitary(X_np,M)
RXT=tf.constant(RXT_np,dtype=tfdatatype)
#%% random unitary matrix
U=tf.constant(U_np,dtype=tfdatatype)
#%% generate the training matrix
W0=tf.random_uniform([M,M],dtype=tfrealdatatype)
WC=tf.complex(tf.random.uniform([M,M],dtype=tfrealdatatype),tf.random.uniform([M,M],dtype=tfrealdatatype))
Wreal=tf.get_variable("Wr",initializer=W0,dtype=tfrealdatatype)
Wimag=tf.get_variable("Wi",initializer=W0,dtype=tfrealdatatype)
W=tf.get_variable("W",initializer=WC,dtype=tfdatatype,trainable=False)
#%% transfer matrix
transfer_matrix=tf.get_variable("transfer_matrix",initializer=WC,trainable=False)
#%% current output
yC=tf.complex(tf.random.uniform([M,1],dtype=tfrealdatatype),tf.random.uniform([M,1],dtype=tfrealdatatype))
yout=tf.get_variable("current_y",initializer=yC,trainable=False)
yt=tf.get_variable("target_y",initializer=yC,trainable=False)
#%% place holder
x=tf.placeholder(dtype=tfdatatype,shape=(M,1),name="x")
#%% generate training set: one single input with ones in the first N entries and zeros in the remaining M-N
xtrains=np.zeros((M,ntrain),dtype=npdatatype)
for j in range(ntrain):
for i in range(N):
xtrains[i,j]=1.0
#%% normalize training set
#xtrains=tf.keras.utils.normalize(xtrains,axis=0,order=2)
#%% generate validation set (here equal to the training)
xvalids=np.zeros((M,nvalid),dtype=npdatatype)
for j in range(nvalid):
for i in range(N):
xvalids[i,j]=1.0
#%% normalize validation set
#xvalids=tf.keras.utils.normalize(xvalids,axis=0,order=2)
#%% equation
with tf.name_scope("equation") as scope:
with tf.name_scope("Wreal") as scope:
variable_summaries(Wreal)
with tf.name_scope("Wimag") as scope:
variable_summaries(Wimag)
yt=tf.matmul(RXT,x)
#clipping the weights
Wreal=tf.clip_by_value(Wreal,realMIN,realMAX)
Wimag=tf.clip_by_value(Wimag,imagMIN,imagMAX)
# quantize
Wreal=tf.quantization.quantize_and_dequantize(Wreal,realMIN,realMAX,signed_input=False,num_bits=nbits)
Wimag=tf.quantization.quantize_and_dequantize(Wimag,imagMIN,imagMAX,signed_input=False,num_bits=nbits)
# build the matrices (phase only modulator)
W=tf.complex(Wreal,Wimag)
transfer_matrix=tf.matmul(U,W)
yout=tf.matmul(transfer_matrix,x)
equation=yout-yt
eqreal=tf.math.real(equation)
eqimag=tf.math.imag(equation)
cost_function=tf.reduce_mean(tf.square(eqreal)+
tf.square(eqimag))
tf.summary.scalar('cost_function',cost_function)
#%%TO DO : TRY OTHER MINIMIZER
with tf.name_scope("train") as scope:
# global_step=tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
# optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
cost_function)
#%% message
if verbose>0:
print('Running with M ' + repr(M) +
' N ' + repr(N) +
' ntrain ' + repr(ntrain) +
' nvalid ' + repr(nvalid))
#%% writer
train_writer=tf.summary.FileWriter(tensorboarddir)
merged=tf.summary.merge_all()
#%%
xtmp=np.zeros((M,1),dtype=npdatatype)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
train_writer.add_graph(sess.graph)
Tinitial=transfer_matrix.eval()
for epoch in range(epochs):
avg_cost=0.
for i in range(ntrain):
xtmp=np.reshape(xtrains[0:M,i],(M,1))
sess.run(optimizer,feed_dict={x: xtmp})
avg_cost+=sess.run(cost_function, feed_dict={x: xtmp})
summary=sess.run(merged, feed_dict={x: xtmp})
train_writer.add_summary(summary,i+epoch*epochs)
avg_cost=avg_cost/ntrain
# messages
if epoch % display_steps == 0:
# evaluate the validation error
avg_cost_valid=0.
for i in range(nvalid):
xtmp_valid=np.reshape(xvalids[0:M,i],(M,1))
avg_cost_valid+=sess.run(cost_function, feed_dict=
{x: xtmp_valid})
avg_cost_valid=avg_cost_valid/nvalid
if verbose>1:
print('epoch '+repr(epoch))
print('cost '+repr(avg_cost))
print('valid cost '+repr(avg_cost_valid))
# check the validation cost and if needed exit the iteration
if avg_cost_valid < threshold_valid:
if verbose:
print('Convergence in validation reached at epoch '
+ repr(epoch))
break
if epoch>=epochs-1:
if verbose>0:
print('No convergence, maximal epochs reached '
+repr(epochs))
Tfinal=transfer_matrix.eval()
ytargetf=sess.run(yt, feed_dict={x: xtmp})
youtf=sess.run(yout, feed_dict={x: xtmp})
rWfinal=Wreal.eval()
iWfinal=Wimag.eval()
Wfinal=W.eval()
TVV=tf.matmul(W,W,adjoint_a=True).eval()
# print('Determinant Structure matrix ' + repr(np.linalg.det(dataU_np)))
#%%
if verbose>2:
print("Final Wreal")
utilities.printonscreennp(rWfinal)
print("Final Wimag")
utilities.printonscreennp(iWfinal)
print("Final Sinput=W")
utilities.printonscreennp(Wfinal)
print("Final TV V for unitarity ")
utilities.printonscreennp(TVV)
print("Initial T")
utilities.printonscreennp(Tinitial)
print("Final T")
utilities.printonscreennp(Tfinal)
#%%
sess.close()
#%% set the output dictionary of parameters
out=dict();
out['accuracy']=threshold_valid
out['epoch']=epoch
out['ntrain']=ntrain
out['nvalid']=nvalid
out['N']=X_np.shape[0]
out['M']=M
out['X']=X_np
out['xtrain']=xtrains
out['Wreal']=rWfinal
out['Wimag']=iWfinal
out['Wfinal']=Wfinal
out['Tfinal']=Tfinal
out['yt']=ytargetf
out['y']=youtf
out['cost']=avg_cost_valid
return out
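# Hedged usage sketch (illustrative values only, not part of the original
# module): train a quantized complex modulator for a 2x2 gate embedded in a
# 4x4 unitary medium. Drawing U_np with scipy's unitary_group is just one
# possible choice.
def _example_complexqtzd():
    from scipy.stats import unitary_group
    X = np.array([[0, 1], [1, 0]], dtype=np.complex64)  # NOT gate
    U = unitary_group.rvs(4).astype(np.complex64)
    # quantize the real part to 8 bits; imag range 0 gives an amplitude-only modulator
    return complexqtzd(X, U, epochs=1000, nbits=8, verbose=1)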
def complex(X_np,U_np,
verbose=2,
inputaccuracy=1e-4,
epochs=10,display_steps=100,
realMIN=-1.0, realMAX=1.0,
imagMIN=0.0, imagMAX=0.0):
#%% Train a single input SLM with complex matrix
# Given a gate with size N, generate a random unitary matrix and
# use an NN to train an input gate to act as the input unitary class
#
# Input:
# X_np, gate as numpy matrix
# U_np, unitary matrix for medium
# verbose, 0 no output, 1 minimal, 2 steps, 3 all
#
# Use single input SLM with complex matrix
#
# WrealMAX, WrealMIN, maximal and minimal value for Wreal
<filename>SPGPylibs/PHItools/phifdt_pipe_modules.py
### phifdt_pipe_modules
import numpy as np
import random, statistics
from scipy.ndimage import gaussian_filter
from matplotlib import pyplot as plt
from .tools import printc,bcolors,fix_path
from .phi_fits import fits_get
from .phi_rte import phi_rte
from .phi_utils import find_string,azimutal_average,newton,limb_darkening,genera_2d
from .phi_gen import bin_annulus,shift,find_center,apod,rebin,generate_circular_mask
from .phi_reg import shift_subp,moments
import SPGPylibs.GENtools.plot_lib as plib
from platform import node
MILOS_EXECUTABLE = 'milos.'+node().split('.')[0]
import os
def phi_correct_dark(dark_f,data,header,data_scale,verbose = False,get_dark = False):
#-----------------
# READ AND CORRECT DARK FIELD
#-----------------
printc('-->>>>>>> Reading Darks ',color=bcolors.OKGREEN)
printc(' Input should be [y-dim,x-dim].',color=bcolors.OKGREEN)
printc(' DARK IS DIVIDED by 256. ',color=bcolors.OKGREEN)
try:
dark,dark_header = fits_get(dark_f)
dark = dark / 256.
except Exception:
printc("ERROR, Unable to open darks file: {}",dark_f,color=bcolors.FAIL)
raise
# locations = find_string(dark_f,'_')
# try:
# DID = dark_f[locations[-1]+1:locations[-1]+10]
# print('DID: ',np.float(DID))
# except Exception:
# printc("Unable to get DID: {}",dark_f,color=bcolors.FAIL)
# printc('DID: ',DID,' -->> NOT A NUMBER',color=bcolors.FAIL)
# raise
DID = dark_header['PHIDATID']
printc('Dark DID: ',DID,color=bcolors.OKBLUE)
dark_scale = fits_get(dark_f,scaling = True)
if dark_scale["Present"][0] == data_scale["Present"][0]:
scaling = dark_scale["scaling"][0] / data_scale["scaling"][0]
else:
scaling = dark_scale["scaling"][1] / data_scale["scaling"][1] * dark_scale["scaling"][0]
if scaling != 1:
printc('          checking scaling and correcting for it in the dark.',dark_scale,data_scale,scaling,color=bcolors.WARNING)
dark = dark * scaling
if get_dark: #for kll
printc('-->>>>>>> Dark is output in phi_correct_dark()',color=bcolors.OKGREEN)
return dark, scaling
printc('-->>>>>>> Correcting dark current.',color=bcolors.OKGREEN)
PXBEG1 = int(header['PXBEG1']) - 1
PXEND1 = int(header['PXEND1']) - 1
PXBEG2 = int(header['PXBEG2']) - 1
PXEND2 = int(header['PXEND2']) - 1
#CHECK NACC
acc = int(header['ACCACCUM']) * int(header['ACCCOLIT'])
acc_dark = int(dark_header['ACCACCUM']) * int(dark_header['ACCCOLIT'])
if acc != acc_dark:
printc('WARNING - NACC NOT IDENTICAL DURING DARK CORRECTION',color=bcolors.FAIL)
printc('DARK NACC ',acc_dark,' DATA NACC ',acc,color=bcolors.FAIL)
if verbose:
dummy = data[0,0,:,:]
data = data - dark[np.newaxis,np.newaxis,PXBEG2:PXEND2+1,PXBEG1:PXEND1+1]
data = np.abs(data)
if 'CAL_DARK' in header: # Check for existence
header['CAL_DARK'] = DID
if verbose:
md = np.mean(dark)
if verbose != True:
plib.show_three(dark,dummy,data[0,0,:,:],vmin=[-md,-md,-md],vmax=[md*2,md*2,md*2],block=True,pause=0.1,title=['Dark','Data','Data after dark correction'],
xlabel='Pixel',ylabel='Pixel',cmap='gray',save=verbose)
else:
plib.show_three(dark,dummy,data[0,0,:,:],vmin=[-md,-md,-md],vmax=[md*2,md*2,md*2],block=True,pause=0.1,title=['Dark','Data','Data after dark correction'],
xlabel='Pixel',ylabel='Pixel',cmap='gray')
return data,header
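# Hedged usage sketch (assumed FITS file names, not part of the original
# module): read a science cube plus its scaling info with fits_get, then
# remove the dark current from it.
def _example_correct_dark(science_fits, dark_fits):
    data, header = fits_get(science_fits)
    data_scale = fits_get(science_fits, scaling=True)
    return phi_correct_dark(dark_fits, data, header, data_scale)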
def interpolateImages(image1, image2, dist1I, distI2):
''' interpolate 2D images -
'''
imageInterp = (image1 * distI2 + image2 * dist1I) / (dist1I + distI2)
return imageInterp
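# Note on the weighting above: each image is weighted by the distance to the
# *other* sample point (inverse-distance interpolation), so with equal
# distances the result is the plain average, e.g.
# interpolateImages(a, b, 1.0, 1.0) == (a + b) / 2.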
def phi_correct_prefilter(prefilter_fits,header,data,voltagesData,verbose = False):
printc('-->>>>>>> Read prefilter and correct for it ')
printc(' ',prefilter_fits,' ')
PXBEG1 = int(header['PXBEG1']) - 1
PXEND1 = int(header['PXEND1']) - 1
PXBEG2 = int(header['PXBEG2']) - 1
PXEND2 = int(header['PXEND2']) - 1
xd = int(header['NAXIS1'])
yd = int(header['NAXIS2'])
zd = int(header['NAXIS3'])
prefdata,h = fits_get(prefilter_fits)
prefdata = prefdata.astype(float)
prefdata = prefdata[:,PXBEG2:PXEND2+1,PXBEG1:PXEND1+1]
#PREFILTER INFO
#——————
#I call the prefilter correction from the python pipeline like this:
#data_corr = np.reshape(data_corr,(6,4,2048, 2048)) (6,4,y,x)
prefscale = 8388608./2.
# the scale of the prefilter is irrelevant because it is a relative variation
# data = data / prefilter. Scale = 2 makes the prefilter be around 1
# later, everything is normalized wrt the continuum so this is not important.
scale = 1.
prefVoltages = [-1300, -1234, -1169, -1103, -1038, -972, -907, -841, -776,\
-710, -645, -579, -514, -448, -383, -317, -252, -186,-121, -56,9,74,\
140,205,271,336,402,467,533,598,664,729,795,860,926,991,1056,1122,1187,\
1253,1318,1384,1449,1515,1580,1646,1711,1777,1842]
if verbose:
datap = np.copy(data)
data = applyPrefilter_dos(data, voltagesData, prefdata, prefscale, prefVoltages, -1,scaledown = scale, verbose = verbose)
if verbose:
plt.plot(data[:,0,yd//2,xd//2],'o-',label='corrected')
plt.plot(datap[:,0,yd//2,xd//2],'--',label='original')
plt.legend()
plt.show()
del datap
slash,_ = find_string(prefilter_fits,'/')
if 'CAL_PRE' in header: # Check for existence
header['CAL_PRE'] = prefilter_fits[slash[-1]+1:-4]
else:
header.set('CAL_PRE', prefilter_fits[slash[-1]+1:-4], 'prefilter file',after='CAL_DARK')
return data,header
def applyPrefilter(data, wvltsData, prefilter, prefScale, wvltsPref, direction, scaledown=8,verbose = False):
'''PHI prefilter. Version from <NAME>.
'''
prefToApply = np.zeros((6,prefilter.shape[1],prefilter.shape[2]))
#prefilter = prefilter/prefScale
for i in range(0,6):
wvlCurr = wvltsData[i]
valueClosest = min(wvltsPref, key=lambda x:abs(x-wvlCurr))
if verbose:
print("iter", i, "wvlCurr", wvlCurr)
print("iter", i, "valueClosest", valueClosest)
indexClosest = wvltsPref.index(valueClosest)
if verbose:
print("iter", i, "indexClosest", indexClosest)
if (valueClosest < wvlCurr):
indexBefore = indexClosest
indexAfter = indexClosest + 1
else:
indexAfter = indexClosest
indexBefore = indexClosest - 1
dist1I = abs(wvltsPref[indexBefore] - wvltsData[i])
distI2 = abs(wvltsPref[indexAfter] - wvltsData[i])
prefToApply[i,:,:] = interpolateImages(prefilter[indexBefore], prefilter[indexAfter], dist1I, distI2)
if verbose:
print("mean prefValue Before:", np.mean(prefilter[indexBefore])*256)
print("mean prefValue After:", np.mean(prefilter[indexAfter])*256)
print("distance1:", dist1I)
print("distance2:", distI2)
print("percentage:", distI2 / (dist1I + distI2))
#Remove scale factor from prefilter
if verbose:
print("mean prefilter:", np.mean(prefToApply[i,:,:])*256)
prefToApply[i,:,:] = prefToApply[i,:,:] / prefScale
if verbose:
print("mean prefilter:", np.mean(prefToApply[i,:,:]))
if verbose:
print("Reshaping prefilter:")
print(prefToApply.shape)
print(data.shape)
if(data.shape[2] != prefToApply.shape[1]):
FOV_Start_y = int(prefToApply.shape[1]/2 - data.shape[2]/2)
FOV_End_y = int(prefToApply.shape[1]/2 + data.shape[2]/2)
prefToApply = prefToApply[:,FOV_Start_y:FOV_End_y,:]
if verbose:
print(prefToApply.shape)
if(data.shape[3] != prefToApply.shape[2]):
FOV_Start_x = int(prefToApply.shape[2]/2 - data.shape[3]/2)
FOV_End_x = int(prefToApply.shape[2]/2 + data.shape[3]/2)
prefToApply = prefToApply[:,:,FOV_Start_x:FOV_End_x]
if verbose:
print(prefToApply.shape)
dataPrefApplied = np.zeros(data.shape)
for i in range(0,4):
if(direction == 1):
dataPrefApplied[:,i,:,:] = data[:,i,:,:] * prefToApply
elif(direction == -1):
dataPrefApplied[:,i,:,:] = data[:,i,:,:] / prefToApply / scaledown
else:
print("Ivnalid direction! Must be 1 (mult) or -1 (div).")
return dataPrefApplied
#/**
# * This is the maximum range scaled from the division of the prefilter.
# * The smallest number in the prefilter is 0.13977 -> 1/0.13977 = 7.3 ~= 8
# */
#define RNG_RES_APPL_PREF 8 -> reason for division by 8.
def applyPrefilter_dos(data, wvltsData, prefilter, prefScale, wvltsPref, direction, scaledown=8,verbose = False):
'''PHI prefilter. Modified version from <NAME>.
'''
prefToApply = np.zeros((6,prefilter.shape[1],prefilter.shape[2]))
prefilter = prefilter / prefScale #dos
for i in range(0,6):
wvlCurr = wvltsData[i]
valueClosest = min(wvltsPref, key=lambda x:abs(x-wvlCurr))
if verbose:
print("iter", i, "wvlCurr", wvlCurr)
print("iter", i, "valueClosest", valueClosest)
indexClosest = wvltsPref.index(valueClosest)
if verbose:
print("iter", i, "indexClosest", indexClosest)
if (valueClosest < wvlCurr):
indexBefore = indexClosest
indexAfter = indexClosest + 1
else:
indexAfter = indexClosest
indexBefore = indexClosest - 1
dist1I = abs(wvltsPref[indexBefore] - wvltsData[i])
distI2 = abs(wvltsPref[indexAfter] - wvltsData[i])
prefToApply[i,:,:] = interpolateImages(prefilter[indexBefore], prefilter[indexAfter], dist1I, distI2)
if verbose:
print("mean prefValue Before:", np.mean(prefilter[indexBefore])*256)
print("mean prefValue After:", np.mean(prefilter[indexAfter])*256)
print("distance1:", dist1I)
print("distance2:", distI2)
print("percentage:", distI2 / (dist1I + distI2))
#Remove scale factor from prefilter
if verbose:
print("mean prefilter:", np.mean(prefToApply[i,:,:])*256)
#prefToApply[i,:,:] = prefToApply[i,:,:] / prefScale #dos
if verbose:
print("mean prefilter:", np.mean(prefToApply[i,:,:]))
if verbose:
print("Reshaping prefilter:")
print(prefToApply.shape)
print(data.shape)
if(data.shape[2] != prefToApply.shape[1]):
FOV_Start_y = int(prefToApply.shape[1]/2 - data.shape[2]/2)
FOV_End_y = int(prefToApply.shape[1]/2 + data.shape[2]/2)
prefToApply = prefToApply[:,FOV_Start_y:FOV_End_y,:]
if verbose:
print(prefToApply.shape)
if(data.shape[3] != prefToApply.shape[2]):
FOV_Start_x = int(prefToApply.shape[2]/2 - data.shape[3]/2)
FOV_End_x = int(prefToApply.shape[2]/2 + data.shape[3]/2)
prefToApply = prefToApply[:,:,FOV_Start_x:FOV_End_x]
if verbose:
print(prefToApply.shape)
dataPrefApplied = np.zeros(data.shape)
for i in range(0,4):
if(direction == 1):
dataPrefApplied[:,i,:,:] = data[:,i,:,:] * prefToApply
elif(direction == -1):
dataPrefApplied[:,i,:,:] = data[:,i,:,:] / prefToApply # / scaledown #dos
else:
print("Ivnalid direction! Must be 1 (mult) or -1 (div).")
return dataPrefApplied
def phi_apply_demodulation(data,instrument,header = False,demod=False,verbose = False):
'''
Use demodulation matrices to demodulate data size (n_wave*S_POL,N,M)
ATTENTION: FDT40 is fixed to the one Johann is using!!!!
'''
if instrument == 'FDT40':
mod_matrix_40 = np.array([[1.0006,-0.7132, 0.4002,-0.5693],
[1.0048, 0.4287,-0.7143, 0.5625],
[0.9963, 0.4269,-0.3652,-0.8229],
[0.9983,-0.4022, 0.9001, 0.1495]])
demodM = np.linalg.inv(mod_matrix_40)
# Johanns (it is the average in the central area of the one onboard)
demodM = np.array([[0.168258, 0.357277, 0.202212, 0.273266],\
[-0.660351, 0.314981, 0.650029, -0.299685],\
[ 0.421242, 0.336994, -0.183068, -0.576202],\
[-0.351933, 0.459820, -0.582167, 0.455458]])
elif instrument == 'FDT45':
mod_matrix_45 = np.array([[1.0035,-0.6598, 0.5817,-0.4773],
[1.0032, 0.5647, 0.5275, 0.6403],
[0.9966, 0.4390,-0.5384,-0.7150],
[0.9968,-0.6169,-0.6443, 0.4425]])
demodM = np.linalg.inv(mod_matrix_45)
elif instrument == 'HRT40':
mod_matrix_40 = np.array([[1.0040,-0.6647, 0.5928,-0.4527],
[1.0018, 0.5647, 0.5093, 0.6483],
[0.9964, 0.4348,-0.5135,-0.7325],
[0.9978,-0.6128,-0.6567, 0.4283]]) #HREW
demodM = np.linalg.inv(mod_matrix_40)
elif instrument == 'HRT45':
mod_matrix_45_dos = np.array([[1.00159,-0.50032, 0.7093,-0.4931],
[1.0040, 0.6615, 0.3925, 0.6494],
[0.9954, 0.3356,-0.6126,-0.7143],
[0.9989,-0.7474,-0.5179, 0.4126]]) #MIA
demodM = np.linalg.inv(mod_matrix_45_dos)
else:
printc('No demod available in demod_phi.py',color = bcolors.FAIL)
raise SystemError()
printc('Demodulation matrix for ', instrument,color = bcolors.WARNING)
printc(demodM,color = bcolors.WARNING)
if demod:
return demodM
ls,ps,ys,xs = data.shape
for i in range(ls):
data[i,:,:,:] = np.reshape(np.matmul(demodM,np.reshape(data[i,:,:,:],(ps,xs*ys))),(ps,ys,xs))
if header != False:
if 'CAL_IPOL' in header: # Check for existence
header['CAL_IPOL'] = instrument
else:
header.set('CAL_IPOL', instrument, 'Onboard calibrated for instrumental polarization',after='CAL_DARK')
return data, header
else:
return data
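# Hedged usage sketch (assumed FDT cube of shape (n_wave, 4, y, x), not part
# of the original module): demodulate with the fixed FDT40 matrix, or retrieve
# the demodulation matrix alone with demod=True.
def _example_apply_demodulation(data, header):
    demodM = phi_apply_demodulation(data, 'FDT40', demod=True)  # matrix only
    data, header = phi_apply_demodulation(data, 'FDT40', header=header)
    return data, header, demodM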
def crosstalk_ItoQUV(data_demod,verbose=False,npoints=2000):
limit=0.2
PLT_RNG = 3
my = []
sy = []
x = data_demod[:,0,:,:].flatten()
ids = x > limit
x = x[ids].flatten()
N = x.size
idx = random.sample(range(N),npoints)
mx = x[idx].mean()
sx = x[idx].std()
xp = np.linspace(x.min(), x.max(), 100)
A = np.vstack([x, np.ones(len(x))]).T
# I to Q
yQ = data_demod[:,1,:,:].flatten()
yQ = yQ[ids].flatten()
my.append(yQ[idx].mean())
sy.append(yQ[idx].std())
# cQ = np.polyfit( x , yQ , 1)
# pQ = np.poly1d(cQ)
m, c = np.linalg.lstsq(A, yQ, rcond=None)[0]
cQ = [m,c]
pQ = np.poly1d(cQ)
# I to U
yU = data_demod[:,2,:,:].flatten()
yU = yU[ids].flatten()
my.append(yU[idx].mean())
sy.append(yU[idx].std())
# cU = np.polyfit( x , yU , 1)
close=True)
rxn_df = table_lookup_update(rxn_df,
('reaction_id', rid),
('plot_path', fpath))
else:
logger.info(" missing Arrhenius coefficients; skipping...")
logger.info("Writing updated reaction table to {:s}".format(rxn_csv_out))
write_table_to_csv(rxn_df, rxn_csv_out)
def read_thermo_data(spc_csv):
""" a dictionary of thermo values (H298), indexed by species ID
"""
thv_dct = None
spc_df = pandas.read_csv(spc_csv)
if 'therm_val' in spc_df:
thv_dct = dict(zip(spc_df['species_id'], spc_df['therm_val']))
return thv_dct
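# Hedged usage sketch (assumed CSV layout, not part of the original module):
# a species table with 'species_id' and 'therm_val' columns yields a
# {species_id: H298} dictionary; without a 'therm_val' column it returns None.
def _example_read_thermo_data(spc_csv):
    # spc_csv contents, for illustration:
    #   species_id,therm_val
    #   C2H6,-20.04
    #   CH3,35.05
    return read_thermo_data(spc_csv)  # -> {'C2H6': -20.04, 'CH3': 35.05}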
def chemkin_to_csv(mech_txt, thm_txt, rxn_csv_out, spc_csv_out, logger):
""" parse CHEMKIN files
"""
from .pchemkin import species as chemkin_species
from .pchemkin import reactions as chemkin_reactions
from .pchemkin import (thermodynamics_dictionaries as
chemkin_thermodynamics_dictionaries)
from .pchemkin import kinetics as chemkin_kinetics
logger.info("Reading in {:s}".format(mech_txt))
mech_str = read_file(mech_txt)
if thm_txt:
logger.info("Reading in {:s}".format(thm_txt))
thm_str = read_file(thm_txt)
else:
logger.info("No thermo file. Looking for thermo data in {:s}."
.format(mech_txt))
thm_str = mech_str
logger.info("Finding species")
spcs = chemkin_species(mech_str)
spc_ck_idxs = tuple(range(1, len(spcs)+1))
logger.info("Finding reactions")
rxns = chemkin_reactions(mech_str)
rxn_ck_idxs = tuple(range(1, len(rxns)+1))
logger.info("Finding thermodynamics data")
thm_dcts = chemkin_thermodynamics_dictionaries(thm_str)
logger.info("Finding kinetics data")
kin_lst, reacs = chemkin_kinetics(mech_str)
spc_df = table_from_columns((spc_ck_idxs, spcs),
('chemkin_index', 'species'))
rxn_df = table_from_columns((rxn_ck_idxs, rxns),
('chemkin_index', 'reaction'))
for rxn in rxns:
if rxn not in reacs:
logger.info(rxn)
if kin_lst:
assert len(kin_lst) == len(rxns)
arrh_cols = tuple(zip(*kin_lst))
rxn_df = append_table_columns(rxn_df, arrh_cols, ARRH_COL_KEYS)
if thm_dcts:
nasa_lo_dct, nasa_hi_dct, nasa_t_dct = thm_dcts
nasa_lo_cols = tuple(zip(*map(nasa_lo_dct.__getitem__, spcs)))
nasa_hi_cols = tuple(zip(*map(nasa_hi_dct.__getitem__, spcs)))
nasa_t_cols = tuple(zip(*map(nasa_t_dct.__getitem__, spcs)))
thm_col_keys = NASA_LO_COL_KEYS + NASA_HI_COL_KEYS + NASA_T_COL_KEYS
thm_cols = nasa_lo_cols + nasa_hi_cols + nasa_t_cols
spc_df = append_table_columns(spc_df, thm_cols, thm_col_keys)
logger.info("Writing species to {:s}".format(spc_csv_out))
write_table_to_csv(spc_df, spc_csv_out)
logger.info("Writing reactions to {:s}".format(rxn_csv_out))
write_table_to_csv(rxn_df, rxn_csv_out)
def chemkin_id_reactions(rxn_csv, spc_csv, rxn_csv_out, spc_csv_out, logger):
""" determine reaction identifiers for CHEMKIN reactions
"""
from .iohelp import translate_chemkin_reaction
logger.info("Reading in {:s}".format(rxn_csv))
rxn_df = pandas.read_csv(rxn_csv)
logger.info("Reading in {:s}".format(spc_csv))
spc_df = pandas.read_csv(spc_csv)
assert table_has_column_keys(spc_df, ('species', 'species_id'))
logger.info("Canonicalizing species IDs")
sids = table_column(spc_df, 'species_id')
can_sids = tuple(map(canonical_species_identifier, sids))
spc_df = set_table_column(spc_df, 'species_id', can_sids)
sid_dct = dict(zip(*table_columns(spc_df, ('species', 'species_id'))))
rxns = table_column(rxn_df, 'reaction')
rids = tuple(translate_chemkin_reaction(rxn, sid_dct) for rxn in rxns)
can_rids = tuple(canonical_reaction_identifier(rid) if rid else None
for rid in rids)
rxn_df = set_table_column(rxn_df, 'reaction_id', can_rids)
spc_df = move_table_column_to_front(spc_df, 'species_id')
rxn_df = move_table_column_to_front(rxn_df, 'reaction_id')
logger.info("Writing species to {:s}".format(spc_csv_out))
write_table_to_csv(spc_df, spc_csv_out)
logger.info("Writing reactions to {:s}".format(rxn_csv_out))
write_table_to_csv(rxn_df, rxn_csv_out)
def reactions_to_chemkin(cls, rxn_csv, spc_csv, mech_txt_out, logger):
""" generate CHEMKIN files from CSVs
"""
rct_sid_col_keys = dict(REACTANT_SID_COL_KEYS)[cls]
prd_sid_col_keys = dict(PRODUCT_SID_COL_KEYS)[cls]
logger.info("Reading in {:s}".format(spc_csv))
spc_df = pandas.read_csv(spc_csv)
spc_col_keys = table_column_keys(spc_df)
assert 'species' in spc_col_keys and 'species_id' in spc_col_keys
spc_dct = dict(zip(*table_columns(spc_df, ('species_id', 'species'))))
def _chemkin_reaction_name(rct_sids, prd_sids):
rct_str = '+'.join(map(spc_dct.__getitem__, rct_sids))
prd_str = '+'.join(map(spc_dct.__getitem__, prd_sids))
rxn = '='.join([rct_str, prd_str])
return rxn
logger.info("Reading in {:s}".format(rxn_csv))
rxn_df = pandas.read_csv(rxn_csv)
rxn_col_keys = table_column_keys(rxn_df)
assert all(col_key in rxn_col_keys for col_key in rct_sid_col_keys)
assert all(col_key in rxn_col_keys for col_key in prd_sid_col_keys)
rct_sids_lst = tuple(zip(*table_columns(rxn_df, rct_sid_col_keys)))
prd_sids_lst = tuple(zip(*table_columns(rxn_df, prd_sid_col_keys)))
rxns = tuple(starmap(_chemkin_reaction_name,
zip(rct_sids_lst, prd_sids_lst)))
assert all(col_key in rxn_col_keys for col_key in ARRH_COL_KEYS)
arrh_cfts_lst = zip(*table_columns(rxn_df, ARRH_COL_KEYS))
rxn_fmt = '{:{width}s} {:10.3e} {:8.3f} {:12.3f}'
rxn_wd = max(map(len, rxns)) + 5
format_ = partial(rxn_fmt.format, width=rxn_wd)
rxn_block_str = '\n'.join(
format_(rxn, *arrh_cfts) for rxn, arrh_cfts in zip(rxns, arrh_cfts_lst)
if not any(map(is_empty_table_value, arrh_cfts)))
mech_str = '\n'.join(['REACTIONS', rxn_block_str, 'END'])
logger.info("Writing reactions to {:s}".format(mech_txt_out))
write_file(mech_txt_out, mech_str)
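# For illustration, rxn_fmt above renders one CHEMKIN reaction line per row,
# roughly like this (made-up Arrhenius coefficients):
#   H+O2=OH+O        1.040e+14    0.000   15286.000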
def reactions_init(cls, rxn_csv, spc_csv, rxn_csv_out, cdt_csv_out, logger):
""" initialize reactions
"""
_init = reactions_initializer(
cls=cls,
is_candidate=dict(REACTION_CANDIDATE_FINDERS)[cls],
reaction=dict(REACTION_FINDERS)[cls],
sid_cols=dict(REACTION_SID_COL_KEYS)[cls],
idx_cols=dict(REACTION_IDX_COL_KEYS)[cls]
)
return _init(spc_csv, rxn_csv, rxn_csv_out, cdt_csv_out, logger)
def reactions_run_old(cls, rxn_csv, spc_csv, rxn_rng_strs, tpl_txt, nodes,
run_dir, id2path, job_argv, logger):
""" reactions runner
"""
_run = reactions_runner(
cls=cls,
reaction_xyz_strings=dict(REACTION_XYZ_STRING_MAKERS)[cls],
reaction_input_string=dict(REACTION_INPUT_STRING_MAKERS)[cls],
sid_cols=dict(REACTION_SID_COL_KEYS)[cls],
idx_cols=dict(REACTION_IDX_COL_KEYS)[cls]
)
_run(spc_csv, rxn_csv, tpl_txt, rxn_rng_strs, nodes, run_dir, id2path,
job_argv, logger)
def divide(key, dir1, dir2, rxn_csv, rxn_csv_out, logger):
""" split reactions by key
"""
from .strid import is_radical_radical
from .strid import is_spin_balanced
if key == 'rad-rad':
meets_condition_ = is_radical_radical
elif key == 'high-spin':
meets_condition_ = is_spin_balanced
else:
raise ValueError("Unrecognized divide key: {:s}".format(key))
logger.info("Reading in {:s}".format(rxn_csv))
rxn_df = pandas.read_csv(rxn_csv)
rxn_df[key] = tuple(map(meets_condition_, rxn_df['reaction_id']))
rxn_df1 = rxn_df[rxn_df[key]].drop(columns=key)
rxn_df2 = rxn_df[~rxn_df[key]].drop(columns=key)
rxn_csv1_out = os.path.join(dir1, rxn_csv_out)
logger.info("Writing in-category reactions to {:s}"
.format(rxn_csv1_out))
if not os.path.exists(dir1):
os.mkdir(dir1)
write_table_to_csv(rxn_df1, rxn_csv1_out)
rxn_csv2_out = os.path.join(dir2, rxn_csv_out)
logger.info("Writing out-of-category reactions to {:s}"
.format(rxn_csv2_out))
if not os.path.exists(dir2):
os.mkdir(dir2)
write_table_to_csv(rxn_df2, rxn_csv2_out)
logger.info("Writing updated reaction table to {:s}".format(rxn_csv))
write_table_to_csv(rxn_df, rxn_csv)
def csv_reindex(table_csv, logger):
""" reindex a table
"""
logger.info("Reading in {:s}".format(table_csv))
table_df = pandas.read_csv(table_csv)
table_df = reindex_table(table_df)
logger.info("Writing updated {:s}".format(table_csv))
write_table_to_csv(table_df, table_csv)
def csv_sort(table_csv, col_key, descending, logger):
""" sort table by column
"""
logger.info("Reading in {:s}".format(table_csv))
table_df = pandas.read_csv(table_csv)
table_df = sort_table(table_df, col_key, descending=descending)
logger.info("Writing updated {:s}".format(table_csv))
write_table_to_csv(table_df, table_csv)
def csv_intersect(table_csvs, col_key, table_csv_out, logger):
""" intersect tables by column
"""
table_dfs = []
for table_csv in table_csvs:
logger.info("Reading in {:s}".format(table_csv))
table_df = pandas.read_csv(table_csv)
table_dfs.append(table_df)
table_df_out = intersect_tables(table_dfs, col_key)
logger.info("Writing {:s}".format(table_csv_out))
write_table_to_csv(table_df_out, table_csv_out)
def csv_merge(table_csvs, col_key, table_csv_out, logger):
""" merge tables by column
"""
table_dfs = []
for table_csv in table_csvs:
logger.info("Reading in {:s}".format(table_csv))
table_df = pandas.read_csv(table_csv)
table_dfs.append(table_df)
table_df_out = merge_tables(table_dfs, col_key)
logger.info("Writing {:s}".format(table_csv_out))
write_table_to_csv(table_df_out, table_csv_out)
# meta scripts
def reactions_initializer(cls, is_candidate, reaction, sid_cols, idx_cols):
""" initialize reactions
"""
assert cls in ('abstraction', 'addition', 'migration')
def _init(spc_csv, rxn_csv, rxn_csv_out, cdt_csv_out, logger):
logger.info("Reading in {:s}".format(rxn_csv))
rxn_df = pandas.read_csv(rxn_csv)
logger.info("Reading in species geometries from {:s}".format(spc_csv))
mgeo_dct = read_geometries(spc_csv)
logger.info("Reading thermo data from {:s}".format(spc_csv))
thv_dct = read_thermo_data(spc_csv)
if not thv_dct:
logger.info("No thermo data found.")
rxn_df_out = table_with_columns_like(rxn_df)
cdt_df_out = table_with_columns_like(rxn_df)
rxn_df_out = update_table_column_keys(rxn_df_out,
col_keys=sid_cols+idx_cols)
cdt_df_out = update_table_column_keys(cdt_df_out,
col_keys=('exception',))
rxn_df = update_table_column_keys(rxn_df, col_keys=('class',))
for rxn_row in iterate_table_row_dicts(rxn_df):
rid = rxn_row['reaction_id']
if is_candidate(rid):
logger.info('reaction: {:s}'.format(rid))
err = None
try:
rxn = reaction(rid, mgeo_dct, thv_dct)
except Exception as exc:
    err = exc  # keep a reference; the name bound by `as` is cleared after the except block
    logger.info(' exception: {:s}!'.format(str(exc)))
    rxn = None
if rxn:
sids, idxs = rxn
logger.info(' found {:s}!'.format(cls))
log_sids = ', '.join(
'{:s}: {:s}'.format(sid_col, sid)
for sid_col, sid in zip(sid_cols, sids))
log_idxs = ', '.join(
'{:s}: {:d}'.format(idx_col, idx)
for idx_col, idx in zip(idx_cols, idxs))
logger.info(' {:s}\n {:s}'.format(log_sids, log_idxs))
rxn_df = table_lookup_update(rxn_df, ('reaction_id', rid),
('class', cls))
rxn_row.update(zip(sid_cols, sids))
rxn_row.update(zip(idx_cols, idxs))
rxn_df_out = append_table_row_dicts(rxn_df_out, (rxn_row,))
else:
rxn_row['exception'] = err
cdt_df_out = append_table_row_dicts(cdt_df_out, (rxn_row,))
logger.info("Writing {:s} reactions to {:s}"
.format(cls, os.path.abspath(rxn_csv_out)))
rxn_df_out = reindex_table(rxn_df_out)
write_table_to_csv(rxn_df_out, rxn_csv_out)
logger.info("Writing left-over candidates to {:s}"
.format(os.path.abspath(cdt_csv_out)))
write_table_to_csv(cdt_df_out, cdt_csv_out)
logger.info("Writing updated reaction table to {:s}".format(rxn_csv))
write_table_to_csv(rxn_df, rxn_csv)
return _init
def reactions_setup_run(cls, rxn_csv, spc_csv, rxn_rng_strs, run_dir, id2path,
cmd_argv, logger):
""" write xyz files for the runner
"""
assert cls in ('abstraction', 'addition', 'migration')
reaction_xyz_strings = dict(REACTION_XYZ_STRING_MAKERS)[cls]
sid_cols = dict(REACTION_SID_COL_KEYS)[cls]
idx_cols = dict(REACTION_IDX_COL_KEYS)[cls]
logger.info("Reading in {:s}".format(rxn_csv))
rxn_df = pandas.read_csv(rxn_csv)
col_keys = table_column_keys(rxn_df)
assert RID_COL_KEY in col_keys and RXN_IDX_COL_KEY in col_keys
logger.info("Reading in species geometries from {:s}".format(spc_csv))
mgeo_dct = read_geometries(spc_csv)
if rxn_rng_strs:
rxn_idxs = _interpret_range_strings(rxn_rng_strs)
logger.info("Interpreted reaction index range argument: {:s}"
.format(repr(rxn_idxs)))
else:
logger.info("No reaction range argument. Running all reactions.")
rxn_idxs = table_column(rxn_df, RXN_IDX_COL_KEY)
if not os.path.exists(run_dir):
logger.info("Creating run directory {:s}".format(run_dir))
os.mkdir(run_dir)
def _create_job_dir(idx, rid, sids, idxs):
logger.info("reaction {:d}: {:s}".format(idx, rid))
logger.info(' indices: {:s}'.format(str(idxs)))
dname = id2path(rid)
dpath = os.path.join(run_dir, dname)
logger.info(" Creating job directory {:s}".format(dpath))
if not os.path.exists(dpath):
os.mkdir(dpath)
ret = (EMPTY, RXN_NOT_CREATED_VAL)
dxyz_dct = reaction_xyz_strings(sids, idxs, mgeo_dct)
if dxyz_dct:
dxyz_sids = dxyz_dct.keys()
dxyzs = dxyz_dct.values()
fnames = tuple(map('{:s}.xyz'.format, map(id2path, dxyz_sids)))
fpaths = tuple(os.path.join(dpath, fname) for fname in fnames)
for fpath, dxyz in zip(fpaths, dxyzs):
logger.info(" Writing {:s}".format(fpath))
write_file(fpath, dxyz)
ret = (dpath, RXN_CREATED_VAL)
else:
logger.info(" Failed to write .xyz files.")
if cmd_argv:
cmd_str = ' '.join(cmd_argv)
logger.info(" running command `{:s}` in {:s}"
.format(cmd_str, dpath))
try:
subprocess.check_call(cmd_argv, cwd=dpath)
except Exception as err:
logger.info(" {:s}".format(err))
logger.info('')
return ret
logger.info("Writing job .xyz files")
sub_rxn_df = sql_where_in(rxn_df, RXN_IDX_COL_KEY, rxn_idxs)
sub_idxs = tuple(sql_select_one(sub_rxn_df, RXN_IDX_COL_KEY))
sub_rids = tuple(sql_select_one(sub_rxn_df, RID_COL_KEY))
sub_sids_lst = tuple(zip(*(
sql_select_one(sub_rxn_df, sid_col) for sid_col in sid_cols)))
sub_idxs_lst = tuple(zip(*(
sql_select_one(sub_rxn_df, idx_col) for idx_col in idx_cols)))
paths, stats = zip(*starmap(
_create_job_dir, zip(sub_idxs, sub_rids, sub_sids_lst, sub_idxs_lst)))
rxn_df = update_table_column_keys(rxn_df,
(RXN_PATH_COL_KEY, RXN_STAT_COL_KEY))
rxn_df = update_column_by_index(rxn_df, row_indices(sub_rxn_df),
RXN_PATH_COL_KEY, paths)
rxn_df = update_column_by_index(rxn_df, row_indices(sub_rxn_df),
RXN_STAT_COL_KEY, stats)
logger.info("Writing updated reaction table to {:s}".format(rxn_csv))
write_table_to_csv(rxn_df, rxn_csv)
def reactions_run(cls, rxn_csv, rxn_rng_strs, tpl_txt, nodes, job_argv,
logger):
""" reactions parallel runner
"""
assert cls in ('abstraction', 'addition', 'migration')
if not hasattr(job_argv, '__iter__'):
raise ValueError("Missing run command.")
sid_cols = dict(REACTION_SID_COL_KEYS)[cls]
logger.info("Reading in {:s}".format(rxn_csv))
rxn_df = pandas.read_csv(rxn_csv)
col_keys = table_column_keys(rxn_df)
assert (RID_COL_KEY in col_keys and RXN_IDX_COL_KEY in col_keys and
RXN_PATH_COL_KEY in col_keys and RXN_STAT_COL_KEY in col_keys)
if rxn_rng_strs:
rxn_idxs = _interpret_range_strings(rxn_rng_strs)
logger.info("Interpreted reaction index range argument: {:s}"
.format(repr(rxn_idxs)))
else:
logger.info("No reaction range | |
<reponame>mih/multimatch
#!/usr/bin/python3
import numpy as np
import math
import sys
import collections
def cart2pol(x, y):
"""Transform cartesian into polar coordinates.
:param x: float
:param y : float
:return: rho: float, length from (0,0)
:return: theta: float, angle in radians
"""
rho = np.sqrt(x ** 2 + y ** 2)
theta = np.arctan2(y, x)
return rho, theta
def calcangle(x1, x2):
"""Calculate angle between to vectors (saccades).
:param: x1, x2: list of float
:return: angle: float, angle in degrees
"""
angle = math.degrees(
math.acos(
np.dot(x1, x2) / (np.linalg.norm(x1) * np.linalg.norm(x2))))
return angle
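def _example_saccade_geometry():
    """Hedged usage sketch (not part of the original module): cart2pol gives
    a saccade's length and direction, calcangle the angle between two
    saccade vectors."""
    rho, theta = cart2pol(3.0, 4.0)            # rho == 5.0, theta ~ 0.927 rad
    angle = calcangle([1.0, 0.0], [0.0, 1.0])  # 90.0 degrees
    return rho, theta, angle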
def gen_scanpath_structure(data):
"""Transform a fixation vector into a vector based scanpath representation.
Takes an nx3 fixation vector (start_x, start_y, duration) in the form of
a record array and transforms it into the appropriate vector-based scanpath
representation. Indices are as follows:
0: fixation_x
1: fixation_y
2: fixation_dur
3: saccade_x
4: saccade_y
5: saccade_lenx
6: saccade_leny
7: saccade_theta
8: saccade_rho
:param: data: record array
:return: eyedata: OrderedDict of lists, vector-based scanpath representation
"""
fixation_x = []
fixation_y = []
fixation_dur = []
saccade_x = []
saccade_y = []
saccade_lenx = []
saccade_leny = []
saccade_theta = []
saccade_rho = []
# get the number of rows
length = np.shape(data)[0]
# keep coordinates and durations of fixations
for i in range(0, length):
fixation_x.append(data[i]['start_x'])
fixation_y.append(data[i]['start_y'])
fixation_dur.append(data[i]['duration'])
# fixations are the start coordinates for saccades
for i in range(0, length - 1):
saccade_x.append(data[i]['start_x'])
saccade_y.append(data[i]['start_y'])
# calculate saccade length and angle from vector lengths between fixations
for i in range(1, length):
saccade_lenx.append(fixation_x[i] - saccade_x[i - 1])
saccade_leny.append(fixation_y[i] - saccade_y[i - 1])
rho, theta = cart2pol(saccade_lenx[i - 1], saccade_leny[i - 1])
saccade_rho.append(rho)
saccade_theta.append(theta)
# append everything into an ordered dict.
eyedata = collections.OrderedDict()
eyedata['fixation_x'] = fixation_x
eyedata['fixation_y'] = fixation_y
eyedata['fixation_dur'] = fixation_dur
eyedata['saccade_x'] = saccade_x
eyedata['saccade_y'] = saccade_y
eyedata['saccade_lenx'] = saccade_lenx
eyedata['saccade_leny'] = saccade_leny
eyedata['saccade_theta'] = saccade_theta
eyedata['saccade_rho'] = saccade_rho
return eyedata
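def _example_scanpath_structure():
    """Hedged usage sketch (illustrative fixations, not part of the original
    module): build a minimal fixation record array with the field names used
    above and derive its vector-based representation."""
    fix = np.rec.fromrecords(
        [(100.0, 120.0, 0.25), (300.0, 180.0, 0.30), (250.0, 400.0, 0.20)],
        names=['start_x', 'start_y', 'duration'])
    return gen_scanpath_structure(fix)  # OrderedDict of fixation/saccade lists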
def keepsaccade(i,
j,
sim_lenx,
sim_leny,
sim_x,
sim_y,
sim_theta,
sim_len,
sim_dur,
data
):
"""
Helper function for scanpath simplification. If no simplification can be
performed on a particular saccade, this function stores the original data.
:param i: current index
:param j: current index
:param sim_lenx: list
:param sim_leny: list
:param sim_x: list
:param sim_y: list
:param sim_theta: list
:param sim_len: list
:param sim_dur: list
:param data: eyedata, list of list
"""
sim_lenx.insert(j, data['saccade_lenx'][i])
sim_leny.insert(j, data['saccade_leny'][i])
sim_x.insert(j, data['saccade_x'][i])
sim_y.insert(j, data['saccade_y'][i])
sim_theta.insert(j, data['saccade_theta'][i])
sim_len.insert(j, data['saccade_rho'][i])
sim_dur.insert(j, data['fixation_dur'][i])
i += 1
j += 1
return sim_lenx, sim_leny, sim_x, sim_y, sim_theta, sim_len, sim_dur, i, j
def simlen(data, TAmp, TDur):
"""Simplify scanpaths based on saccadic length.
Simplify consecutive saccades if their length is smaller than the
threshold TAmp and the duration of the closest fixations is lower
than threshold TDur.
:param: data: array-like, list of lists, output of gen_scanpath_structure
:param: TAmp: float, length in px
:param: TDur: float, time in seconds
:return: eyedata: list of lists, one iteration of length based simplification
"""
if len(data['saccade_x']) < 1:
return data
# if the scanpath is long enough
else:
i = 0
j = 0
# initialize new empty lists for simplified results
sim_dur = []
sim_x = []
sim_y = []
sim_lenx = []
sim_leny = []
sim_theta = []
sim_len = []
# while we don't run into index errors
while i <= len(data['saccade_x']) - 1:
# if saccade is the last one
if i == len(data['saccade_x']) - 1:
# and if saccade has short length:
if data['saccade_rho'][i] < TAmp:
# and if the fixation duration is short:
if (data['fixation_dur'][-1] < TDur) or (data['fixation_dur'][-2] < TDur):
# calculate sum of local vectors for simplification
v_x = data['saccade_lenx'][-2] + data['saccade_lenx'][-1]
v_y = data['saccade_leny'][-2] + data['saccade_leny'][-1]
rho, theta = cart2pol(v_x, v_y)
# save them in the new vectors
sim_lenx[j - 1] = v_x
sim_leny[j - 1] = v_y
sim_theta[j - 1] = theta
sim_len[j - 1] = rho
sim_dur.insert(j, data['fixation_dur'][i - 1])
j -= 1
i += 1
# if fixation duration is long:
else:
# insert original data in new list -- no simplification
sim_lenx, sim_leny, sim_x, sim_y, sim_theta, sim_len, sim_dur, i, j = keepsaccade(i,
j,
sim_lenx,
sim_leny,
sim_x,
sim_y,
sim_theta,
sim_len,
sim_dur,
data)
# if saccade doesn't have short length:
else:
# insert original data in new list -- no simplification
sim_lenx, sim_leny, sim_x, sim_y, sim_theta, sim_len, sim_dur, i, j = keepsaccade(i,
j,
sim_lenx,
sim_leny,
sim_x,
sim_y,
sim_theta,
sim_len,
sim_dur,
data)
# if saccade is not the last one
else:
# and if saccade has short length
if (data['saccade_rho'][i] < TAmp) and (i < len(data['saccade_x']) - 1):
# and if fixation durations are short
if (data['fixation_dur'][i + 1] < TDur) or (data['fixation_dur'][i] < TDur):
# calculate sum of local vectors in x and y length for simplification
v_x = data['saccade_lenx'][i] + data['saccade_lenx'][i + 1]
v_y = data['saccade_leny'][i] + data['saccade_leny'][i + 1]
rho, theta = cart2pol(v_x, v_y)
# save them in the new vectors
sim_lenx.insert(j, v_x)
sim_leny.insert(j, v_y)
sim_x.insert(j, data['saccade_x'][i])
sim_y.insert(j, data['saccade_y'][i])
sim_theta.insert(j, theta)
sim_len.insert(j, rho)
# add the old fixation duration
sim_dur.insert(j, data['fixation_dur'][i])
i += 2
j += 1
# if fixation durations are long
else:
# insert original data in new lists -- no simplification
sim_lenx, sim_leny, sim_x, sim_y, sim_theta, sim_len, sim_dur, i, j = keepsaccade(i,
j,
sim_lenx,
sim_leny,
sim_x,
sim_y,
sim_theta,
sim_len,
sim_dur,
data)
# if saccade doesn't have short length
else:
# insert original data in new list -- no simplification
sim_lenx, sim_leny, sim_x, sim_y, sim_theta, sim_len, sim_dur, i, j = keepsaccade(i,
j,
sim_lenx,
sim_leny,
sim_x,
sim_y,
sim_theta,
sim_len,
sim_dur,
data)
# append the last fixation duration
sim_dur.append(data['fixation_dur'][-1])
# append everything into an ordered dict.
eyedata = collections.OrderedDict()
eyedata['fixation_dur'] = sim_dur
eyedata['saccade_x'] = sim_x
eyedata['saccade_y'] = sim_y
eyedata['saccade_lenx'] = sim_lenx
eyedata['saccade_leny'] = sim_leny
eyedata['saccade_theta'] = sim_theta
eyedata['saccade_rho'] = sim_len
return eyedata
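def _example_length_simplification(data, TAmp=100.0, TDur=0.3):
    """Hedged usage sketch (assumed threshold values, not part of the
    original module): apply length-based simplification repeatedly until the
    scanpath stops changing, since each call performs only one merging pass
    over neighbouring short saccades."""
    prev = -1
    while len(data['saccade_x']) != prev:
        prev = len(data['saccade_x'])
        data = simlen(data, TAmp, TDur)
    return data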
def simdir(data,
TDir,
TDur
):
"""Simplify scanpaths based on angular relations between saccades (direction).
Simplify consecutive saccades if the angle between them is smaller than the
threshold TDir and the duration of the intermediate fixations is lower
than threshold TDur.
:param: data: array-like, list of lists, output of gen_scanpath_structure
:param: TDir: float, angle in degrees
:param: TDur: float, time in seconds
:return: eyedata: list of lists, one iteration of direction based simplification
"""
if len(data['saccade_x']) < 1:
return data
# if the scanpath is long enough
else:
i = 0
j = 0
# initialize empty lists
sim_dur = []
sim_x = []
sim_y = []
sim_lenx = []
sim_leny = []
sim_theta = []
sim_len = []
# while we don't run into index errors
while i <= len(data['saccade_x']) - 1:
if i < len(data['saccade_x']) - 1:
# lets check angles
v1 = [data['saccade_lenx'][i], data['saccade_leny'][i]]
v2 = [data['saccade_lenx'][i + 1], data['saccade_leny'][i + 1]]
angle = calcangle(v1, v2)
else:
# an angle of infinite size won't go into any further loop
angle = float('inf')
# if the angle is small and its not the last saccade
if (angle < TDir) & (i < len(data['saccade_x']) - 1):
# if the fixation duration is short:
if data['fixation_dur'][i + 1] < TDur:
# if the fixation durations are short:
# calculate the sum of local vectors
v_x = data['saccade_lenx'][i] + data['saccade_lenx'][i + 1]
v_y = data['saccade_leny'][i] + data['saccade_leny'][i + 1]
rho, theta = cart2pol(v_x, v_y)
# save them in the new vectors
sim_lenx.insert(j, v_x)
sim_leny.insert(j, v_y)
sim_x.insert(j, data['saccade_x'][i])
sim_y.insert(j, data['saccade_y'][i])
sim_theta.insert(j, theta)
sim_len.insert(j, rho)
# add the fixation duration
sim_dur.insert(j, data['fixation_dur'][i])
i += 2
j += 1
else:
# insert original data in new list -- no simplification
sim_lenx, sim_leny, sim_x, sim_y, sim_theta, sim_len, sim_dur, i, j = keepsaccade(i,
j,
sim_lenx,
sim_leny,
sim_x,
sim_y,
sim_theta,
sim_len,
sim_dur,
data)
# elif the angle is small, but its the last saccade:
elif (angle < TDir) & (i == len(data['saccade_x']) - 1):
# if the fixation duration is short:
if data['fixation_dur'][i + 1] < TDur:
# calculate sum of local vectors
v_x = data['saccade_lenx'][i - 2] + data['saccade_lenx'][i - 1]
v_y = data['saccade_leny'][i - 2] + data['saccade_leny'][i - 1]
#!/usr/bin/env python
#
# Copyright 2014 Tuenti Technologies S.L.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import os.path
import logging
import datetime
import sh
from repoman.repository import Repository as BaseRepo, \
RepositoryError, MergeConflictError
from repoman.changeset import Changeset
from repoman.merge import MergeStrategy
from repoman.reference import Reference
logger = logging.getLogger(__name__)
class GitCmd(object):
def __init__(self, path):
self.path = path
def __call__(self, *args, **kwargs):
try:
cmd = sh.git(_cwd=self.path, _tty_out=False, *args, **kwargs)
except sh.ErrorReturnCode as e:
raise RepositoryError("'%s' failed in %s: %s" % (e.full_cmd, self.path, e))
if '_iter' in kwargs and kwargs['_iter'] != None:
return cmd
# For convenience, remove last new line of command output
return re.sub('(\n|\n\r)$', '', cmd.stdout.decode('utf-8'))
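def _example_gitcmd(path):
    """Hedged usage sketch (assumed checkout path, not part of the original
    module): GitCmd turns positional and keyword arguments into a `git`
    invocation executed inside the repository working directory."""
    git = GitCmd(path)
    head = git('rev-parse', 'HEAD')                         # current commit hash
    unmerged = git('diff', name_only=True, diff_filter='U').split()
    return head, unmerged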
class GitMerge(MergeStrategy):
def __init__(self, *args, **kwargs):
super(GitMerge, self).__init__(*args, **kwargs)
self._git = GitCmd(self.repository.path)
def _validate_local_branch(self):
if self.local_branch is None:
self.local_branch = self.repository.get_branch()
return
if not isinstance(self.local_branch, Reference):
raise RepositoryError(
"In merge, local branch must be a Reference object")
self._git('checkout', self.local_branch.name)
def perform(self):
self._validate_local_branch()
self._git('merge', '--no-ff', '--no-commit',
self.other_rev.hash,
_ok_code=[0, 1])
conflicts = self._git('diff', name_only=True, diff_filter='U').split()
if conflicts:
raise MergeConflictError("Conflicts found: merging %s failed" %
", ".join(conflicts))
def abort(self):
self._git('merge', '--abort')
def commit(self):
if len(self._git('status', porcelain=True, _iter=True)) == 0:
return None
commit_message = self.repository.message_builder.merge(
other_branch=self.other_branch_name,
other_revision=self.other_rev.shorthash,
local_branch=self.local_branch.name,
local_revision=self.local_branch.get_changeset().shorthash
)
author = str(self.repository.signature)
self._git("commit", m=commit_message, author=author)
return self.repository.tip()
class GitMergeFastForward(GitMerge):
def __init__(self, *args, **kwargs):
super(GitMergeFastForward, self).__init__(*args, **kwargs)
self._ff_merged = False
def perform(self):
self._validate_local_branch()
try:
self._git('merge', '--ff-only', self.other_rev.hash)
self._ff_merged = True
except:
# TODO: Is this what we want? log something in any case
super(GitMergeFastForward, self).perform()
def abort(self):
if self._ff_merged:
self._git('reset', 'HEAD^', hard=True)
return
super(GitMergeFastForward, self).abort()
def commit(self):
if not self._ff_merged:
return super(GitMergeFastForward, self).commit()
return self.repository.tip()
class Repository(BaseRepo):
"""
Models a Git Repository
"""
def __init__(self, *args, **kwargs):
super(Repository, self).__init__(*args, **kwargs)
self._git = GitCmd(self.path)
def __getitem__(self, key):
"""
Implements access thorugh [] operator for changesets
key -- changeset hash or branch name
"""
return self._new_changeset_object(key)
def _new_changeset_object(self, refname):
"""
Return a new Changeset object with the provided info
"""
tags = ' '.join(self.get_changeset_tags(refname))
info, body = self._git("log", "-1",
"--pretty=%H,%ct,%cn%n%B",
refname).split("\n", 1)
sha1, committer_time, committer_name = info.split(",", 2)
initial_values = [
None, # Local changeset that does not exist in GIT
sha1,
tags,
None,
committer_name,
body,
datetime.datetime.utcfromtimestamp(float(committer_time)),
]
c = Changeset(self, tuple(initial_values))
return c
def get_changeset_branches(self, changeset):
""" Inherited method
:func:`~repoman.repository.Repository.get_changeset_branches`
"""
branches = self.get_branches()
branch_contains = lambda branch: self.get_ancestor(
changeset, branch.get_changeset()).hash == changeset.hash
cs_branches = map(lambda b: b.name, filter(branch_contains, branches))
return cs_branches
def branch(self, name):
"""Inherited method :func:`~repoman.repository.Repository.branch` """
self._git("checkout", "-B", name)
return self._new_branch_object(name)
def tag(self, name, revision=None, message=None):
"""Inherited method :func:`~repoman.repository.Repository.tag` """
if not revision:
revision = 'HEAD'
args = ["tag", name, revision]
if message:
args += ["-m", message]
args += ["-a"]
self._git(*args)
return self._new_tag_object(name)
def strip(self, changeset):
"""Inherited method :func:`~repoman.repository.Repository.strip` """
self._git('reset', '--hard', '%s^' % changeset.hash)
def branch_exists(self, branch_name):
"""Inherited method :func:`~repoman.repository.Repository.branch_exists`
"""
for branch in self.get_branches():
if branch.name == branch_name:
return True
return False
def tag_exists(self, tag_name):
"""Inherited method :func:`~repoman.repository.Repository.tag_exists`
"""
return tag_name in self.tags()
def tip(self):
"""Inherited method :func:`~Repository.tip` """
return self['HEAD']
def get_ancestor(self, cs1, cs2):
"""Inherited method :func:`~repoman.repository.Repository.get_ancestor`
"""
if not cs1 or not cs2:
error = "Error getting ancestor, " +\
"either rev1 or rev2 are None: %s , %s" % (cs1, cs2)
logger.error(error)
raise RepositoryError(error)
return self[self._git('merge-base', cs1.hash, cs2.hash)]
def get_branches(self, active=False, closed=False):
"""Inherited method :func:`~repoman.repository.Repository.get_branches`
"""
branches = list([
branch_name.strip() for branch_name in
self._git(
"for-each-ref",
"refs/heads",
format="%(refname:short)",
_iter=True,
)
])
try:
# Add current branch even if it doesn't have commits
current = self.get_branch()
if current.name not in branches:
branches.append(current.name)
except:
# TODO: Better handle error cases here, related with HEAD not existing
pass
return [self._new_branch_object(branch) for branch in branches if branch != 'HEAD']
def exterminate_branch(self, branch_name, repo_origin, repo_dest):
"""Inherited method
:func:`~repoman.repository.Repository.exterminate_branch`
"""
if not self.terminate_branch(branch_name, repo_origin, repo_dest):
return
# Deleting remotely
self.push(repo_origin, repo_dest, rev='', ref_name=branch_name)
def terminate_branch(self, branch_name, repo_origin, repo_dest):
"""Inherited method
:func:`~repoman.repository.Repository.terminate_branch`
"""
if not self.branch_exists(branch_name):
return False
current = None
try:
current = self._git('rev-parse', '--abbrev-ref', 'HEAD')
except:
pass
if current != None and current == branch_name:
self._git('checkout', '--detach')
self._git('branch', '-D', branch_name)
return True
def get_branch(self, branch_name=None):
"""Inherited method
:func:`~repoman.repository.Repository.get_branch`
"""
if not branch_name:
branch_name = self._git("rev-parse", "--abbrev-ref", "HEAD")
else:
if not self.branch_exists(branch_name):
raise RepositoryError('Branch %s does not exist in repo %s'
% (branch_name, self.path))
return self._new_branch_object(branch_name)
def get_revset(self, cs_from=None, cs_to=None, branch=None):
"""Inherited method
:func:`~repoman.repository.Repository.get_revset`
"""
if branch is not None:
b = self.get_branch(branch)
if cs_to is None:
cs_to = b.get_changeset().hash
else:
cs_to = self.get_ancestor(self[cs_to], b.get_changeset()).hash
if cs_to is None:
cs_to = 'HEAD'
if cs_from is None:
cs = self._git(
'log', '--pretty=%H', '--reverse',
cs_to,
_iter=True)
else:
try:
# If cs_from is not an ancestor of cs_to we shouldn't output
# anything
self._git('merge-base', '--is-ancestor', cs_from, cs_to)
except:
return
rev_range = "%s..%s" % (cs_from, cs_to)
cs = self._git(
'log', '--pretty=%H', '--reverse', '--ancestry-path',
rev_range,
_iter=True)
# When printing git log ranges, it doesn't include the root one
yield self._new_changeset_object(cs_from)
for c in cs:
yield self._new_changeset_object(c.strip())
def pull(self, remote, revision=None, branch=None):
"""Inherited method
:func:`~repoman.repository.Repository.pull`
"""
git_dir = os.path.join(
self.path, sh.git('rev-parse', '--git-dir', _cwd=self.path).strip())
git = GitCmd(git_dir)
refspec = '+refs/*:refs/*'
if branch != None:
refspec = '+refs/heads/%s:refs/heads/%s' % (branch, branch)
logger.debug("Executing git -c core.bare=true fetch %s %s" % (remote, refspec))
output = git('-c', 'core.bare=true', 'fetch', remote, refspec, _err_to_out=True)
logger.debug("Output:\n%s" % output)
self._clean()
def push(self, orig, dest, rev=None, ref_name=None, force=False):
"""Inherited method
:func:`~repoman.repository.Repository.push`
"""
all_tags_option = "--tags"
all_notes_refspec = "refs/notes/*:refs/notes/*"
if rev is None and ref_name is None:
# Push everything
refspec = "refs/*:refs/*"
elif rev is None:
refspec = "%s:%s" % (ref_name, ref_name)
elif ref_name is None:
raise RepositoryError(
"When pushing, revision specified but not reference name")
else:
if self.tag_exists(ref_name):
# We don't know what this ref is in remote, but here it is a tag
ref_name = "refs/tags/%s" % ref_name
all_tags_option = ""
else:
# In any other case, we assume it is a branch
ref_name = "refs/heads/%s" % ref_name
refspec = "%s:%s" % (rev, ref_name)
if all_tags_option:
self._git("push", dest, refspec, all_tags_option,
all_notes_refspec, f=force)
else:
self._git("push", dest, refspec, all_notes_refspec,
f=force)
return self.tip()
def _merge(self, local_branch=None, other_rev=None,
other_branch_name=None, dry_run=False, strategy=GitMerge):
merge = strategy(self, local_branch, other_rev, other_branch_name)
merge.perform()
if dry_run:
merge.abort()
return None
return merge.commit()
def merge(self, local_branch=None, other_rev=None,
other_branch_name=None,
dry_run=False):
"""Inherited method
:func:`~repoman.repository.Repository.merge`
"""
return self._merge(local_branch, other_rev, other_branch_name, dry_run,
strategy=GitMerge)
def merge_fastforward(self, local_branch=None, other_rev=None,
other_branch_name=None,
dry_run=False):
return self._merge(local_branch, other_rev, other_branch_name, dry_run,
strategy=GitMergeFastForward)
def add(self, files):
if isinstance(files, str):
files = [files]
if len(files) > 0:
self._git("add", *files)
def commit(self, message, custom_parent=None,
allow_empty=False):
"""Inherited method
:func:`~repoman.repository.Repository.commit`
"""
status = self._git('status', porcelain=True, _iter=True)
if not status and not custom_parent and not allow_empty:
logger.debug("Nothing to commit, repository clean")
return None
# TODO: If custom_parent is deprecated, we can remove this code
# and use git commit directly instead of write-tree and commit-tree
parents = []
for head in ('HEAD', custom_parent, 'MERGE_HEAD'):
try:
parent = self[head].hash
if parent not in parents:
parents.append(parent)
except:
pass
parent_args = []
for parent in parents:
parent_args += ['-p', parent]
# TODO: It currently mimics previous implementation, not sure if
# this is what we want. It automatically adds modified files only, with
# other statuses it may generate an empty commit even if it was not
# allowed.
allow_empty = True
modified = []
for s in status:
file_status, path = s.strip().split(maxsplit=1)
path = path.replace('"', '')
if file_status == 'M':
modified.append(path)
self.add(modified)
tree = self._git('write-tree').strip()
env = os.environ.copy()
env.update({
'GIT_AUTHOR_NAME': self.signature.user,
'GIT_AUTHOR_EMAIL': self.signature.email,
'GIT_COMMITTER_NAME': self.signature.user,
'GIT_COMMITTER_EMAIL': self.signature.email,
})
commit = self._git('commit-tree', tree, '-m', message, *parent_args, _env=env).strip()
self._git('reset', '--hard', commit)
return self.tip()
def update(self, ref):
"""Inherited method
:func:`~repoman.repository.Repository.update`
"""
self._clean()
self._git("checkout", ref)
return self.tip()
def _clean(self):
"""
Clean up the working copy. More information about Git clean up:
http://stackoverflow.com/questions/22620393/git-remove-local-changes
| |
from farms C and D to the blending station. This calculation assumed the milk would be delivered at its original concentration, which would require at least two trucks to keep the sources of milk separated until they reach the blending facility. The plan does not allow pooling these sources prior to transport; otherwise they would need to be "unblended" to meet the differing quality requirements of the customers.
# ## Option 3. Pool delivery from remote farms
#
# Comparing Option 1 with Option 2 shows there is significantly more profit to be earned by purchasing raw milk from the remote farms. But that option requires an additional truck to keep the supplies separated during transport. Otherwise mixing raw milk from the remote farms would result in a uniform mixture that would not meet the requirements of both customers.
#
# Because only one truck with a single tank is available for transport from the remote farms, the new problem is to combine purchases from the remote farms into a single pool of uniform composition that can be blended with milk from local farms to meet individual customer requirements. Compared to Option 2, the profit potential may be reduced due to pooling, but at least it does not require an additional truck.
#
# This is the pooling problem. There are several mathematical formulations of this problem in the literature. The first analysis here uses a formulation called the "p-parameterization" in which the pool composition is a new decision variable $p$. The other new decision variables are $x_r$, the amounts of raw milk purchased from remote farms $r\in R$, and $y_c$, the amounts delivered to customer $c\in C$ from the pool.
#
# The profit objective now includes the cost of purchasing raw milk from the remote farms and the income received for selling material from the pool.
#
# $$
# \begin{align*}
# \text{Profit} & = \sum_{(l,c)\ \in\ L \times C} (\text{price}_c - \text{cost}_l)\ z_{l,c}
# + \sum_{c\in C} \text{price}_c y_{c} - \sum_{r\in R} \text{cost}_r x_{r}
# \end{align*}
# $$
#
# The product delivered to each customer from local farms and the pool can not exceed demand.
#
# $$
# \begin{align*}
# \sum_{l\in L} z_{l, c} + y_{c} & \leq \text{demand}_{c} & \forall c\in C
# \end{align*}
# $$
#
# Purchases from the remote farms and the amounts delivered to customers from the pool must balance.
#
# $$
# \begin{align*}
# \sum_{r\in R}x_{r} & = \sum_{c\in C} y_{c} \\
# \end{align*}
# $$
#
# The average milk fat composition of the pool, $p$, must satisfy an overall balance on milk fat entering the pool from the remote farms and the milk fat delivered to customers.
#
# $$
# \begin{align*}
# \sum_{r\in R}\text{conc}_{r}\ x_{r} & = \underbrace{p \sum_{c\in C} y_{c}}_{\text{bilinear}}
# \end{align*}
# $$
#
# Finally, the milk fat required by each customer $c\in C$ satisfies a blending constraint.
#
# $$
# \begin{align*}
# \underbrace{p y_{c}}_{\text{bilinear}} + \sum_{(l,c)\ \in\ L \times C} \text{conc}_{l}\ z_{l,c}
# & \geq \text{conc}_{c}\ (\sum_{l\in L} z_{l, c} + y_{c})
# & \forall c \in C
# \end{align*}
# $$
#
# The last two constraints include bilinear terms arising from the product of decision variable $p$ with decision variables $y_c$ for all $c\in C$.
#
# The bilinear terms have a profound consequence on the nature of the optimization problem. To demonstrate, the following cell creates a linear program to maximize profit as a function of $p$, then explores how profit changes as a function of parameter $p$.
# In[5]:
import pyomo.environ as pyo
def milk_pooling(p=0, q="fat"):
m = pyo.ConcreteModel('Milk Pooling Model')
# define sources
m.L = pyo.Set(initialize=local_suppliers.index)
m.R = pyo.Set(initialize=remote_suppliers.index)
# define customers
m.C = pyo.Set(initialize=customers.index)
# define flowrates
m.x = pyo.Var(m.R, domain=pyo.NonNegativeReals)
m.y = pyo.Var(m.C, domain=pyo.NonNegativeReals)
m.z = pyo.Var(m.L * m.C, domain=pyo.NonNegativeReals)
m.p = pyo.Param(default=p)
@m.Objective(sense=pyo.maximize)
def profit(m):
return + sum(m.z[l, c]*(customers.loc[c, "price"] - suppliers.loc[l, "cost"]) for l, c in m.L * m.C) + sum(m.y[c]*customers.loc[c, "price"] for c in m.C) - sum(m.x[r]*suppliers.loc[r, "cost"] for r in m.R)
@m.Constraint(m.C)
def customer_demand(m, c):
return sum(m.z[l, c] for l in m.L) + m.y[c] <= customers.loc[c, "demand"]
@m.Constraint()
def pool_balance(m,):
return sum(m.x[r] for r in m.R) == sum(m.y[c] for c in m.C)
@m.Constraint()
def pool_quality(m):
return sum(suppliers.loc[r, q] * m.x[r] for r in m.R) == m.p * sum(m.x[r] for r in m.R)
@m.Constraint(m.C)
def customer_quality(m, c):
return m.p * m.y[c] + sum(suppliers.loc[l, q] * m.z[l, c] for l in m.L) >= customers.loc[c, q] * (sum(m.z[l, c] for l in m.L) + m.y[c])
pyo.SolverFactory('cbc').solve(m)
return m
p = 0.04
m = milk_pooling(p)
print(f"\nprofit = {m.profit():0.2f}")
# The result shows the profit if the pool of milk transported from the remote farms has a milk fat content $p = 0.04$. The profit of 100,000 is better than the 81,000 earned for business as usual with just the local suppliers, but falls short of the 122,441 earned if the remote milk supply could be transported without pooling.
#
# The following cell presents a full report of the solution.
# In[6]:
def report_solution(m):
# Supplier report
S = suppliers.copy()
for l in m.L:
for c in m.C:
S.loc[l, c] = m.z[l, c]()
for r in m.R:
S.loc[r, "Pool"] = m.x[r]()
S = S.fillna(0)
S["Amount"] = S[m.C].sum(axis=1) + S["Pool"]
S["Expense"] = S["Amount"]*S["cost"]
# Customer report
C = customers.copy()
for c in m.C:
for l in m.L:
C.loc[c, l] = m.z[l, c]()
for c in m.C:
C.loc[c, "Pool"] = m.y[c]()
C = C.fillna(0)
C["Amount"] = C[m.L].sum(axis=1) + C["Pool"]
C["fat delivered"] = (sum(C[l]*S.loc[l, "fat"] for l in m.L) + C["Pool"] * m.p())/C["Amount"]
C["Income"] = C["Amount"] * C["price"]
print(m)
print(f"\npool composition = {m.p()}")
print(f"profit = {m.profit():0.2f}")
print(f"\nSupplier Report\n")
display(S.round(4))
print(f"\nCustomer Report\n")
display(C.round(4))
report_solution(m)
# ## Profit depends on pool composition $p$
# At this stage the calculations find the maximum profit for a given value of $p$. The challenge, of course, is that the optimal value of $p$ is unknown. The following cell computes profits over a range of $p$.
# In[7]:
p_plot = np.linspace(0.025, 0.055, 200)
profit_plot = [milk_pooling(_).profit() for _ in p_plot]
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(p_plot, profit_plot)
ax.set_title("Milk Pooling")
ax.set_xlabel("Pool composition p")
ax.set_ylabel("Profit")
ax.grid(True)
# The results show the maximum achievable profit with pooling is over 102,000. What is needed is an optimization technique that can solve for the optimal pool composition and profit. This plot also demonstrates how the non-convex bilinear constraints can produce multiple local maxima and saddle points.
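# As a quick check, the grid scan above can be queried for the best composition it found. This is a minimal sketch, assuming the `p_plot` and `profit_plot` arrays from the previous cell are in scope.
i_best = np.argmax(profit_plot)
print(f"best p on the grid = {p_plot[i_best]:0.4f}, profit = {profit_plot[i_best]:0.2f}")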
# ## Convex Approximation
#
# Two of the constraints in the p-parameterized milk pooling model have bilinear terms $p y_c$ that are the product of decision variables $p$ and $y_c$ for $c\in C$. Designating the value of these terms as $w_c = p y_c$ the balance on milk fat in the inflow and outflow from the pool becomes a linear constraint
#
# $$
# \begin{align*}
# \sum_{r\in R}\text{conc}_{r}\ x_{r} & = \sum_{c\in C} w_{c}
# \end{align*}
# $$
#
# and the blending constraint to meet the milk fat requirement for each customer becomes a set of linear constraints
#
# $$
# \begin{align*}
# w_c + \sum_{(l,c)\ \in\ L \times C} \text{conc}_{l}\ z_{l,c}
# & \geq \text{conc}_{c}\ (\sum_{l\in L} z_{l, c} + y_{c})
# & \forall c \in C
# \end{align*}
# $$
#
# What remain, of course, are the bilinear terms $w_c = p y_c$. The values of $y_c$ are bounded between 0 and the demand of customer $c$, and the value of $p$ is bounded between the minimum and maximum milk fat concentrations of the remote farms. The bilinear terms form a set of equality constraints and bounded decision variables.
#
# $$
# \begin{align*}
# & w_c = p y_c & \forall c\in C \\
# \\
# 0 \leq\ & y_c \leq \text{demand}_c\ & \forall c\in C \\
# \min_{r\in R} \text{conc}_r \leq\ & p \leq \max_{r\in R} \text{conc}_r \\
# \end{align*}
# $$
#
# One strategy to convert this bilinear problem into a linear problem is to replace the equality constraints $w_c = p y_c$ with a set of linear inequality constraints that keeps the difference $w_c - p y_c$
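#
# One standard way to build such inequalities is the McCormick envelope relaxation of each bilinear term. The following is an illustrative sketch rather than a cell from the notebook: it assumes `m.p`, `m.y`, and `m.w` have been declared as Pyomo variables with the bounds stated above, and that `p_lo` and `p_hi` denote the minimum and maximum remote-farm concentrations.
def add_mccormick_envelopes(m, p_lo, p_hi):
    # Two envelopes follow from the lower bound 0 <= y_c.
    @m.Constraint(m.C)
    def w_lower_1(m, c):
        return m.w[c] >= p_lo * m.y[c]

    @m.Constraint(m.C)
    def w_upper_1(m, c):
        return m.w[c] <= p_hi * m.y[c]

    # The other two envelopes use the upper bound y_c <= demand_c.
    @m.Constraint(m.C)
    def w_lower_2(m, c):
        y_hi = customers.loc[c, "demand"]
        return m.w[c] >= p_hi * m.y[c] + (m.p - p_hi) * y_hi

    @m.Constraint(m.C)
    def w_upper_2(m, c):
        y_hi = customers.loc[c, "demand"]
        return m.w[c] <= p_lo * m.y[c] + (m.p - p_lo) * y_hi

    return m
| |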
True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "2",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"keylife": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"loopback_asymroute": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"certificate": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"priority": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"psksecret_remote": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dpd_retrycount": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ipv6_prefix": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"proposal": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "des-md5",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "des-sha1",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "des-sha256",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "des-sha384",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "des-sha512",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "3des-md5",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "3des-sha1",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "3des-sha256",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "3des-sha384",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "3des-sha512",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes128-md5",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes128-sha1",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes128-sha256",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes128-sha384",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes128-sha512",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes128gcm-prfsha1",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes128gcm-prfsha256",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes128gcm-prfsha384",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes128gcm-prfsha512",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes192-md5",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes192-sha1",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes192-sha256",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes192-sha384",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes192-sha512",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes256-md5",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes256-sha1",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes256-sha256",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes256-sha384",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes256-sha512",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes256gcm-prfsha1",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
| |
with DNA ligase for 30 minutes at room-temperature.'))
elif fragment.bottomLeftOverhang.sequence != '':
if isComplementary(fragment.topLeftOverhang.sequence.lower(), fragment.topRightOverhang.sequence.lower()):
ligated = DNA('plasmid',fragment.name+' self-ligation',fragment.sequence+fragment.topRightOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragment, ), 'Self-ligate ('+fragment.name+') with DNA ligase for 30 minutes at room-temperature.'))
if len(products) > 0 or len(inputDNAs) == 1:
return products
i = 0
while i < len(inputDNAs):
fragOne = inputDNAs[i]
if not isinstance(fragOne, DNA):
print '\n*Ligate Warning*: Ligate function was passed a non-DNA argument. Argument discarded.\n'
i += 1
continue
j = i + 1
while j < len(inputDNAs):
fragTwo = inputDNAs[j]
if not isinstance(fragOne, DNA) or not isinstance(fragTwo, DNA):
print '\n*Ligate Warning*: Ligate function was passed a non-DNA argument. Argument discarded.\n'
j += 1
continue
elif fragOne.DNAclass != 'digest' or fragTwo.DNAclass != 'digest':
j += 1
continue
(LTL,LTR,LBL,LBR) = SetFlags(fragOne)
(RTL,RTR,RBL,RBR) = SetFlags(fragTwo)
# first3 is the number of 3' overhangs for the left fragment, and so on for the other three classifiers
(first3, first5, second3, second5) = (LTR + LBL, LBR + LTL, RTR + RBL, RBR + RTL)
firstFlag = first3 + first5
secondFlag = second3 + second5
# non-blunt end ligation:
if first3 == 2:
# Here, you know that it has LTR and LBL
# But you don't know about its RXX fields
if isComplementary(fragOne.topRightOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
if isComplementary(fragOne.bottomLeftOverhang.sequence.upper(), fragTwo.topRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragOne.topRightOverhang.sequence+fragTwo.sequence+fragTwo.topRightOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragOne.topRightOverhang.sequence+fragTwo.sequence)
ligated.bottomLeftOverhang = Overhang(fragOne.bottomLeftOverhang.sequence)
# you don't know whether it is RTR or RBR
if RTR:
ligated.topRightOverhang = Overhang(fragTwo.topRightOverhang.sequence)
elif RBR:
ligated.bottomRightOverhang = Overhang(fragTwo.bottomRightOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
# you know it's not going to circularize, but you also know it has a LBL
elif isComplementary(fragOne.bottomLeftOverhang.sequence.upper(), fragTwo.topRightOverhang.sequence.upper()):
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragTwo.sequence+fragTwo.topRightOverhang.sequence+fragOne.sequence)
ligated.topRightOverhang = Overhang(fragOne.topRightOverhang.sequence)
# you don't know whether it is RTL or RBL
if RTL:
ligated.topLeftOverhang = Overhang(fragTwo.topLeftOverhang.sequence)
elif RBL:
ligated.bottomLeftOverhang = Overhang(fragTwo.bottomLeftOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
if isReverseComplementary(fragOne.topRightOverhang.sequence.upper(), fragTwo.topRightOverhang.sequence.upper()):
if isReverseComplementary(fragOne.bottomLeftOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragOne.topRightOverhang.sequence+reverseComplement(fragTwo.sequence)+reverse(fragTwo.bottomLeftOverhang.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragOne.topRightOverhang.sequence+reverseComplement(fragTwo.sequence))
ligated.bottomLeftOverhang = Overhang(fragOne.bottomLeftOverhang.sequence)
# you don't know whether it is RBL or RTL
if RTL:
ligated.bottomRightOverhang = Overhang(reverse(fragTwo.topLeftOverhang.sequence))
elif RBL:
ligated.topRightOverhang = Overhang(reverse(fragTwo.bottomLeftOverhang.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
# you know it's not going to circularize, but you also know it has a LBL
elif isReverseComplementary(fragOne.bottomLeftOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',reverseComplement(fragTwo.sequence)+reverse(fragTwo.bottomLeftOverhang.sequence)+fragOne.sequence)
ligated.topRightOverhang = Overhang(fragOne.topRightOverhang.sequence)
# you don't know whether it is RTR or RBR
if RTR:
ligated.bottomLeftOverhang = Overhang(reverse(fragTwo.topRightOverhang.sequence))
elif RBR:
ligated.topLeftOverhang = Overhang(reverse(fragTwo.bottomRightOverhang.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
elif first3 == 1:
if LTR:
# then you know it must have LTL
if RTR:
# then, if it is to ligate, it must have compatible RTL
if isReverseComplementary(fragOne.topRightOverhang.sequence.upper(), fragTwo.topRightOverhang.sequence.upper()):
if isReverseComplementary(fragOne.topLeftOverhang.sequence.upper(), fragTwo.topLeftOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.topLeftOverhang.sequence+fragOne.sequence+fragOne.topRightOverhang.sequence+reverseComplement(fragTwo.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragOne.topRightOverhang.sequence+reverseComplement(fragTwo.sequence))
ligated.topLeftOverhang = Overhang(fragOne.topLeftOverhang.sequence)
# you don't know whether it is RTL or RBL
if RTL:
ligated.bottomRightOverhang = Overhang(reverse(fragTwo.topLeftOverhang.sequence))
elif RBL:
ligated.bottomLeftOverhang = Overhang(reverse(fragTwo.bottomLeftOverhang.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
# now, you know it's not going to circularize, but you know it has LTL
elif isReverseComplementary(fragOne.topLeftOverhang.sequence.upper(), fragTwo.topLeftOverhang.sequence.upper()):
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',reverseComplement(fragTwo.sequence)+fragOne.topLeftOverhang.sequence+fragOne.sequence)
ligated.topRightOverhang = Overhang(fragOne.topRightOverhang.sequence)
# you dont know whether you have RTR (=> BLO) or RBR (=> TLO) ==> correction: yes you do, you have RTR
ligated.bottomLeftOverhang = Overhang(reverse(fragTwo.topRightOverhang.sequence))
# if RTR:
# ligated.bottomLeftOverhang = Overhang(reverse(fragTwo.topRightOverhang.sequence))
# elif RBR:
# ligated.topLeftOverhang = Overhang(reverse(fragTwo.bottomRightOverhang.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
# you know here that you have LTR and LTL, and that you do not have RTR
else:
# to ligate, it must have RBL and RBR
if isComplementary(fragOne.topRightOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
if isComplementary(fragOne.topLeftOverhang.sequence.upper(), fragTwo.bottomRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.topLeftOverhang.sequence+fragOne.sequence+fragOne.topRightOverhang.sequence+fragTwo.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragOne.topRightOverhang.sequence+fragTwo.sequence)
ligated.topLeftOverhang = Overhang(fragOne.topLeftOverhang.sequence)
ligated.bottomRightOverhang = Overhang(fragTwo.bottomRightOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
elif isComplementary(fragOne.topLeftOverhang.sequence.upper(), fragTwo.bottomRightOverhang.sequence.upper()):
# here, you know you have LTR and LTL, has a complementary RBR and does not have a RTR
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragTwo.sequence+fragOne.topLeftOverhang.sequence+fragOne.sequence)
ligated.topRightOverhang = Overhang(fragOne.topRightOverhang.sequence)
if RTL:
ligated.topLeftOverhang= Overhang(fragTwo.topLeftOverhang.sequence)
elif RBL:
ligated.bottomLeftOverhang = Overhang(fragTwo.bottomLeftOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
# you know it has LBL as its 3 and LBR as its 5
if RTR:
# then, if it is to ligate, it must have compatible RTL
if isComplementary(fragTwo.topRightOverhang.sequence.upper(), fragOne.bottomLeftOverhang.sequence.upper()):
if isComplementary(fragTwo.topLeftOverhang.sequence.upper(), fragOne.bottomRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragTwo.topLeftOverhang.sequence+fragTwo.sequence+fragTwo.topRightOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragTwo.topLeftOverhang.sequence+fragTwo.sequence)
ligated.bottomRightOverhang = Overhang(fragOne.bottomRightOverhang.sequence)
# you don't know whether it is a RBL or RTL
if RTL:
ligated.topLeftOverhang = Overhang(fragTwo.topLeftOverhang.sequence)
elif RBL:
ligated.bottomLeftOverhang = Overhang(fragTwo.bottomLeftOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
# you know it's not going to circularize, but you know it has LBR
elif isComplementary(fragTwo.topLeftOverhang.sequence.upper(), fragOne.bottomRightOverhang.sequence.upper()):
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragTwo.topLeftOverhang.sequence+fragTwo.sequence)
ligated.bottomLeftOverhang = Overhang(fragOne.bottomLeftOverhang.sequence)
if RTR:
ligated.topRightOverhang = Overhang(fragTwo.topRightOverhang.sequence)
elif RBR:
ligated.bottomRightOverhang = Overhang(fragTwo.bottomRightOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
# up to here is good
else:
# you know it has LBL, LBR, and not RTR
# to ligate, it must have RBL and RBR
if isReverseComplementary(fragOne.bottomRightOverhang.sequence.upper(), fragTwo.bottomRightOverhang.sequence.upper()):
if isReverseComplementary(fragOne.bottomLeftOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',Complement(fragOne.bottomLeftOverhang.sequence)+fragOne.sequence+Complement(fragOne.bottomRightOverhang.sequence)+reverseComplement(fragTwo.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+Complement(fragOne.bottomRightOverhang.sequence)+reverseComplement(fragTwo.sequence))
ligated.bottomLeftOverhang = Overhang(fragOne.bottomLeftOverhang.sequence)
if RTL:
ligated.bottomRightOverhang = Overhang(reverse(fragTwo.topLeftOverhang.sequence))
elif RBL:
ligated.topRightOverhang = Overhang(reverse(fragTwo.bottomLeftOverhang.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
# you know it's not going to circularize, but you know it has LBL
elif isReverseComplementary(fragOne.bottomLeftOverhang.sequence.upper(), fragTwo.bottomLeftOverhang.sequence.upper()):
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',reverseComplement(fragTwo.sequence)+Complement(fragOne.bottomLeftOverhang.sequence)+fragOne.sequence)
ligated.bottomRightOverhang = Overhang(fragOne.bottomRightOverhang.sequence)
ligated.topLeftOverhang = Overhang(reverse(fragTwo.bottomRightOverhang.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
# here first3 == 0, so you know it has LTL and LBR
else:
if isComplementary(fragOne.topLeftOverhang.sequence.upper(), fragTwo.bottomRightOverhang.sequence.upper()):
if isComplementary(fragOne.bottomRightOverhang.sequence.upper(), fragTwo.topLeftOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.topLeftOverhang.sequence+fragOne.sequence+fragTwo.topLeftOverhang.sequence+fragTwo.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragTwo.sequence+fragOne.topLeftOverhang.sequence+fragOne.sequence)
ligated.bottomRightOverhang = Overhang(fragOne.bottomRightOverhang.sequence)
if RTL:
ligated.topLeftOverhang = Overhang(fragTwo.topLeftOverhang.sequence)
elif RBL:
ligated.bottomLeftOverhang = Overhang(fragTwo.bottomLeftOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
elif isComplementary(fragOne.bottomRightOverhang.sequence.upper(), fragTwo.topLeftOverhang.sequence.upper()):
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+fragTwo.topLeftOverhang.sequence+fragTwo.sequence)
ligated.topLeftOverhang = Overhang(fragOne.topLeftOverhang.sequence)
if RTR:
ligated.topRightOverhang = Overhang(fragTwo.topRightOverhang.sequence)
elif RBR:
ligated.bottomRightOverhang = Overhang(fragTwo.bottomRightOverhang.sequence)
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
# up to here is good
# here first3 == 0, so you know it has LTL and LBR
if isReverseComplementary(fragOne.topLeftOverhang.sequence.upper(), fragTwo.topLeftOverhang.sequence.upper()):
if isReverseComplementary(fragOne.bottomRightOverhang.sequence.upper(), fragTwo.bottomRightOverhang.sequence.upper()):
ligated = DNA('plasmid',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.topLeftOverhang.sequence+fragOne.sequence+reverse(fragTwo.bottomRightOverhang.sequence)+reverseComplement(fragTwo.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
else:
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',reverseComplement(fragTwo.sequence)+fragOne.topLeftOverhang.sequence+fragOne.sequence)
ligated.bottomRightOverhang = Overhang(fragOne.bottomRightOverhang.sequence)
if RTR:
ligated.bottomLeftOverhang = Overhang(reverse(fragTwo.topRightOverhang.sequence))
if RBR:
ligated.topLeftOverhang = Overhang(reverse(fragTwo.bottomRightOverhang.sequence))
products.append(ligatePostProcessing(ligated, (fragOne, fragTwo), 'Ligate ('+fragOne.name+', '+fragTwo.name+') with DNA ligase for 30 minutes at room-temperature.'))
elif isReverseComplementary(fragOne.bottomRightOverhang.sequence.upper(), fragTwo.bottomRightOverhang.sequence.upper()):
ligated = DNA('digest',fragOne.name+', '+fragTwo.name+' ligation product',fragOne.sequence+Complement(fragOne.bottomRightOverhang.sequence)+reverseComplement(fragTwo.sequence))
ligated.topLeftOverhang = Overhang(fragOne.topLeftOverhang.sequence)
ligated.bottomRightOverhang = Overhang(reverse(fragTwo.topLeftOverhang.sequence))
j += 1
i += | |
# -*- coding: utf-8 -*-
"""
Execution utilities.
"""
import multiprocessing
import concurrent.futures as cf
from subprocess import call, Popen, PIPE
import logging
import gc
from math import floor
import os
import psutil
import getpass
import shlex
from warnings import warn
from rex.utilities.loggers import LOGGERS, log_mem
from rex.utilities.exceptions import (ExecutionError, SlurmWarning,
ParallelExecutionWarning)
logger = logging.getLogger(__name__)
class SubprocessManager:
"""Base class to handle subprocess execution."""
# get username as class attribute.
USER = getpass.getuser()
@staticmethod
def make_path(d):
"""Make a directory tree if it doesn't exist.
Parameters
----------
d : str
Directory tree to check and potentially create.
"""
if not os.path.exists(d):
os.makedirs(d)
@staticmethod
def make_sh(fname, script):
"""Make a shell script (.sh file) to execute a subprocess.
Parameters
----------
fname : str
Name of the .sh file to create.
script : str
Contents to be written into the .sh file.
"""
logger.debug('The shell script "{n}" contains the following:\n'
'~~~~~~~~~~ {n} ~~~~~~~~~~\n'
'{s}\n'
'~~~~~~~~~~ {n} ~~~~~~~~~~'
.format(n=fname, s=script))
with open(fname, 'w+') as f:
f.write(script)
@staticmethod
def rm(fname):
"""Remove a file.
Parameters
----------
fname : str
Filename (with path) to remove.
"""
os.remove(fname)
@staticmethod
def submit(cmd):
"""Open a subprocess and submit a command.
Parameters
----------
cmd : str
Command to be submitted using python subprocess.
Returns
-------
stdout : str
Subprocess standard output. This is decoded from the subprocess
stdout with rstrip.
stderr : str
Subprocess standard error. This is decoded from the subprocess
stderr with rstrip. After decoding/rstrip, this will be empty if
the subprocess doesn't return an error.
"""
cmd = shlex.split(cmd)
# use subprocess to submit command and get piped o/e
process = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
stderr = stderr.decode('ascii').rstrip()
stdout = stdout.decode('ascii').rstrip()
if process.returncode != 0:
raise OSError('Subprocess submission failed with return code {} '
'and stderr:\n{}'
.format(process.returncode, stderr))
return stdout, stderr
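# Example usage (a sketch, not from the original module):
#     out, err = SubprocessManager.submit('echo hello')   # -> ('hello', '')
# A nonzero return code from the subprocess raises OSError with the piped stderr.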
@staticmethod
def s(s):
"""Format input as str w/ appropriate quote types for python cli entry.
Examples
--------
list, tuple -> "['one', 'two']"
dict -> "{'key': 'val'}"
int, float, None -> '0'
str, other -> 'string'
"""
if isinstance(s, (list, tuple, dict)):
return '"{}"'.format(s)
elif not isinstance(s, (int, float, type(None))):
return "'{}'".format(s)
else:
return '{}'.format(s)
@staticmethod
def walltime(hours):
"""Get the SLURM walltime string in format "HH:MM:SS"
Parameters
----------
hours : float | int
Requested number of job hours.
Returns
-------
walltime : str
SLURM walltime request in format "HH:MM:SS"
"""
m_str = '{0:02d}'.format(round(60 * (hours % 1)))
h_str = '{0:02d}'.format(floor(hours))
return '{}:{}:00'.format(h_str, m_str)
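# Example (a sketch, not from the original module): SubprocessManager.walltime(4.5)
# returns '04:30:00', i.e. floor(4.5) whole hours plus round(60 * 0.5) minutes.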
class PBS(SubprocessManager):
"""Subclass for PBS subprocess jobs."""
def __init__(self, cmd, alloc, queue, name='reV',
feature=None, stdout_path='./stdout'):
"""Initialize and submit a PBS job.
Parameters
----------
cmd : str
Command to be submitted in PBS shell script. Example:
'python -m reV.generation.cli_gen'
alloc : str
HPC allocation account. Example: 'rev'.
queue : str
HPC queue to submit job to. Example: 'short', 'batch-h', etc...
name : str
PBS job name.
feature : str | None
PBS feature request (-l {feature}).
Example: 'feature=24core', 'qos=high', etc...
stdout_path : str
Path to print .stdout and .stderr files.
"""
self.make_path(stdout_path)
self.id, self.err = self.qsub(cmd,
alloc=alloc,
queue=queue,
name=name,
feature=feature,
stdout_path=stdout_path)
@staticmethod
def check_status(job, var='id'):
"""Check the status of this PBS job using qstat.
Parameters
----------
job : str
Job name or ID number.
var : str
Identity/type of job identification input arg ('id' or 'name').
Returns
-------
out : str or NoneType
Qstat job status character or None if not found.
Common status codes: Q, R, C (queued, running, complete).
"""
# column location of various job identifiers
col_loc = {'id': 0, 'name': 3}
qstat_rows = PBS.qstat()
if qstat_rows is None:
return None
else:
# reverse the list so most recent jobs are first
qstat_rows = reversed(qstat_rows)
# update job status from qstat list
for row in qstat_rows:
row = row.split()
# make sure the row is long enough to be a job status listing
if len(row) > 10:
if row[col_loc[var]].strip() == job.strip():
# Job status is located at the -2 index
status = row[-2]
logger.debug('Job with {} "{}" has status: "{}"'
.format(var, job, status))
return status
return None
@staticmethod
def qstat():
"""Run the PBS qstat command and return the stdout split to rows.
Returns
-------
qstat_rows : list | None
List of strings where each string is a row in the qstat printout.
Returns None if qstat is empty.
"""
cmd = 'qstat -u {user}'.format(user=PBS.USER)
stdout, _ = PBS.submit(cmd)
if not stdout:
# No jobs are currently running.
return None
else:
qstat_rows = stdout.split('\n')
return qstat_rows
def qsub(self, cmd, alloc, queue, name='reV', feature=None,
stdout_path='./stdout', keep_sh=False):
"""Submit a PBS job via qsub command and PBS shell script
Parameters
----------
cmd : str
Command to be submitted in PBS shell script. Example:
'python -m reV.generation.cli_gen'
alloc : str
HPC allocation account. Example: 'rev'.
queue : str
HPC queue to submit job to. Example: 'short', 'batch-h', etc...
name : str
PBS job name.
feature : str | None
PBS feature request (-l {feature}).
Example: 'feature=24core', 'qos=high', etc...
stdout_path : str
Path to print .stdout and .stderr files.
keep_sh : bool
Boolean to keep the .sh files. Default is to remove these files
after job submission.
Returns
-------
out : str
qsub standard output, this is typically the PBS job ID.
err : str
qsub standard error, this is typically an empty string if the job
was submitted successfully.
"""
status = self.check_status(name, var='name')
if status in ('Q', 'R'):
warn('Not submitting job "{}" because it is already in '
'qstat with status: "{}"'.format(name, status))
out = None
err = 'already_running'
else:
feature_str = '#PBS -l {}\n'.format(str(feature).replace(' ', ''))
fname = '{}.sh'.format(name)
script = ('#!/bin/bash\n'
'#PBS -N {n} # job name\n'
'#PBS -A {a} # allocation account\n'
'#PBS -q {q} # queue (debug, short, batch, or long)\n'
'#PBS -o {p}/{n}_$PBS_JOBID.o\n'
'#PBS -e {p}/{n}_$PBS_JOBID.e\n'
'{L}'
'echo Running on: $HOSTNAME, Machine Type: $MACHTYPE\n'
'{cmd}'
.format(n=name, a=alloc, q=queue, p=stdout_path,
L=feature_str if feature else '',
cmd=cmd))
# write the shell script file and submit as qsub job
self.make_sh(fname, script)
out, err = self.submit('qsub {script}'.format(script=fname))
if not err:
logger.debug('PBS job "{}" with id #{} submitted successfully'
.format(name, out))
if not keep_sh:
self.rm(fname)
return out, err
class SLURM(SubprocessManager):
"""Subclass for SLURM subprocess jobs."""
def __init__(self, cmd, alloc, walltime, memory=None, feature=None,
name='reV', stdout_path='./stdout', conda_env=None,
module=None, module_root='/shared-projects/rev/modulefiles'):
"""Initialize and submit a PBS job.
Parameters
----------
cmd : str
Command to be submitted in SLURM shell script. Example:
'python -m reV.generation.cli_gen'
alloc : str
HPC project (allocation) handle. Example: 'rev'.
walltime : float
Node walltime request in hours.
memory : int, Optional
Node memory request in GB.
feature : str
Additional flags for SLURM job. Format is "--qos=high"
or "--depend=[state:job_id]". Default is None.
name : str
SLURM job name.
stdout_path : str
Path to print .stdout and .stderr files.
conda_env : str
Conda environment to activate
module : str
Module to load
module_root : str
Path to module root to load
"""
self.make_path(stdout_path)
self.out, self.err = self.sbatch(cmd,
alloc=alloc,
memory=memory,
walltime=walltime,
feature=feature,
name=name,
stdout_path=stdout_path,
conda_env=conda_env,
module=module,
module_root=module_root)
if self.out:
self.id = self.out.split(' ')[-1]
else:
self.id = None
@staticmethod
def check_status(job, var='id'):
"""Check the status of this PBS job using qstat.
Parameters
----------
job : str
Job name or ID number.
var : str
Identity/type of job identification input arg ('id' or 'name').
Returns
-------
out : str | NoneType
squeue job status str or None if not found.
Common status codes: PD, R, CG (pending, running, completing).
"""
# column location of various job identifiers
col_loc = {'id': 0, 'name': 2}
if var == 'name':
# check for specific name
squeue_rows = SLURM.squeue(name=job)
else:
squeue_rows = SLURM.squeue()
if squeue_rows is None:
return None
else:
# reverse the list so most recent jobs are first
squeue_rows = reversed(squeue_rows)
# update job status from squeue list
for row in squeue_rows:
row = row.split()
# make sure the row is long enough to be a job status listing
if len(row) > 7:
if row[col_loc[var]].strip() in job.strip():
# Job status is located at the 4 index
status = row[4]
logger.debug('Job with {} | |
is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRecommendationIds failed: unknown result")
def getBlockedRecommendationIds(self, syncReason):
"""
Parameters:
- syncReason
"""
self.send_getBlockedRecommendationIds(syncReason)
return self.recv_getBlockedRecommendationIds()
def send_getBlockedRecommendationIds(self, syncReason):
self._oprot.writeMessageBegin('getBlockedRecommendationIds', TMessageType.CALL, self._seqid)
args = getBlockedRecommendationIds_args()
args.syncReason = syncReason
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getBlockedRecommendationIds(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getBlockedRecommendationIds_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getBlockedRecommendationIds failed: unknown result")
def getAllContactIds(self, syncReason):
"""
Parameters:
- syncReason
"""
self.send_getAllContactIds(syncReason)
return self.recv_getAllContactIds()
def send_getAllContactIds(self, syncReason):
self._oprot.writeMessageBegin('getAllContactIds', TMessageType.CALL, self._seqid)
args = getAllContactIds_args()
args.syncReason = syncReason
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getAllContactIds(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getAllContactIds_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getAllContactIds failed: unknown result")
def getContact(self, id):
"""
Parameters:
- id
"""
self.send_getContact(id)
return self.recv_getContact()
def send_getContact(self, id):
self._oprot.writeMessageBegin('getContact', TMessageType.CALL, self._seqid)
args = getContact_args()
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getContact(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getContact_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getContact failed: unknown result")
def fetchOps(self, localRev, count, globalRev, individualRev):
"""
Parameters:
- localRev
- count
- globalRev
- individualRev
"""
self.send_fetchOps(localRev, count, globalRev, individualRev)
return self.recv_fetchOps()
def send_fetchOps(self, localRev, count, globalRev, individualRev):
self._oprot.writeMessageBegin('fetchOps', TMessageType.CALL, self._seqid)
args = fetchOps_args()
args.localRev = localRev
args.count = count
args.globalRev = globalRev
args.individualRev = individualRev
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_fetchOps(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = fetchOps_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "fetchOps failed: unknown result")
def sendMessage(self, seq, message):
"""
Parameters:
- seq
- message
"""
self.send_sendMessage(seq, message)
return self.recv_sendMessage()
def send_sendMessage(self, seq, message):
self._oprot.writeMessageBegin('sendMessage', TMessageType.CALL, self._seqid)
args = sendMessage_args()
args.seq = seq
args.message = message
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_sendMessage(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = sendMessage_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "sendMessage failed: unknown result")
def sendChatChecked(self, seq, chatMid, lastMessageId, sessionId):
"""
Parameters:
- seq
- chatMid
- lastMessageId
- sessionId
"""
self.send_sendChatChecked(seq, chatMid, lastMessageId, sessionId)
self.recv_sendChatChecked()
def send_sendChatChecked(self, seq, chatMid, lastMessageId, sessionId):
self._oprot.writeMessageBegin('sendChatChecked', TMessageType.CALL, self._seqid)
args = sendChatChecked_args()
args.seq = seq
args.chatMid = chatMid
args.lastMessageId = lastMessageId
args.sessionId = sessionId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_sendChatChecked(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = sendChatChecked_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def getRoom(self, roomId):
"""
Parameters:
- roomId
"""
self.send_getRoom(roomId)
return self.recv_getRoom()
def send_getRoom(self, roomId):
self._oprot.writeMessageBegin('getRoom', TMessageType.CALL, self._seqid)
args = getRoom_args()
args.roomId = roomId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getRoom(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getRoom_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getRoom failed: unknown result")
def updateChat(self, request):
"""
Parameters:
- request
"""
self.send_updateChat(request)
return self.recv_updateChat()
def send_updateChat(self, request):
self._oprot.writeMessageBegin('updateChat', TMessageType.CALL, self._seqid)
args = updateChat_args()
args.request = request
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_updateChat(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = updateChat_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "updateChat failed: unknown result")
def getChats(self, request):
"""
Parameters:
- request
"""
self.send_getChats(request)
return self.recv_getChats()
def send_getChats(self, request):
self._oprot.writeMessageBegin('getChats', TMessageType.CALL, self._seqid)
args = getChats_args()
args.request = request
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getChats(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getChats_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getChats failed: unknown result")
def getE2EEPublicKeysEx(self, ignoreE2EEStatus, syncReason):
"""
Parameters:
- ignoreE2EEStatus
- syncReason
"""
self.send_getE2EEPublicKeysEx(ignoreE2EEStatus, syncReason)
return self.recv_getE2EEPublicKeysEx()
def send_getE2EEPublicKeysEx(self, ignoreE2EEStatus, syncReason):
self._oprot.writeMessageBegin('getE2EEPublicKeysEx', TMessageType.CALL, self._seqid)
args = getE2EEPublicKeysEx_args()
args.ignoreE2EEStatus = ignoreE2EEStatus
args.syncReason = syncReason
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getE2EEPublicKeysEx(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getE2EEPublicKeysEx_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getE2EEPublicKeysEx failed: unknown result")
def respondE2EEKeyExchange(self, reqSeq, encryptedKeyChain, hashKeyChain):
"""
Parameters:
- reqSeq
- encryptedKeyChain
- hashKeyChain
"""
self.send_respondE2EEKeyExchange(reqSeq, encryptedKeyChain, hashKeyChain)
self.recv_respondE2EEKeyExchange()
def send_respondE2EEKeyExchange(self, reqSeq, encryptedKeyChain, hashKeyChain):
self._oprot.writeMessageBegin('respondE2EEKeyExchange', TMessageType.CALL, self._seqid)
args = respondE2EEKeyExchange_args()
args.reqSeq = reqSeq
args.encryptedKeyChain = encryptedKeyChain
args.hashKeyChain = hashKeyChain
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_respondE2EEKeyExchange(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = respondE2EEKeyExchange_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def getLastE2EEGroupSharedKey(self, keyVersion, chatMid):
"""
Parameters:
- keyVersion
- chatMid
"""
self.send_getLastE2EEGroupSharedKey(keyVersion, chatMid)
return self.recv_getLastE2EEGroupSharedKey()
def send_getLastE2EEGroupSharedKey(self, keyVersion, chatMid):
self._oprot.writeMessageBegin('getLastE2EEGroupSharedKey', TMessageType.CALL, self._seqid)
args = getLastE2EEGroupSharedKey_args()
args.keyVersion = keyVersion
args.chatMid = chatMid
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getLastE2EEGroupSharedKey(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getLastE2EEGroupSharedKey_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getLastE2EEGroupSharedKey failed: unknown result")
def verifyQrcode(self, verifier, pinCode):
"""
Parameters:
- verifier
- pinCode
"""
self.send_verifyQrcode(verifier, pinCode)
return self.recv_verifyQrcode()
def send_verifyQrcode(self, verifier, pinCode):
self._oprot.writeMessageBegin('verifyQrcode', TMessageType.CALL, self._seqid)
args = verifyQrcode_args()
args.verifier = verifier
args.pinCode = pinCode
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_verifyQrcode(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = verifyQrcode_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "verifyQrcode failed: unknown result")
def getConfigurations(self, revision, regionOfUsim, regionOfTelephone, regionOfLocale, carrier, syncReason):
"""
Parameters:
- revision
- regionOfUsim
- regionOfTelephone
- regionOfLocale
- carrier
- syncReason
"""
self.send_getConfigurations(revision, regionOfUsim, regionOfTelephone, regionOfLocale, carrier, syncReason)
return self.recv_getConfigurations()
def send_getConfigurations(self, revision, regionOfUsim, regionOfTelephone, regionOfLocale, carrier, syncReason):
self._oprot.writeMessageBegin('getConfigurations', TMessageType.CALL, self._seqid)
args = getConfigurations_args()
args.revision = revision
args.regionOfUsim = regionOfUsim
args.regionOfTelephone = regionOfTelephone
args.regionOfLocale = regionOfLocale
args.carrier = carrier
args.syncReason = syncReason
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getConfigurations(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getConfigurations_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getConfigurations failed: unknown result")
def noop(self):
self.send_noop()
self.recv_noop()
def send_noop(self):
self._oprot.writeMessageBegin('noop', TMessageType.CALL, self._seqid)
args = noop_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_noop(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = noop_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def getServerTime(self):
self.send_getServerTime()
return self.recv_getServerTime()
def send_getServerTime(self):
self._oprot.writeMessageBegin('getServerTime', TMessageType.CALL, self._seqid)
args = getServerTime_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getServerTime(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getServerTime_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getServerTime failed: unknown result")
def setNotificationsEnabled(self, reqSeq, type, target, enablement):
"""
Parameters:
- reqSeq
- type
- target
- enablement
"""
self.send_setNotificationsEnabled(reqSeq, type, target, enablement)
self.recv_setNotificationsEnabled()
def send_setNotificationsEnabled(self, reqSeq, type, target, enablement):
self._oprot.writeMessageBegin('setNotificationsEnabled', TMessageType.CALL, self._seqid)
args = setNotificationsEnabled_args()
args.reqSeq = reqSeq
args.type = type
args.target = target
args.enablement = enablement
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
import yaml
import numpy
import scipy.interpolate
class Material:
""" Material class"""
def __init__(self, filename, interpolation_points=100, empty=False):
"""
:param filename: The name of the material file
:param interpolation_points: The number of interpolation points (default 100)
:param empty: If True, create an empty Material instance (default False)
"""
self.refractiveIndex = None
self.extinctionCoefficient = None
self.points = interpolation_points
if empty:
return
f = open(filename)
try:
material = yaml.safe_load(f)
except yaml.YAMLError:
raise Exception('Bad Material YAML File.')
finally:
f.close()
previous_formula = False
for data in material['DATA']:
if (data['type'].split())[0] == 'tabulated':
rows = data['data'].split('\n')
splitrows = [c.split() for c in rows]
wavelengths = []
n = []
k = []
for s in splitrows:
if len(s) > 0:
wavelengths.append(float(s[0]))
n.append(float(s[1]))
if len(s) > 2:
k.append(float(s[2]))
self.points = len(wavelengths)
if (data['type'].split())[1] == 'n':
if self.refractiveIndex is not None:
raise Exception('Bad Material YAML File')
self.refractiveIndex = RefractiveIndexData.\
SetupRefractiveIndex(formula=-1,
wavelengths=wavelengths,
values=n)
elif (data['type'].split())[1] == 'k':
self.extinctionCoefficient = ExtinctionCoefficientData.\
SetupExtinctionCoefficient(wavelengths, n)
if previous_formula:
self.refractiveIndex = RefractiveIndexData.\
SetupRefractiveIndex(
formula=formula, rangeMin=rangeMin,
rangeMax=rangeMax, coefficients=coefficients,
interpolation_points=self.points)
elif (data['type'].split())[1] == 'nk':
if self.refractiveIndex is not None:
raise Exception('Bad Material YAML File')
self.refractiveIndex = RefractiveIndexData.\
SetupRefractiveIndex(formula=-1,
wavelengths=wavelengths,
values=n)
self.extinctionCoefficient = ExtinctionCoefficientData.\
SetupExtinctionCoefficient(wavelengths, k)
elif (data['type'].split())[0] == 'formula':
if self.refractiveIndex is not None:
raise Exception('Bad Material YAML File')
formula = int((data['type'].split())[1])
coefficients = [float(s) for s in data['coefficients'].split()]
rangeMin, rangeMax = map(float,
data['wavelength_range'].split())
previous_formula = True
self.refractiveIndex = RefractiveIndexData.\
SetupRefractiveIndex(formula=formula,
rangeMin=rangeMin,
rangeMax=rangeMax,
coefficients=coefficients,
interpolation_points=self.points)
if self.refractiveIndex is not None:
self.rangeMin = self.refractiveIndex.rangeMin
self.rangeMax = self.refractiveIndex.rangeMax
else:
self.rangeMin = self.extinctionCoefficient.rangeMin
self.rangeMax = self.extinctionCoefficient.rangeMax
def get_refractiveindex(self, wavelength):
"""
Get the refractive index at a certain wavelength
:param wavelength: The wavelength in nm
:returns: refractive index
:raises Exception:
"""
if self.refractiveIndex is None:
raise Exception('No refractive index specified for this material')
else:
return self.refractiveIndex.get_refractiveindex(wavelength)
def get_extinctioncoefficient(self, wavelength):
"""
Get the extinction coefficient
:param wavelength:
:returns: extinction coefficient
:raises NoExtinctionCoefficient:
"""
if self.extinctionCoefficient is None:
raise NoExtinctionCoefficient('No extinction coefficient '
'specified for this material')
else:
return self.extinctionCoefficient.\
get_extinction_coefficient(wavelength)
def get_complete_extinction(self):
'''
Get the complete extinction coefficient information
:returns: The extinction coefficient information as a list of lists
'''
if self.has_extinction():
return self.extinctionCoefficient.get_complete_extinction()
else:
return None
def get_complete_refractive(self):
'''
Get the complete refractive information
:returns: The refractive index information as a list of lists
'''
if self.has_refractive():
return self.refractiveIndex.get_complete_refractive()
else:
return None
def has_refractive(self):
'''
Checks if there is a refractive index
'''
return self.refractiveIndex is not None
def has_extinction(self):
'''
Checks if there is an extinction coefficient
'''
return self.extinctionCoefficient is not None
def get_page_info(self):
'''
Get the page information
'''
return self.pageinfo
def to_csv(self, output):
'''
Save this material as a comma-separated value list
:param output: The output file
'''
refr = self.get_complete_refractive()
ext = self.get_complete_extinction()
if self.has_refractive() and self.has_extinction() and\
len(refr) == len(ext):
header = "wl,n,k\n"
output_f = open(output.replace(".csv", "(nk).csv"), 'w')
output_f.write(header)
for i in range(len(refr)):
output_f.write(",".join(list(
map(str, [refr[i][0], refr[i][1], ext[i][1]])))+"\n")
output_f.close()
print("Wrote", output.replace(".csv", "(nk).csv"))
else:
if self.has_refractive():
output_f = open(output.replace(".csv", "(n).csv"), 'w')
header = "wl,n\n"
output_f.write(header)
for i in range(len(refr)):
output_f.write(",".join(list(
map(str, [refr[i][0], refr[i][1]])))+"\n")
output_f.close()
print("Wrote", output.replace(".csv", "(n).csv"))
if self.has_extinction():
output_f = open(output.replace(".csv", "(k).csv"), 'w')
header = "wl,k\n"
output_f.write(header)
for i in range(len(ext)):
output_f.write(",".join(list(
map(str, [ext[i][0], ext[i][1]])))+"\n")
output_f.close()
print("Wrote", output.replace(".csv", "(k).csv"))
@staticmethod
def FromLists(pageinfo, wavelengths_r=None, refractive=None,
wavelengths_e=None, extinction=None):
'''
Create a material from lists of wavelength refractive indices
and extinction coefficients
:param pageinfo: The pageinfo of the material
:param wavelengths_r: A list of wavelengths for the refractive index
:param refractive: A list of refractive indices
:param wavelengths_e: A list of wavelengths_e for the extinction coeff
:param extinction: A list of extinction coefficients
:returns: A material
'''
mat = Material("", empty=True)
mat.pageinfo = pageinfo
if refractive is not None:
mat.refractiveIndex = TabulatedRefractiveIndexData.\
FromLists(wavelengths_r, refractive)
mat.rangeMin = mat.refractiveIndex.rangeMin
mat.rangeMax = mat.refractiveIndex.rangeMax
if extinction is not None:
mat.extinctionCoefficient = ExtinctionCoefficientData.\
FromLists(wavelengths_e, extinction)
mat.rangeMin = mat.extinctionCoefficient.rangeMin
mat.rangeMax = mat.extinctionCoefficient.rangeMax
return mat
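# A minimal usage sketch for the Material class above; the YAML path and the
# wavelength are made-up examples, and real files are expected to follow the
# refractiveindex.info database layout parsed in __init__:
#
#   mat = Material('data/main/SiO2/Malitson.yml', interpolation_points=200)
#   n_550 = mat.get_refractiveindex(550.0)          # wavelength in nm
#   if mat.has_extinction():
#       k_550 = mat.get_extinctioncoefficient(550.0)
#   mat.to_csv('SiO2.csv')   # writes SiO2(n).csv, SiO2(k).csv or SiO2(nk).csv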
class RefractiveIndexData:
"""Abstract RefractiveIndex class"""
@staticmethod
def SetupRefractiveIndex(formula, **kwargs):
"""
:param formula: An integer value specifying the formula to use
pass -1 to create a tabulated refractive index data
:param kwargs: kwargs passed to the FormulaRefractiveIndexData
or TabulatedRefractiveIndexData
:returns: A formula or tabulated refractive index data
:raises Exception:
"""
if formula >= 0:
return FormulaRefractiveIndexData(formula, **kwargs)
elif formula == -1:
return TabulatedRefractiveIndexData(**kwargs)
else:
raise Exception('Bad RefractiveIndex data type')
def get_refractiveindex(self, wavelength):
"""
Not implemented yet
:param wavelength:
:raise NotImplementedError:
"""
raise NotImplementedError('Different for functionally '
'and experimentally defined materials')
class FormulaRefractiveIndexData:
"""Formula RefractiveIndex class"""
def __init__(self, formula, rangeMin, rangeMax, coefficients,
interpolation_points):
"""
:param formula: An integer value specifying the formula
:param rangeMin: The lower bound for the wavelength
:param rangeMax: The upper bound for the wavelength
:param coefficients: Coefficient to interpolate over
"""
self.formula = formula
self.rangeMin = rangeMin
self.rangeMax = rangeMax
self.coefficients = coefficients
self.interpolation_points = interpolation_points
def get_complete_refractive(self):
'''
Get the complete refractive index for the whole wavelength interval
:returns: A list of refractive indices over the whole
wavelength interval (len = interpolation_points)
'''
wavelength = numpy.linspace(
self.rangeMin, self.rangeMax, num=self.interpolation_points)
extlist = [[
wavelength[i],
self.get_refractiveindex(wavelength[i] * 1000)]
for i in range(len(wavelength))]
# return numpy.array(extlist)
return extlist
def get_refractiveindex(self, wavelength):
"""
Get the refractive index at a certain wavelength
using the specified interpolation formula
:param wavelength:
:returns: The interpolated refractive index at wavelength
:raises Exception:
"""
wavelength /= 1000.0
if self.rangeMin <= wavelength <= self.rangeMax:
formula_type = self.formula
coefficients = self.coefficients
n = 0
if formula_type == 1: # Sellmeier
nsq = 1 + coefficients[0]
def sellmeier(c1, c2, w):
return c1 * (w ** 2) / (w ** 2 - c2 ** 2)
for i in range(1, len(coefficients), 2):
nsq += sellmeier(coefficients[i],
coefficients[i + 1],
wavelength)
n = numpy.sqrt(nsq)
elif formula_type == 2: # Sellmeier-2
nsq = 1 + coefficients[0]
def sellmeier2(c1, c2, w):
return c1 * (w ** 2) / (w ** 2 - c2)
for i in range(1, len(coefficients), 2):
nsq += sellmeier2(coefficients[i],
coefficients[i + 1],
wavelength)
n = numpy.sqrt(nsq)
elif formula_type == 3: # Polynomial
def polynomial(c1, c2, w):
return c1 * w ** c2
nsq = coefficients[0]
for i in range(1, len(coefficients), 2):
nsq += polynomial(coefficients[i],
coefficients[i + 1],
wavelength)
n = numpy.sqrt(nsq)
elif formula_type == 4: # RefractiveIndex.INFO
def riinfo(wl, ci, cj, ck, cl):
return ci * wl**cj / (wl**2 - ck**cl)
n = coefficients[0]
n += riinfo(wavelength, *coefficients[1:5])
n += riinfo(wavelength, *coefficients[5:9])
for kk in range(len(coefficients[9:]) // 2):
n += coefficients[9+kk] * wavelength**coefficients[9+kk+1]
n = numpy.sqrt(n)
elif formula_type == 5: # Cauchy
def cauchy(c1, c2, w):
return c1 * w ** c2
n = coefficients[0]
for i in range(1, len(coefficients), 2):
n += cauchy(coefficients[i],
coefficients[i + 1],
wavelength)
elif formula_type == 6: # Gases
def gasses(c1, c2, w):
return c1 / (c2 - w ** (-2))
n = 1 + coefficients[0]
for i in range(1, len(coefficients), 2):
n += gasses(coefficients[i],
coefficients[i + 1],
wavelength)
elif formula_type == 7: # Herzberger
n = coefficients[0]
n += coefficients[1] / (wavelength**2 - 0.028)
n += coefficients[2] * (1 / (wavelength**2 - 0.028))**2
for i, cc in enumerate(coefficients[3:]):
n += cc * wavelength**(2*(i+1))
elif formula_type == 8: # Retro
n = coefficients[0]
n += coefficients[1] * wavelength**2 /\
(wavelength**2 - coefficients[2])
n += coefficients[3] * wavelength**2
n = numpy.sqrt(-(2 * n + 1) / (n - 1))
elif formula_type == 9: # Exotic
n = coefficients[0]
n += coefficients[1] / (wavelength**2 - coefficients[2])
n += coefficients[3] * (wavelength - coefficients[4]) / \
((wavelength - coefficients[4])**2 + coefficients[5])
n = numpy.sqrt(n)
else:
raise Exception('Bad formula type')
return n
else:
raise Exception('Wavelength {} is out of bounds. '
'Correct range (um): ({}, {})'.
format(wavelength, self.rangeMin, self.rangeMax))
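# Worked example for formula 1 (Sellmeier) as implemented above, using made-up
# coefficients purely for illustration (not taken from any real material):
#
#   ri = FormulaRefractiveIndexData(formula=1, rangeMin=0.3, rangeMax=2.5,
#                                   coefficients=[0.0, 1.0, 0.1],
#                                   interpolation_points=100)
#   # get_refractiveindex(500.0) converts 500 nm to 0.5 um, then
#   # nsq = 1 + 0.0 + 1.0 * 0.5**2 / (0.5**2 - 0.1**2) = 1 + 0.25/0.24 ~ 2.042
#   # so the returned n is sqrt(2.042) ~ 1.429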
class TabulatedRefractiveIndexData:
"""Tabulated RefractiveIndex class"""
def __init__(self, wavelengths, values):
"""
Create a TabulatedRefractiveIndexData from a list of
wavelengths and values
:param wavelengths:
:param values:
"""
self.rangeMin = numpy.min(wavelengths)
self.rangeMax = numpy.max(wavelengths)
if self.rangeMin == self.rangeMax:
self.refractiveFunction = values[0]
else:
self.refractiveFunction = scipy.interpolate.interp1d(wavelengths,
values)
self.wavelengths = wavelengths
self.coefficients = values
@staticmethod
def FromLists(wavelengths, values):
"""
Create a TabulatedRefractiveIndexData from a list of
wavelengths and values
"""
return TabulatedRefractiveIndexData(wavelengths, values)
def get_refractiveindex(self, wavelength):
"""
Get
import numpy as np
import torch as t
import torch.nn as nn
import torch.distributed as dist
from jukebox.transformer.ops import LayerNorm
from jukebox.prior.autoregressive import ConditionalAutoregressive2D
from jukebox.prior.conditioners import Conditioner, LabelConditioner
from jukebox.data.labels import Labeller
from jukebox.utils.torch_utils import assert_shape
from jukebox.utils.dist_utils import print_once
from jukebox.vqvae.vqvae import calculate_strides
"""
TODO: Simplify this into an easier structure
enc-dec prior vs single-transformer prior
remove the vqvae pre-post processing into a separate vqvae-coder class
"""
class SimplePrior(nn.Module):
def __init__(self, z_shapes, l_bins, encoder, decoder, level,
downs_t, strides_t, labels, prior_kwargs, x_cond_kwargs, y_cond_kwargs,
prime_kwargs, copy_input, labels_v3=False,
merged_decoder=False, single_enc_dec=False):
super().__init__()
self.use_tokens = prime_kwargs.pop('use_tokens')
self.n_tokens = prime_kwargs.pop('n_tokens')
self.prime_loss_fraction = prime_kwargs.pop('prime_loss_fraction')
self.copy_input = copy_input
if self.copy_input:
prime_kwargs['bins'] = l_bins
self.z_shapes = z_shapes
self.levels = len(self.z_shapes)
self.z_shape = self.z_shapes[level]
self.level = level
assert level < self.levels, f"Total levels {self.levels}, got level {level}"
self.l_bins = l_bins
# Passing functions instead of the vqvae module to avoid getting params
self.encoder = encoder
self.decoder = decoder
# X conditioning
self.x_cond = (level != (self.levels - 1))
self.cond_level = level + 1
# Y conditioning
self.y_cond = labels
self.single_enc_dec = single_enc_dec
# X conditioning
if self.x_cond:
self.conditioner_blocks = nn.ModuleList()
conditioner_block = lambda _level: Conditioner(input_shape=z_shapes[_level],
bins=l_bins,
down_t=downs_t[_level],
stride_t=strides_t[_level],
**x_cond_kwargs)
if dist.get_rank() == 0: print(f"Conditioning on 1 above level(s)")
self.conditioner_blocks.append(conditioner_block(self.cond_level))
# Y conditioning
if self.y_cond:
self.n_time = self.z_shape[0] # Assuming STFT=TF order and raw=T1 order, so T is first dim
self.y_emb = LabelConditioner(n_time=self.n_time,include_time_signal=not self.x_cond,**y_cond_kwargs)
# Lyric conditioning
if single_enc_dec:
# Single encoder-decoder transformer
self.prior_shapes = [(self.n_tokens,), prior_kwargs.pop('input_shape')]
self.prior_bins = [prime_kwargs['bins'], prior_kwargs.pop('bins')]
self.prior_dims = [np.prod(shape) for shape in self.prior_shapes]
self.prior_bins_shift = np.cumsum([0, *self.prior_bins])[:-1]
self.prior_width = prior_kwargs['width']
print_once(f'Creating cond. autoregress with prior bins {self.prior_bins}, ')
print_once(f'dims {self.prior_dims}, ')
print_once(f'shift {self.prior_bins_shift}')
print_once(f'input shape {sum(self.prior_dims)}')
print_once(f'input bins {sum(self.prior_bins)}')
print_once(f'Self copy is {self.copy_input}')
self.prime_loss_dims, self.gen_loss_dims = self.prior_dims[0], self.prior_dims[1]
self.total_loss_dims = self.prime_loss_dims + self.gen_loss_dims
self.prior = ConditionalAutoregressive2D(input_shape=(sum(self.prior_dims),),
bins=sum(self.prior_bins),
x_cond=(self.x_cond or self.y_cond), y_cond=True,
prime_len=self.prime_loss_dims,
**prior_kwargs)
else:
# Separate encoder-decoder transformer
if self.n_tokens != 0 and self.use_tokens:
from jukebox.transformer.ops import Conv1D
prime_input_shape = (self.n_tokens,)
self.prime_loss_dims = np.prod(prime_input_shape)
self.prime_acts_width, self.prime_state_width = prime_kwargs['width'], prior_kwargs['width']
self.prime_prior = ConditionalAutoregressive2D(input_shape=prime_input_shape, x_cond=False, y_cond=False,
only_encode=True,
**prime_kwargs)
self.prime_state_proj = Conv1D(self.prime_acts_width, self.prime_state_width, init_scale=prime_kwargs['init_scale'])
self.prime_state_ln = LayerNorm(self.prime_state_width)
self.prime_bins = prime_kwargs['bins']
self.prime_x_out = nn.Linear(self.prime_state_width, self.prime_bins, bias=False)
nn.init.normal_(self.prime_x_out.weight, std=0.02 * prior_kwargs['init_scale'])
else:
self.prime_loss_dims = 0
self.gen_loss_dims = np.prod(self.z_shape)
self.total_loss_dims = self.prime_loss_dims + self.gen_loss_dims
self.prior = ConditionalAutoregressive2D(x_cond=(self.x_cond or self.y_cond), y_cond=self.y_cond,
encoder_dims = self.prime_loss_dims, merged_decoder=merged_decoder,
**prior_kwargs)
self.n_ctx = self.gen_loss_dims
self.downsamples = calculate_strides(strides_t, downs_t)
self.cond_downsample = self.downsamples[level+1] if level != self.levels - 1 else None
self.raw_to_tokens = np.prod(self.downsamples[:level+1])
self.sample_length = self.n_ctx*self.raw_to_tokens
if labels:
self.labels_v3 = labels_v3
self.labeller = Labeller(self.y_emb.max_bow_genre_size, self.n_tokens, self.sample_length, v3=self.labels_v3)
print(f"Level:{level}, Cond downsample:{self.cond_downsample}, Raw to tokens:{self.raw_to_tokens}, Sample length:{self.sample_length}")
def get_y(self, labels, start, get_indices=False):
y = labels['y'].clone()
# Set sample_length to match this level
y[:, 2] = int(self.sample_length)
# Set offset
y[:, 1:2] = y[:, 1:2] + int(start * self.raw_to_tokens)
# Set lyric tokens
indices = self.labeller.set_y_lyric_tokens(y, labels)
if get_indices:
return y, indices
else:
return y
def get_z_conds(self, zs, start, end):
if self.level != self.levels - 1:
assert start % self.cond_downsample == end % self.cond_downsample == 0
z_cond = zs[self.level + 1][:,start//self.cond_downsample:end//self.cond_downsample]
assert z_cond.shape[1] == self.n_ctx//self.cond_downsample
z_conds = [z_cond]
else:
z_conds = None
return z_conds
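# Illustrative numbers for the slicing above (not actual hyperparameters): if
# this level's window is n_ctx = 8192 codes and cond_downsample = 4, a window
# [start, end) = [0, 8192) at this level maps to [0, 2048) of the level above,
# i.e. z_cond = zs[level + 1][:, 0:2048], which satisfies the shape assert.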
def prior_preprocess(self, xs, conds):
N = xs[0].shape[0]
for i in range(len(xs)):
x, shape, dims = xs[i], self.prior_shapes[i], self.prior_dims[i]
bins, bins_shift = int(self.prior_bins[i]), int(self.prior_bins_shift[i])
assert isinstance(x, t.cuda.LongTensor), x
assert (0 <= x).all() and (x < bins).all()
#assert_shape(x, (N, *shape))
xs[i] = (xs[i] + bins_shift).view(N, -1)
for i in range(len(conds)):
cond, shape, dims = conds[i], self.prior_shapes[i], self.prior_dims[i]
if cond is not None:
assert_shape(cond, (N, dims, self.prior_width))
else:
conds[i] = t.zeros((N, dims, self.prior_width), dtype=t.float, device='cuda')
return t.cat(xs, dim=1), t.cat(conds, dim=1)
def prior_postprocess(self, z):
N = z.shape[0]
dims = (self.prior_dims[0], z.shape[1] - self.prior_dims[0])
# xs = list(t.split(z, self.prior_dims, dim=1))
xs = list(t.split(z, dims, dim=1))
for i in range(len(xs)):
# x, shape, dims, bins, bins_shift = xs[i], self.prior_shapes[i], self.prior_dims[i], self.prior_bins[i], self.prior_bins_shift[i]
# assert_shape(x, (N, dims))
shape = self.prior_shapes[i]
bins, bins_shift = int(self.prior_bins[i]), int(self.prior_bins_shift[i])
# xs[i] = (xs[i] - bins_shift).view(N, *shape) #view(N, -1, *shape[1:])
xs[i] = (xs[i] - bins_shift).view(N, -1, *shape[1:])
xs[i] = t.clamp(xs[i], min=0) # If not masking loss, model may have generated lyric/midi tokens which are now shifted <0 by bin_shift
assert (xs[i] < bins).all(), f'rank: {dist.get_rank()}, bins: {bins}, dims {dims}, shape {shape}, prior_shape {self.prior_shapes}, bins_shift {bins_shift}, xs[i]: {xs[i]}'
return xs[-1]
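# Illustration of the bins_shift bookkeeping in prior_preprocess /
# prior_postprocess above (the numbers are made up, not Jukebox defaults):
# with lyric tokens in [0, 80) and music codes in [0, 2048), prior_bins is
# [80, 2048] and prior_bins_shift is [0, 80], so both streams share one
# vocabulary of size 2128:
#   lyric token 5 -> 5 + 0  = 5
#   music code 7  -> 7 + 80 = 87
# prior_postprocess subtracts the shift (clamping at 0) and returns only the
# music portion, xs[-1].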
def x_emb(self, z_conds):
z_conds = z_conds[:self.cond_level - self.level]
assert len(z_conds) == len(self.conditioner_blocks) == self.cond_level - self.level, f"Expected {len(z_conds)} == {len(self.conditioner_blocks)} == {self.cond_level} - {self.level}"
x_cond = None
for z_cond, conditioner_block in reversed(list(zip(z_conds, self.conditioner_blocks))):
x_cond = conditioner_block(z_cond, x_cond)
return x_cond
def encode(self, x, start_level=None, end_level=None, bs_chunks=1):
if start_level is None:
start_level = self.level
if end_level is None:
end_level = self.levels
# Get latents
with t.no_grad():
zs = self.encoder(x, start_level=start_level, end_level=end_level, bs_chunks=bs_chunks)
return zs
def decode(self, zs, start_level=None, end_level=None, bs_chunks=1):
if start_level is None:
start_level = self.level
if end_level is None:
end_level = self.levels
assert len(zs) == end_level - start_level
with t.no_grad():
x_out = self.decoder(zs, start_level=start_level, end_level=end_level, bs_chunks=bs_chunks)
return x_out
def get_cond(self, z_conds, y):
if y is not None:
assert y.shape[1] == 4 + self.y_emb.max_bow_genre_size + self.n_tokens, f"Expected {4} + {self.y_emb.max_bow_genre_size} + {self.n_tokens}, got {y.shape[1]}"
n_labels = y.shape[1] - self.n_tokens
y, prime = y[:,:n_labels], y[:,n_labels:]
else:
y, prime = None, None
y_cond, y_pos = self.y_emb(y) if self.y_cond else (None, None)
x_cond = self.x_emb(z_conds) if self.x_cond else y_pos
return x_cond, y_cond, prime
def sample(self, n_samples, z=None, z_conds=None, y=None, fp16=False, temp=1.0, top_k=0, top_p=0.0,
chunk_size=None, sample_tokens=None):
N = n_samples
if z is not None: assert z.shape[0] == N, f"Expected shape ({N},**), got shape {z.shape}"
if y is not None: assert y.shape[0] == N, f"Expected shape ({N},**), got shape {y.shape}"
if z_conds is not None:
for z_cond in z_conds:
assert z_cond.shape[0] == N, f"Expected shape ({N},**), got shape {z_cond.shape}"
no_past_context = (z is None or z.shape[1] == 0)
if dist.get_rank() == 0:
name = {True: 'Ancestral', False: 'Primed'}[no_past_context]
print(f"{name} sampling {n_samples} samples with temp={temp}, top_k={top_k}, top_p={top_p}")
with t.no_grad():
# Currently x_cond only uses immediately above layer
x_cond, y_cond, prime = self.get_cond(z_conds, y)
if self.single_enc_dec:
# assert chunk_size % self.prime_loss_dims == 0. TODO: Check if needed
if no_past_context:
z, x_cond = self.prior_preprocess([prime], [None, x_cond])
else:
z, x_cond = self.prior_preprocess([prime, z], [None, x_cond])
if sample_tokens is not None:
sample_tokens += self.n_tokens
z = self.prior.primed_sample(n_samples, z, x_cond, y_cond, fp16=fp16, temp=temp,
top_k=top_k, top_p=top_p, chunk_size=chunk_size, sample_tokens=sample_tokens)
z = self.prior_postprocess(z)
else:
encoder_kv = self.get_encoder_kv(prime, fp16=fp16, sample=True)
if no_past_context:
z = self.prior.sample(n_samples, x_cond, y_cond, encoder_kv, fp16=fp16, temp=temp, top_k=top_k,
top_p=top_p, sample_tokens=sample_tokens)
else:
z = self.prior.primed_sample(n_samples, z, x_cond, y_cond, encoder_kv, fp16=fp16, temp=temp,
top_k=top_k, top_p=top_p, chunk_size=chunk_size, sample_tokens=sample_tokens)
if sample_tokens is None:
assert_shape(z, (N, *self.z_shape))
return z
def get_encoder_kv(self, prime, fp16=False, sample=False):
if self.n_tokens != 0 and self.use_tokens:
if sample:
self.prime_prior.cuda()
N = prime.shape[0]
prime_acts = self.prime_prior(prime, None, None, None, fp16=fp16)
assert_shape(prime_acts, (N, self.prime_loss_dims, self.prime_acts_width))
assert prime_acts.dtype == t.float, f'Expected t.float, got {prime_acts.dtype}'
encoder_kv = self.prime_state_ln(self.prime_state_proj(prime_acts))
assert encoder_kv.dtype == t.float, f'Expected t.float, got {encoder_kv.dtype}'
if sample:
self.prime_prior.cpu()
if fp16:
encoder_kv = encoder_kv.half()
else:
encoder_kv = None
return encoder_kv
def get_prime_loss(self, encoder_kv, prime_t):
if self.use_tokens:
encoder_kv = encoder_kv.float()
encoder_kv = self.prime_x_out(encoder_kv)
prime_loss = nn.functional.cross_entropy(encoder_kv.view(-1, self.prime_bins), prime_t.view(-1)) / np.log(2.)
else:
prime_loss = t.tensor(0.0, device='cuda')
return prime_loss
def z_forward(self, z, z_conds=[], y=None, fp16=False, get_preds=False, get_attn_weights=False):
"""
Arguments:
get_attn_weights (bool or set): Makes forward prop dump
self-attention softmaxes to self.prior.transformer.ws. Either a
set of layer indices indicating which layers to store, or a
boolean value indicating whether to dump all.
"""
assert isinstance(get_attn_weights, (bool, set))
if get_attn_weights:
self.prior.transformer.set_record_attn(get_attn_weights)
x_cond, y_cond, prime = self.get_cond(z_conds, y)
if self.copy_input:
prime = z[:,:self.n_tokens]
if self.single_enc_dec:
z, x_cond = self.prior_preprocess([prime, z], [None, x_cond])
(prime_loss, gen_loss), preds = self.prior(z, x_cond, y_cond, fp16=fp16, get_sep_loss=True, get_preds=get_preds)
else:
encoder_kv = self.get_encoder_kv(prime, fp16=fp16)
prime_loss = self.get_prime_loss(encoder_kv, prime)
gen_loss, preds = self.prior(z, x_cond, y_cond, encoder_kv, fp16=fp16, get_preds=get_preds)
loss = (self.prime_loss_fraction*prime_loss*self.prime_loss_dims/self.total_loss_dims) + \
(gen_loss*self.gen_loss_dims/self.total_loss_dims)
metrics=dict(bpd=gen_loss.clone().detach(), prime_loss=prime_loss.clone().detach(),
gen_loss=gen_loss.clone().detach())
if get_preds:
metrics["preds"] = preds.clone().detach()
if get_attn_weights:
ws = self.prior.transformer.ws
self.prior.transformer.set_record_attn(False)
return ws
else:
return loss, metrics
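# Hedged usage sketch for the get_attn_weights switch documented above
# (prior, z, z_conds and y are assumed to exist already; the layer indices are
# arbitrary examples):
#
#   ws = prior.z_forward(z, z_conds=z_conds, y=y, get_attn_weights={0, 3, 7})
#   # ws holds whatever self.prior.transformer recorded in .ws for layers
#   # 0, 3 and 7; pass get_attn_weights=True to record every layer instead.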
def forward(self, x, y=None, fp16=False, decode=False, get_preds=False):
z, *z_conds = self.encode(x)
loss, metrics = self.z_forward(z=z, z_conds=z_conds, y=y, fp16=fp16, get_preds=get_preds)
if decode:
x_out = self.decode([z, *z_conds])
to pi'
ein = 1.0 * esig
pol = np.deg2rad(polarisation)
eout = np.cos(pol)*esig + np.sin(pol)*piout
if polarisation in ['sigmasigma', 'sigsig', 'ss']:
ein = 1.0 * esig
eout = 1.0 * esig
elif polarisation in ['sigmapi', 'sigpi', 'sp']:
ein = 1.0 * esig
eout = 1.0 * piout
elif polarisation in ['pisigma', 'pisig', 'ps']:
ein = 1.0 * piin
eout = 1.0 * esig
elif polarisation in ['pipi', 'pp']:
ein = 1.0 * piin
eout = 1.0 * piout
out_kin[n, :] = kin
out_kout[n, :] = kout
out_ein[n, :] = ein
out_eout[n, :] = eout
return out_kin, out_kout, out_ein, out_eout
def scatteringcomponents(self, mxmymz, hkl, azim_zero=[1,0,0], psi=0):
"""
Transform magnetic vector into components within the scattering plane
***warning - may not be correct for non-cubic systems***
"""
# Define coordinate system I,J,Q (U1,U2,U3)
U = self.scatteringbasis(hkl, azim_zero, psi)
# Determine components of the magnetic vector
z1z2z3 = np.dot(mxmymz, U.T) # [mxmymz.I, mxmymz.J, mxmymz.Q]
return fg.norm(z1z2z3)
def scatteringbasis(self, hkl, azim_zero=[1, 0, 0], psi=0):
"""
Determine the scattering and polarisation vectors of a reflection based on energy, azimuth and polarisation.
:param hkl: [n,3] array of reflections
:param azim_zero: [1,3] direction along which the azimuthal zero angle is determined
:param psi: float azimuthal angle about U3 in degrees
:return: U1, U2, U3
The basis is chosen such that Q defines the scattering plane, the sigma direction is normal to this plane,
the pi direction is always within this plane.
The azimuthal angle defines a rotation about the Q axis in a clockwise manner, matching I16.
At an azimuth of 0 degrees, U1 is perpendicular to Q, along the direction of azim_zero.
"""
# Define coordinate system I,J,Q (U1,U2,U3)
# See FDMNES User's Guide p20 'II-11) Anomalous or resonant diffraction'
# U1 || projection of azim_zero
# U2 _|_ U1,U3
# U3 || Q = kf-ki
azim_zero = fg.norm(self.xtl.Cell.calculateQ(azim_zero)) # put in orthogonal basis
Qhat = fg.norm(self.xtl.Cell.calculateQ(hkl)).reshape([-1,3]) # || Q
AxQ = fg.norm(np.cross(azim_zero, Qhat))
Ihat = fg.norm(np.cross(Qhat, AxQ)).reshape([-1,3]) # || to projection of azim_zero
Jhat = fg.norm(np.cross(Qhat, Ihat)).reshape([-1,3]) # _|_ to I and Q
# Rotate psi about Qhat
rpsi = np.deg2rad(psi)
# -ve sin makes clockwise rotation
# This was checked on 21/1/19 vs CRO paper + sergio's calculations and seems to agree with experiment,
# however we never did an azimuthal scan of the (103) which would have distinguished this completely.
Ihat_psi = fg.norm(np.cos(rpsi) * Ihat - np.sin(rpsi) * Jhat)
Jhat_psi = fg.norm(np.cross(Qhat, Ihat_psi))
return np.vstack([Ihat_psi, Jhat_psi, Qhat])
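# Standalone numpy sketch of the basis construction above for the simplest
# orthogonal case (illustrative only; the real method works through
# self.xtl.Cell.calculateQ and fg.norm in the crystal's reciprocal basis):
#
#   import numpy as np
#   Qhat = np.array([0., 0., 1.])        # Q along z
#   azim_zero = np.array([1., 0., 0.])   # azimuthal reference along x
#   AxQ = np.cross(azim_zero, Qhat)      # [0, -1, 0]
#   Ihat = np.cross(Qhat, AxQ)           # [1, 0, 0], projection of azim_zero
#   Jhat = np.cross(Qhat, Ihat)          # [0, 1, 0]
#   # psi = 90 degrees then rotates Ihat onto -Jhat, i.e. the clockwise
#   # rotation performed by cos(rpsi)*Ihat - sin(rpsi)*Jhat above.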
def print_scattering_coordinates(self,hkl,azim_zero=[1,0,0],psi=0):
"""
Transform magnetic vector into components within the scattering plane
***warning - may not be correct for non-cubic systems***
"""
# Define coordinate system I,J,Q (U1,U2,U3)
Qhat = fg.norm(self.xtl.Cell.calculateQ(hkl)) # || Q
AxQ = fg.norm(np.cross(azim_zero,Qhat))
Ihat = fg.norm(np.cross(Qhat,AxQ)) # || to azim_zero
Jhat = fg.norm(np.cross(Qhat,Ihat)) # _|_ to I and Q
# Rotate coordinate system by azimuth
Ihat_psi = fg.norm(np.cos(np.deg2rad(psi))*Ihat + np.sin(np.deg2rad(psi))*Jhat)
Jhat_psi = fg.norm(np.cross(Qhat,Ihat_psi))
# Determine components of the magnetic vector
U=np.vstack([Ihat_psi,Jhat_psi,Qhat])
print('U1 = (%5.2f,%5.2f,%5.2f)'%(U[0,0],U[0,1],U[0,2]))
print('U2 = (%5.2f,%5.2f,%5.2f)'%(U[1,0],U[1,1],U[1,2]))
print('U3 = (%5.2f,%5.2f,%5.2f)'%(U[2,0],U[2,1],U[2,2]))
def print_intensity(self, HKL):
"""
Print intensities calculated in different ways
"""
HKL = np.asarray(np.rint(HKL),dtype=float).reshape([-1,3])
Qmag = self.xtl.Cell.Qmag(HKL)
srt = np.argsort(Qmag)
HKL = HKL[srt,:]
IN=self.neutron(HKL)
IX=self.x_ray(HKL)
INM=self.magnetic_neutron(HKL)*1e4
IXM=self.xray_magnetic(HKL)*1e4
IXRss=self.xray_resonant(HKL, None, 'ss')
IXRsp=self.xray_resonant(HKL, None, 'sp')
IXRps=self.xray_resonant(HKL, None, 'ps')
IXRpp=self.xray_resonant(HKL, None, 'pp')
fmt = '(%2.0f,%2.0f,%2.0f) %8.1f %8.1f %8.2f %8.2f ss=%8.2f sp=%8.2f ps=%8.2f pp=%8.2f'
print('( h, k, l) Neutron xray Magn. N Magn. XR sig-sig sig-pi pi-sig pi-pi')
for n in range(len(HKL)):
vals=(HKL[n][0],HKL[n][1],HKL[n][2],IN[n],IX[n],INM[n],IXM[n],IXRss[n],IXRsp[n],IXRps[n],IXRpp[n])
print(fmt%vals)
def old_intensity(self, HKL, scattering_type=None):
"""
Calculate the squared structure factor for the given HKL
Crystal.intensity([1,0,0])
Crystal.intensity([[1,0,0],[2,0,0],[3,0,0]])
Returns an array with the same length as HKL, giving the real intensity at each reflection.
Notes:
- Uses x-ray atomic form factors, calculated from approximated tables in the ITC
- This may be a little slow for large numbers of reflections, as it is not currently
possible to use accelerated calculation methods in Jython.
- Debye-Waller factor (atomic displacement) is applied for isotropic ADPs
- Crystal.scale is used to scale the complex structure factor, so the intensity is
reduced by (Crystal.scale)^2
- Testing against structure factors calculated by Vesta.exe is very close, though there
are some discrepancies, probably due to the method of calculation of the form factor.
"""
if scattering_type is None:
scattering_type = self._scattering_type
scattering_type = scattering_type.lower()
# Break up long lists of HKLs
n_arrays = np.ceil(len(HKL)*len(self.xtl.Structure.u)/10000.)
hkl_array = np.array_split(HKL, n_arrays)
intensity = []
for _hkl in hkl_array:
if scattering_type in ['xray','x','x-ray','thomson','charge']:
intensity += self.x_ray(_hkl).tolist()
elif scattering_type in ['neutron','n','nuclear']:
intensity += self.neutron(_hkl).tolist()
elif scattering_type in ['xray magnetic','magnetic xray','spin xray','xray spin']:
intensity += list(self.xray_magnetic(_hkl)*1e4)
elif scattering_type in ['neutron magnetic','magnetic neutron','magnetic']:
intensity += list(self.magnetic_neutron(_hkl)*1e4)
elif scattering_type in ['xray dispersion']:
intensity += self.xray_dispersion(_hkl, self._energy_kev).tolist()
elif scattering_type in ['xray resonant','resonant','resonant xray','rxs']:
intensity += self.xray_resonant(_hkl).tolist()
elif scattering_type in ['xray resonant magnetic', 'xray magnetic resonant',
'resonant magnetic', 'magnetic resonant']:
intensity += self.xray_resonant_magnetic(
_hkl,
self._energy_kev,
self._azimuthal_reference,
self._azimuthal_angle,
self._polarisation,
F0=0, F1=1, F2=0).tolist()
elif scattering_type in ['xray nonresonant magnetic', 'xray magnetic nonresonant',
'nonresonant magnetic', 'magnetic nonresonant',
'xray non-resonant magnetic', 'xray magnetic non-resonant',
'non-resonant magnetic', 'magnetic non-resonant']:
intensity += self.xray_resonant_magnetic(
_hkl,
self._energy_kev,
self._azimuthal_reference,
self._azimuthal_angle,
self._polarisation).tolist()
else:
print('Scattering type not defined')
return np.array(intensity)
def old_structure_factor(self, HKL, scattering_type=None):
"""
Calculate the complex structure factor for the given HKL
Crystal.structure_factor([1,0,0])
Crystal.structure_factor([[1,0,0],[2,0,0],[3,0,0]])
Returns an array with the same length as HKL, giving the complex structure factor at each reflection.
Notes:
- Uses x-ray atomic form factors, calculated from approximated tables in the ITC
- This may be a little slow for large numbers of reflections, as it is not currently
possible to use accelerated calculation methods in Jython.
- Debye-Waller factor (atomic displacement) is applied for isotropic ADPs
- Crystal.scale is used to scale the complex structure factor, so the intensity is
reduced by (Crystal.scale)^2
- Testing against structure factors calculated by Vesta.exe is very close, though there
are some discrepancies, probably due to the method of calculation of the form factor.
"""
prev_sf_setting = self._return_structure_factor
self._return_structure_factor = True
sf = self.intensity(HKL, scattering_type)
self._return_structure_factor = prev_sf_setting
return sf
def hkl(self, HKL, energy_kev=None):
""" Calculate the two-theta and intensity of the given HKL, display the result"""
if energy_kev is None:
energy_kev = self._energy_kev
HKL = np.asarray(np.rint(HKL),dtype=float).reshape([-1,3])
tth = self.xtl.Cell.tth(HKL,energy_kev)
inten = self.intensity(HKL)
print('Energy = %6.3f keV' % energy_kev)
print('( h, k, l) TwoTheta Intensity')
for n in range(len(tth)):
print('(%2.0f,%2.0f,%2.0f) %8.2f %9.2f' % (HKL[n,0],HKL[n,1],HKL[n,2],tth[n],inten[n]))
def hkl_reflection(self, HKL, energy_kev=None):
"""
Calculate the theta, two-theta and intensity of the given HKL in reflection geometry, display the result
Uses sample orientation set up in setup_scatter
:param HKL: [h,k,l] or list of hkl
:param energy_kev: None or float
:return: str
"""
if energy_kev is None:
energy_kev = self._energy_kev
HKL = np.asarray(np.rint(HKL),dtype=float).reshape([-1,3])
tth = self.xtl.Cell.tth(HKL,energy_kev)
theta = self.xtl.Cell.theta_reflection(HKL, energy_kev, self._scattering_specular_direction, self._scattering_theta_offset)
inten = self.intensity(HKL)
print('Energy = %6.3f keV' % energy_kev)
print('Specular Direction = (%1.0g,%1.0g,%1.0g)' %
(self._scattering_specular_direction[0],
self._scattering_specular_direction[1],
self._scattering_specular_direction[2]))
print('( h, k, l) Theta TwoTheta Intensity')
for n in range(len(tth)):
print('(%2.0f,%2.0f,%2.0f) %8.2f %8.2f %9.2f' %
(HKL[n, 0], HKL[n, 1], HKL[n, 2], theta[n], tth[n], inten[n]))
def hkl_transmission(self,HKL,energy_kev=None):
" Calculate the theta, two-theta and intensity of the given HKL in transmission geometry, display the result"
if energy_kev is None:
energy_kev = self._energy_kev
HKL = np.asarray(np.rint(HKL),dtype=float).reshape([-1,3])
tth = self.xtl.Cell.tth(HKL,energy_kev)
theta = self.xtl.Cell.theta_transmission(HKL, energy_kev, self._scattering_specular_direction,self._scattering_theta_offset)
inten = self.intensity(HKL)
print('Energy = %6.3f keV' % energy_kev)
print('Direction parallel to beam = (%1.0g,%1.0g,%1.0g)' %(self._scattering_parallel_direction[0],self._scattering_parallel_direction[1],self._scattering_parallel_direction[2]))
print('( h, k, l) Theta TwoTheta Intensity')
for n in range(len(tth)):
print('(%2.0f,%2.0f,%2.0f) %8.2f %8.2f %9.2f' % (HKL[n,0],HKL[n,1],HKL[n,2],theta[n],tth[n],inten[n]))
def generate_powder(self, q_max=8, peak_width=0.01, background=0, powder_average=True):
"""
Generates array of intensities along a spaced grid, equivalent to a powder pattern.
Q,I = generate_powder(q_max=8, peak_width=0.01, background=0)
q_max = maximum Q, in A-1
peak_width = width of convolution, in A-1
background = average of normal background
powder_average = True*/False, apply the powder averaging correction
Returns:
Q = [1000x1] array of wave-vector values
I = [1000x1] array of intensity values
given number of items per page
:param str item: The name of the item type whose inventory pages to calculate
:param int maxPerPage: The maximum number of items that may be present on a single page of items (TODO: Add a default)
:return: The number of pages of size maxPerPage needed to display all of the user's inactive items of the named type
:rtype: int
:raise ValueError: When requesting an invalid item type
:raise NotImplementedError: When requesting a valid item type, but one that is not yet implemented (e.g. commodity)
"""
if item not in bbConfig.validItemNames:
raise ValueError("Requested an invalid item name: " + item)
numWeapons = self.inactiveWeapons.numKeys
numModules = self.inactiveModules.numKeys
numTurrets = self.inactiveTurrets.numKeys
numShips = self.inactiveShips.numKeys
itemsNum = 0
if item == "all":
itemsNum = max(numWeapons, numModules, numTurrets, numShips)
elif item == "module":
itemsNum = numModules
elif item == "weapon":
itemsNum = numWeapons
elif item == "turret":
itemsNum = numTurrets
elif item == "ship":
itemsNum = numShips
else:
raise NotImplementedError("Valid but unsupported item name: " + item)
return int(itemsNum/maxPerPage) + (0 if itemsNum % maxPerPage == 0 else 1)
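# Quick illustration of the page-count arithmetic above (example values):
# 25 items with maxPerPage=10 gives int(25/10) + 1 = 3 pages, while an exact
# multiple such as 30 items gives int(30/10) + 0 = 3 pages; this is equivalent
# to math.ceil(itemsNum / maxPerPage) without needing the math import.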
def lastItemNumberOnPage(self, item : str, pageNum : int, maxPerPage : int) -> int:
"""Get index of the last item on the given page number, where page numbers are of size maxPerPage.
This is an absolute index from the start of the inventory, not a relative index from the start of the page.
:param str item: The name of the item type whose last index to calculate
:param int maxPerPage: The maximum number of items that may be present on a single page of items (TODO: Add a default)
:return: The index of the last item on page pageNum, where page numbers are of size maxPerPage
:rtype: int
:raise ValueError: When requesting an invalid item type
:raise NotImplementedError: When requesting a valid item type, but one that is not yet implemented (e.g. commodity)
"""
if item not in bbConfig.validItemNames:
raise ValueError("Requested an invalid item name: " + item)
if pageNum < self.numInventoryPages(item, maxPerPage):
return pageNum * maxPerPage
elif item == "ship":
return self.inactiveShips.numKeys
elif item == "weapon":
return self.inactiveWeapons.numKeys
elif item == "module":
return self.inactiveModules.numKeys
elif item == "turret":
return self.inactiveTurrets.numKeys
else:
raise NotImplementedError("Valid but unsupported item name: " + item)
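# Example of the paging indices above (values are illustrative): with 25
# inactive ships and maxPerPage=10 there are 3 pages; pages 1 and 2 end at
# absolute indices 10 and 20 (pageNum * maxPerPage), and the final page falls
# through to inactiveShips.numKeys, ending at 25.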
def unequipAll(self, ship : bbShip.bbShip):
"""Unequip all items from the given bbShip, and move them into the user's inactive items ('hangar')
The user must own ship.
:param bbShip ship: the ship whose items to transfer to storage
:raise TypeError: When given any other type than bbShip
:raise RuntimeError: when given a bbShip that is not owned by this user
"""
if not type(ship) == bbShip.bbShip:
raise TypeError("Can only unequipAll from a bbShip. Given " + str(type(ship)))
if not (self.activeShip == ship or ship in self.inactiveShips):
raise RuntimeError("Attempted to unequipAll on a ship that isnt owned by this bbUser")
for weapon in ship.weapons:
self.inactiveWeapons.addItem(weapon)
ship.clearWeapons()
for module in ship.modules:
self.inactiveModules.addItem(module)
ship.clearModules()
for turret in ship.turrets:
self.inactiveTurrets.addItem(turret)
ship.clearTurrets()
def validateLoadout(self):
"""Ensure that the user's active loadout complies with bbModuleFactory.maxModuleTypeEquips
This method was written as a transferal measure when maxModuleTypeEquips was first released, and should seldom be used
"""
incompatibleModules = []
allModulesChecked = False
for currentModule in self.activeShip.modules:
if not self.activeShip.canEquipModuleType(currentModule.getType()):
incompatibleModules.append(currentModule)
self.activeShip.unequipModuleObj(currentModule)
finalModules = []
for currentModule in incompatibleModules:
if self.activeShip.canEquipModuleType(currentModule.getType()):
self.activeShip.equipModule(currentModule)
else:
finalModules.append(currentModule)
for currentModule in finalModules:
self.inactiveModules.addItem(currentModule)
def equipShipObj(self, ship : bbShip.bbShip, noSaveActive=False):
"""Equip the given ship, replacing the active ship.
Give noSaveActive=True to delete the currently equipped ship.
:param bbShip ship: The ship to equip. Must be owned by this user
:param bool noSaveActive: Give True to delete the currently equipped ship. Give False to move the active ship to the hangar. (Default False)
:raise RuntimeError: When given a bbShip that is not owned by this user
"""
if not (self.activeShip == ship or ship in self.inactiveShips):
raise RuntimeError("Attempted to equip a ship that isnt owned by this bbUser")
if not noSaveActive and self.activeShip is not None:
self.inactiveShips.addItem(self.activeShip)
if ship in self.inactiveShips:
self.inactiveShips.removeItem(ship)
self.activeShip = ship
def equipShipIndex(self, index : int):
"""Equip the ship at the given index in the user's inactive ships
:param int index: The index from the user's inactive ships of the requested ship
:raise IndexError: When given an index that is out of range of the user's inactive ships
"""
if not (0 <= index <= self.inactiveShips.numKeys - 1):
raise IndexError("Index out of range")
if self.activeShip is not None:
self.inactiveShips.addItem(self.activeShip)
self.activeShip = self.inactiveShips[index]
self.inactiveShips.removeItem(self.activeShip)
def toDictNoId(self) -> dict:
"""Serialize this bbUser to a dictionary representation for saving to file.
:return: A dictionary containing all information needed to recreate this user
:rtype: dict
"""
inactiveShipsDict = []
for ship in self.inactiveShips.keys:
inactiveShipsDict.append(self.inactiveShips.items[ship].toDict())
inactiveModulesDict = []
for module in self.inactiveModules.keys:
inactiveModulesDict.append(self.inactiveModules.items[module].toDict())
inactiveWeaponsDict = []
for weapon in self.inactiveWeapons.keys:
inactiveWeaponsDict.append(self.inactiveWeapons.items[weapon].toDict())
inactiveTurretsDict = []
for turret in self.inactiveTurrets.keys:
inactiveTurretsDict.append(self.inactiveTurrets.items[turret].toDict())
alerts = {}
for alertID in self.userAlerts.keys():
if isinstance(self.userAlerts[alertID], UserAlerts.StateUserAlert):
alerts[alertID] = self.userAlerts[alertID].state
return {"credits":self.credits, "lifetimeCredits":self.lifetimeCredits,
"bountyCooldownEnd":self.bountyCooldownEnd, "systemsChecked":self.systemsChecked,
"bountyWins":self.bountyWins, "activeShip": self.activeShip.toDict(), "inactiveShips":inactiveShipsDict,
"inactiveModules":inactiveModulesDict, "inactiveWeapons":inactiveWeaponsDict, "inactiveTurrets": inactiveTurretsDict, "lastSeenGuildId":self.lastSeenGuildId,
"duelWins": self.duelWins, "duelLosses": self.duelLosses, "duelCreditsWins": self.duelCreditsWins, "duelCreditsLosses": self.duelCreditsLosses,
"bountyWinsToday": self.bountyWinsToday, "dailyBountyWinsReset": self.dailyBountyWinsReset.timestamp(), "pollOwned": self.pollOwned}
def userDump(self) -> str:
"""Get a string containing key information about the user.
:return: A string containing the user ID, credits, lifetimeCredits, bountyCooldownEnd, systemsChecked and bountyWins
:rtype: str
"""
data = "bbUser #" + str(self.id) + ": "
for att in [self.credits, self.lifetimeCredits, self.bountyCooldownEnd, self.systemsChecked, self.bountyWins]:
data += str(att) + "/"
return data[:-1]
def getStatByName(self, stat : str) -> Union[int, float]:
"""Get a user attribute by its string name. This method is primarily used in leaderboard generation.
:param str stat: One of id, credits, lifetimeCredits, bountyCooldownEnd, systemsChecked, bountyWins or value
:return: The requested user attribute
:rtype: int or float
:raise ValueError: When given an invalid stat name
"""
if stat == "id":
return self.id
elif stat == "credits":
return self.credits
elif stat == "lifetimeCredits":
return self.lifetimeCredits
elif stat == "bountyCooldownEnd":
return self.bountyCooldownEnd
elif stat == "systemsChecked":
return self.systemsChecked
elif stat == "bountyWins":
return self.bountyWins
elif stat == "value":
modulesValue = 0
for module in self.inactiveModules.keys:
modulesValue += self.inactiveModules.items[module].count * module.getValue()
turretsValue = 0
for turret in self.inactiveTurrets.keys:
turretsValue += self.inactiveTurrets.items[turret].count * turret.getValue()
weaponsValue = 0
for weapon in self.inactiveWeapons.keys:
weaponsValue += self.inactiveWeapons.items[weapon].count * weapon.getValue()
shipsValue = 0
for ship in self.inactiveShips.keys:
shipsValue += self.inactiveShips.items[ship].count * ship.getValue()
return modulesValue + turretsValue + weaponsValue + shipsValue + self.activeShip.getValue() + self.credits
else:
raise ValueError("Unknown stat name: " + str(stat))
def getInactivesByName(self, item : str) -> bbInventory:
"""Get the all of the user's inactive (hangar) items of the named type.
The given bbInventory is mutable, and can alter the contents of the user's inventory.
:param str item: One of ship, weapon, module or turret
:return: A bbInventory containing all of the user's inactive items of the named type.
:rtype: bbInventory
:raise ValueError: When requesting an invalid item type name
:raise NotImplementedError: When requesting a valid item type name, but one that is not yet implemented (e.g. commodity)
"""
if item == "all" or item not in bbConfig.validItemNames:
raise ValueError("Invalid item type: " + item)
if item == "ship":
return self.inactiveShips
if item == "weapon":
return self.inactiveWeapons
if item == "module":
return self.inactiveModules
if item == "turret":
return self.inactiveTurrets
else:
raise NotImplementedError("Valid, but unrecognised item type: " + item)
def hasDuelChallengeFor(self, targetBBUser : bbUser) -> bool:
"""Decide whether or not this user has an active duel request targetted at the given bbUser
:param bbUser targetBBUser: The user to check for duel request existence
:return: True if this user has sent a duel request to the given user, and it is still active. False otherwise
:rtype: bool
"""
return targetBBUser in self.duelRequests
def addDuelChallenge(self, duelReq : DuelRequest.DuelRequest):
"""Store a new duel request from this user to another.
The duel request must still be active (TODO: Add validation),
cas = settings.login_form
cas_user = cas.get_user()
if cas_user:
cas_user[passfield] = None
# Ensure new users go through their post registration tasks
register_onaccept = settings.register_onaccept
if register_onaccept:
settings.register_onaccept = \
[self.s3_register_onaccept,
register_onaccept, # Used by DRRPP
]
else:
settings.register_onaccept = self.s3_register_onaccept
user = self.get_or_create_user(utable._filter_fields(cas_user))
elif hasattr(cas, "login_form"):
return cas.login_form()
else:
# We need to pass through login again before going on
if next is DEFAULT:
next = request.vars._next or deployment_settings.get_auth_login_next()
next = "%s?_next=%s" % (URL(r = request), next)
redirect(cas.login_url(next))
# Process authenticated users
if user:
user = Storage(utable._filter_fields(user, id=True))
self.login_user(user)
if log and self.user:
self.log_event(log, self.user)
# How to continue
if next is DEFAULT:
is_admin = self.s3_has_role("ADMIN")
if is_admin:
# Setup
if deployment_settings.has_module("setup") and \
deployment_settings.get_setup_wizard_questions():
itable = current.s3db.setup_instance
instance = db(itable.url == "https://%s" % request.env.HTTP_HOST).select(itable.id,
itable.deployment_id,
itable.configured,
limitby = (0, 1),
).first()
if instance and not instance.configured:
# Run Configuration Wizard
next = URL(c="setup", f="deployment",
args = [instance.deployment_id, "instance", instance.id, "wizard"],
)
elif accepted_form:
# Check for pending consent upon login?
pending_consent = deployment_settings.get_auth_consent_check()
if callable(pending_consent):
pending_consent = pending_consent()
if pending_consent:
next = URL(c="default", f="user",
args = ["consent"],
)
if next is DEFAULT:
if deployment_settings.get_auth_login_next_always():
next = deployment_settings.get_auth_login_next()
if callable(next):
next = next()
else:
next = request.vars.get("_next")
if not next:
next = deployment_settings.get_auth_login_next()
if callable(next):
next = next()
if settings.login_form == self:
if accepted_form:
if onaccept:
onaccept(form)
if isinstance(next, (list, tuple)):
# fix issue with 2.6/2.7
next = next[0]
if next and not next[0] == "/" and next[:4] != "http":
next = self.url(next.replace("[id]", str(form.vars.id)))
redirect(next)
utable[userfield].requires = old_requires
return form
else:
redirect(next)
# -------------------------------------------------------------------------
def change_password(self,
next = DEFAULT,
onvalidation = DEFAULT,
onaccept = DEFAULT,
log = DEFAULT,
):
"""
Returns:
a form that lets the user change password
"""
if not self.is_logged_in():
redirect(self.settings.login_url,
client_side = self.settings.client_side)
messages = self.messages
settings = self.settings
utable = settings.table_user
s = self.db(utable.id == self.user.id)
request = current.request
session = current.session
if next is DEFAULT:
next = self.get_vars_next() or settings.change_password_next
if onvalidation is DEFAULT:
onvalidation = settings.change_password_onvalidation
if onaccept is DEFAULT:
onaccept = settings.change_password_onaccept
if log is DEFAULT:
log = messages["change_password_log"]
passfield = settings.password_field
form = SQLFORM.factory(
Field("old_password", "password",
label = messages.old_password,
# No minimum length for old password
#requires = utable[passfield].requires,
requires = CRYPT(key = settings.hmac_key,
digest_alg = "sha512",
),
),
Field("new_password", "password",
label = messages.new_password,
requires = utable[passfield].requires,
),
Field("new_password2", "password",
label = messages.verify_password,
requires = [IS_EXPR("value==%s" % repr(request.vars.new_password),
messages.mismatched_password,
),
],
),
submit_button = messages.password_change_button,
hidden = {"_next": next},
formstyle = current.deployment_settings.get_ui_formstyle(),
separator = settings.label_separator
)
form.add_class("auth_change_password")
if form.accepts(request, session,
formname = "change_password",
onvalidation = onvalidation,
hideerror = settings.hideerror,
):
if not form.vars["old_password"] == s.select(utable[passfield],
limitby = (0, 1),
orderby_on_limitby = False,
).first()[passfield]:
form.errors["old_password"] = messages.invalid_password
else:
d = {passfield: str(form.vars.new_password)}
s.update(**d)
session.confirmation = messages.password_changed
self.log_event(log, self.user)
callback(onaccept, form)
if not next:
next = self.url(args = request.args)
else:
next = replace_id(next, form)
redirect(next, client_side=settings.client_side)
return form
# -------------------------------------------------------------------------
def reset_password(self,
next = DEFAULT,
onvalidation = DEFAULT,
onaccept = DEFAULT,
log = DEFAULT,
):
"""
Returns:
a form to reset the user password; overrides web2py's
version of the method so that it does not swallow the _next var.
"""
table_user = self.table_user()
request = current.request
session = current.session
messages = self.messages
settings = self.settings
if next is DEFAULT:
next = self.get_vars_next() or settings.reset_password_next
if settings.prevent_password_reset_attacks:
key = request.vars.key
if key:
session._reset_password_key = key
session._reset_password_next = next
redirect(self.url(args = "reset_password"))
else:
key = session._reset_password_key
next = session._reset_password_next
else:
key = request.vars.key
try:
t0 = int(key.split('-')[0])
if time.time() - t0 > 60 * 60 * 24:
raise Exception
user = table_user(reset_password_key=key)
if not user:
raise Exception
except Exception:
session.flash = messages.invalid_reset_password
redirect(next, client_side=settings.client_side)
key = user.registration_key
if key in ("pending", "disabled", "blocked") or (key or "").startswith("pending"):
session.flash = messages.registration_pending
redirect(next, client_side=settings.client_side)
if onvalidation is DEFAULT:
onvalidation = settings.reset_password_onvalidation
if onaccept is DEFAULT:
onaccept = settings.reset_password_onaccept
passfield = settings.password_field
form = SQLFORM.factory(
Field("new_password", "password",
label = messages.new_password,
requires = table_user[passfield].requires,
),
Field("new_password2", "password",
label = messages.verify_password,
requires = IS_EXPR("value==%s" % repr(request.vars.new_password),
messages.mismatched_password,
),
),
submit_button = messages.password_change_button,
hidden = {"_next": next},
formstyle = current.deployment_settings.get_ui_formstyle(),
separator = settings.label_separator
)
if form.accepts(request, session,
onvalidation = onvalidation,
hideerror = settings.hideerror,
):
user.update_record(
**{passfield: str(form.vars.new_password),
"registration_key": "",
"reset_password_key": "",
})
session.flash = messages.password_changed
if settings.login_after_password_change:
user = Storage(table_user._filter_fields(user, id=True))
self.login_user(user)
callback(onaccept, form)
redirect(next, client_side=settings.client_side)
return form
# -------------------------------------------------------------------------
def request_reset_password(self,
next = DEFAULT,
onvalidation = DEFAULT,
onaccept = DEFAULT,
log = DEFAULT,
):
"""
Returns a form to request a password reset; overrides web2py's
version of the method to apply Eden formstyles.
Args:
next: URL to redirect to after successful form submission
onvalidation: callback to validate password reset form
onaccept: callback to post-process password reset request
log: event description for the log (string)
"""
messages = self.messages
settings = self.settings
if not settings.mailer:
current.response.error = messages.function_disabled
return ""
utable = settings.table_user
request = current.request
session = current.session
captcha = settings.retrieve_password_captcha or \
(settings.retrieve_password_captcha != False and settings.captcha)
if next is DEFAULT:
next = self.get_vars_next() or settings.request_reset_password_next
if onvalidation is DEFAULT:
onvalidation = settings.reset_password_onvalidation
if onaccept is DEFAULT:
onaccept = settings.reset_password_onaccept
if log is DEFAULT:
log = messages["reset_password_log"]
userfield = settings.login_userfield
if userfield == "email":
utable.email.requires = [
IS_EMAIL(error_message=messages.invalid_email),
IS_IN_DB(self.db, utable.email,
error_message = messages.invalid_email,
),
]
else:
utable[userfield].requires = [
IS_IN_DB(self.db, utable[userfield],
error_message = messages.invalid_username,
),
]
form = SQLFORM(utable,
fields = [userfield],
hidden = {"_next": next},
showid = settings.showid,
submit_button = messages.password_reset_button,
delete_label = messages.delete_label,
formstyle = current.deployment_settings.get_ui_formstyle(),
separator = settings.label_separator
)
form.add_class("auth_reset_password")
if captcha:
s3_addrow(form, captcha.label, captcha,
captcha.comment, settings.formstyle, "captcha__row")
if form.accepts(request, session if self.csrf_prevention else None,
formname = "reset_password",
dbio = False,
onvalidation = onvalidation,
hideerror = settings.hideerror,
):
user = utable(**{userfield:form.vars.get(userfield)})
if not user:
session.error = messages["invalid_%s" % userfield]
redirect(self.url(args = request.args),
client_side = settings.client_side)
elif user.registration_key in ("pending", "disabled", "blocked"):
session.warning = messages.registration_pending
redirect(self.url(args=request.args),
client_side=settings.client_side)
if self.email_reset_password(user):
session.confirmation = messages.email_sent
else:
session.error = messages.unable_to_send_email
self.log_event(log, user)
callback(onaccept, form)
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next, client_side=settings.client_side)
# old_requires = utable.email.requires
return form
# -------------------------------------------------------------------------
def login_user(self, user):
"""
Log the user in
- common function called by login() & register()
"""
db = current.db
deployment_settings = current.deployment_settings
request = current.request
session = current.session
settings = self.settings
req_vars = request.vars
session.auth = Storage(
user = user,
last_visit = request.now,
expiration = req_vars.get("remember", False) and \
settings.long_expiration or settings.expiration,
remember = "remember" in req_vars,
hmac_key = <KEY>()
)
self.user = user
self.s3_set_roles()
# Set a Cookie to present user with login box by default
self.set_cookie()
# Read their language from the Profile
language = user.language
current.T.force(language)
session.s3.language = language
session.confirmation = self.messages.logged_in
# Update the timestamp of the User so we know when they last logged-in
utable = settings.table_user
db(utable.id == self.user.id).update(timestmp = request.utcnow)
# Set user's position
# @ToDo: Per-User settings
client_location = req_vars.get("auth_user_clientlocation")
if deployment_settings.get_auth_set_presence_on_login() and client_location:
position = client_location.split("|", 3)
userlat = float(position[0])
userlon = float(position[1])
accuracy = float(position[2]) / 1000 # Ensures accuracy is in km
closestpoint = 0
closestdistance = 0
gis = current.gis
# @ToDo: Filter to just Sites & Home Addresses?
locations = gis.get_features_in_radius(userlat, userlon, accuracy)
ignore_levels_for_presence = deployment_settings.get_auth_ignore_levels_for_presence()
greatCircleDistance = gis.greatCircleDistance
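# Among the features returned within the reported accuracy radius, pick the one
# nearest to the client position (skipping the configured admin levels), so it can
# be recorded as the user's presence below.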
for location in locations:
if location.level not in ignore_levels_for_presence:
# Measure from the reported client position, not from the previous closest point
currentdistance = greatCircleDistance(userlat,
userlon,
location.lat,
location.lon)
if closestpoint == 0 or currentdistance < closestdistance:
closestpoint = location
closestdistance = currentdistance
s3tracker = S3Tracker()
person_id = self.s3_logged_in_person()
if closestpoint == 0 and deployment_settings.get_auth_create_unknown_locations():
# There wasn't any near-by location, so create one
newpoint = {"lat": userlat,
"lon": userlon,
"name": "Waypoint"
}
closestpoint = current.s3db.gis_location.insert(**newpoint)
s3tracker(db.pr_person,
person_id).set_location(closestpoint,
timestmp = request.utcnow)
elif closestpoint != 0:
s3tracker(db.pr_person,
person_id).set_location(closestpoint,
timestmp = request.utcnow)
# -------------------------------------------------------------------------
def consent(self):
"""
Consent question form, e.g.
- when consent requires renewal, or
- new consent questions need to be asked, or
- user has been added by ADMIN and shall give consent upon login
- ...
Note:
This form cannot meaningfully prevent the | |
0,
"Unexpected value {} for NAT counter 'Packets'".format(nat_counters[entry]["Packets"]))
pytest_assert(int(nat_counters[entry]["Bytes"]) > 0,
"Unexpected value {} for NAT counter 'Bytes'".format(nat_counters[entry]["Bytes"]))
# Send bidirectional traffic
for path in DIRECTION_PARAMS:
generate_and_verify_traffic(duthost, ptfadapter, setup_data, interface_type, path, protocol_type, nat_type=nat_type)
@pytest.mark.nat_static
def test_nat_crud_static_nat(self, ptfhost, tbinfo, duthost, ptfadapter, setup_test_env,
protocol_type):
entries_table = {}
expected_error = "KeyError: \'{}\'"
interface_type, setup_info = setup_test_env
setup_data = copy.deepcopy(setup_info)
direction = 'host-tor'
nat_type = 'static_nat'
# Define network data and L4 ports
src_port, dst_port = get_l4_default_ports(protocol_type)
network_data = get_network_data(ptfadapter, setup_data, direction, interface_type, nat_type=nat_type)
# Set NAT configuration for test
apply_static_nat_config(duthost, ptfadapter, ptfhost, setup_data, network_data, direction, interface_type, nat_type,
network_data.public_ip, network_data.private_ip, protocol_type=protocol_type, nat_entry=nat_type, handshake=True)
# Create with CLI
crud_create = {"create": {"action": "add", "global_ip": network_data.public_ip, "local_ip": network_data.private_ip}}
entries_table.update(crud_operations_basic(duthost, crud_create))
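# Note (assumption about a helper defined elsewhere in this suite): crud_operations_basic()
# is expected to run the corresponding "config nat add/remove static basic" CLI on the DUT
# and return the expected CONFIG_DB entry keyed by the global IP, which the sonic-cfggen
# read below is compared against.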
# Read from running config and check
nat_rules_config = json.loads(duthost.command("sudo sonic-cfggen -d --var-json {}".format(STATIC_NAT_TABLE_NAME))["stdout"])
pytest_assert(nat_rules_config[network_data.public_ip] == entries_table[network_data.public_ip],
"Unexpected NAT rule configuration for {}."
" Actual: {}."
" Expected: {}".format(network_data.public_ip,
nat_rules_config[network_data.public_ip],
entries_table[network_data.public_ip]))
# Send bidirectional traffic
for path in DIRECTION_PARAMS:
generate_and_verify_traffic(duthost, ptfadapter, setup_data, interface_type, path, protocol_type, nat_type=nat_type)
# Update with CLI
crud_remove = {"remove": {"action": "remove", "global_ip": network_data.public_ip, "local_ip": network_data.private_ip}}
entries_table.update(crud_operations_basic(duthost, crud_remove))
# Read
nat_rules_config = exec_command(duthost,
["sudo sonic-cfggen -d --var-json {}".format(STATIC_NAT_TABLE_NAME)])
condition = (expected_error.format(STATIC_NAT_TABLE_NAME) in nat_rules_config['stderr_lines'] or nat_rules_config['stdout'] == '')
pytest_assert(condition,
"Unexpected error for deleted static NAT rule: {}".format(nat_rules_config['stderr_lines']))
# Send traffic and check that NAT translation is not performed for SNAT (host-tor)
generate_and_verify_not_translated_traffic(ptfadapter, setup_data, interface_type, 'host-tor', protocol_type, nat_type=nat_type)
network_data = get_network_data(ptfadapter, setup_data, direction, interface_type, nat_type=nat_type, second_port=True)
crud_create = {"create": {"action": "add", "global_ip": network_data.public_ip, "local_ip": network_data.private_ip}}
entries_table.update(crud_operations_basic(duthost, crud_create))
# Read from running config and check
nat_rules_config = json.loads(duthost.command("sudo sonic-cfggen "
"-d --var-json {}".format(STATIC_NAT_TABLE_NAME))["stdout"])
pytest_assert(nat_rules_config[network_data.public_ip] == entries_table[network_data.public_ip],
"Unexpected NAT rule configuration for {}."
" Actual: {}."
" Expected: {}".format(network_data.public_ip,
nat_rules_config[network_data.public_ip],
entries_table[network_data.public_ip]))
# Perform TCP handshake from leaf-tor
perform_handshake(ptfhost, setup_data, protocol_type, direction,
network_data.exp_src_ip, dst_port,
network_data.exp_dst_ip, src_port,
network_data.public_ip)
# Send traffic and check that NAT translation is not performed for SNAT (host-tor)
generate_and_verify_not_translated_traffic(ptfadapter, setup_data, interface_type, 'host-tor', protocol_type, nat_type=nat_type)
# Remove with CLI
crud_remove = {"remove": {"action": "remove", "global_ip": network_data.public_ip, "local_ip": network_data.private_ip}}
entries_table.update(crud_operations_basic(duthost, crud_remove))
# Read
nat_rules_config = exec_command(duthost,
["sudo sonic-cfggen -d --var-json {}".format(STATIC_NAT_TABLE_NAME)])
condition = (expected_error.format(STATIC_NAT_TABLE_NAME) in nat_rules_config['stderr_lines'] or nat_rules_config['stdout'] == '')
pytest_assert(condition, "Unexpected error for deleted static basic NAT rule: {}".format(nat_rules_config['stderr_lines']))
# Send traffic and check that NAT translation is not performed for SNAT (host-tor)
generate_and_verify_not_translated_traffic(ptfadapter, setup_data, interface_type, 'host-tor', protocol_type, nat_type=nat_type)
@pytest.mark.nat_static
def test_nat_crud_static_napt(self, ptfhost, tbinfo, duthost, ptfadapter, setup_test_env, protocol_type):
entries_table = {}
expected_error = "KeyError: \'{}\'"
interface_type, setup_info = setup_test_env
setup_data = copy.deepcopy(setup_info)
direction = 'leaf-tor'
nat_type = 'static_napt'
# Define network data and L4 ports
network_data = get_network_data(ptfadapter, setup_data, direction, interface_type, nat_type=nat_type)
src_port, dst_port = get_l4_default_ports(protocol_type)
# Set NAT configuration for test
apply_static_nat_config(duthost, ptfadapter, ptfhost, setup_data, network_data, direction, interface_type, nat_type,
network_data.public_ip, network_data.private_ip, protocol_type=protocol_type, nat_entry=nat_type, handshake=True)
# Create with CLI
crud_create = {"create": {"action": "add", "global_ip": network_data.public_ip, "local_ip": network_data.private_ip,
"proto": protocol_type, "global_port": dst_port, "local_port": src_port
}
}
entries_table.update(crud_operations_napt(duthost, crud_create))
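# Note (assumption about a helper defined elsewhere in this suite): crud_operations_napt()
# additionally passes the protocol and L4 ports, so the expected CONFIG_DB entry is keyed
# as "<global_ip>|<PROTO>|<global_port>", matching key_entry below.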
# Read from running config and check
nat_rules_config = json.loads(duthost.command("sudo sonic-cfggen "
"-d --var-json {}".format(STATIC_NAPT_TABLE_NAME))["stdout"])
key_entry = "{}|{}|{}".format(network_data.public_ip, protocol_type.upper(), dst_port)
pytest_assert(nat_rules_config[key_entry] == entries_table[key_entry],
"Unexpected NAPT rule for {}".format(key_entry))
# Perform TCP handshake from leaf-tor
perform_handshake(ptfhost, setup_data, protocol_type, direction,
network_data.exp_src_ip, dst_port,
network_data.exp_dst_ip, src_port,
network_data.public_ip)
# Send bidirectional traffic
for path in DIRECTION_PARAMS:
generate_and_verify_traffic(duthost, ptfadapter, setup_data, interface_type, path, protocol_type, nat_type=nat_type)
# Update with CLI
crud_remove = {"remove": {"action": "remove", "global_ip": network_data.public_ip, "local_ip": network_data.private_ip,
"proto": protocol_type, "global_port": dst_port, "local_port": src_port
}
}
entries_table.update(crud_operations_napt(duthost, crud_remove))
# Read
nat_rules_config = exec_command(duthost,
["sudo sonic-cfggen -d --var-json {}".format(STATIC_NAPT_TABLE_NAME)])
condition = (expected_error.format(STATIC_NAPT_TABLE_NAME) in nat_rules_config['stderr_lines'] or nat_rules_config['stdout'] == '')
pytest_assert(condition,
"Unexpected error for deleted static NAPT rule")
# Send traffic and check that NAT translation is not performed for SNAT (host-tor)
generate_and_verify_not_translated_traffic(ptfadapter, setup_data, interface_type, 'host-tor', protocol_type, nat_type=nat_type)
# Define network data and L4 ports
network_data = get_network_data(ptfadapter, setup_data, direction, interface_type, nat_type=nat_type, second_port=True)
crud_create = {"create": {"action": "add", "global_ip": network_data.public_ip, "local_ip": network_data.private_ip,
"proto": protocol_type, "global_port": dst_port, "local_port": src_port
}
}
entries_table.update(crud_operations_napt(duthost, crud_create))
# Read from running config and check
nat_rules_config = json.loads(duthost.command("sudo sonic-cfggen "
"-d --var-json {}".format(STATIC_NAPT_TABLE_NAME))["stdout"])
key_entry = "{}|{}|{}".format(network_data.public_ip, protocol_type.upper(), dst_port)
pytest_assert(nat_rules_config[key_entry] == entries_table[key_entry],
"Unexpected NAT rule for {}".format(key_entry))
# Perform TCP handshake from leaf-tor
perform_handshake(ptfhost, setup_data, protocol_type, direction,
network_data.exp_src_ip, dst_port,
network_data.exp_dst_ip, src_port,
network_data.public_ip)
# Send bidirectional traffic
for path in DIRECTION_PARAMS:
generate_and_verify_traffic(duthost, ptfadapter, setup_data, interface_type, path, protocol_type, nat_type=nat_type, second_port=True)
# Remove with CLI
crud_remove = {"remove": {"action": "remove", "global_ip": network_data.public_ip, "local_ip": network_data.private_ip,
"proto": protocol_type, "global_port": dst_port, "local_port": src_port
}
}
entries_table.update(crud_operations_napt(duthost, crud_remove))
# Read
nat_rules_config = exec_command(duthost,
["sudo sonic-cfggen -d --var-json {}".format(STATIC_NAPT_TABLE_NAME)])
condition = (expected_error.format(STATIC_NAPT_TABLE_NAME) in nat_rules_config['stderr_lines'] or nat_rules_config['stdout'] == '')
pytest_assert(condition, "Unexpected error for deleted static NAPT rule")
# Send traffic and check that NAT translation is not performed for SNAT (host-tor)
generate_and_verify_not_translated_traffic(ptfadapter, setup_data, interface_type, 'host-tor', protocol_type, nat_type=nat_type, second_port=True)
@pytest.mark.nat_static
@pytest.mark.parametrize("reboot_type", ['cold', 'fast'])
def test_nat_reboot_static_basic(self, ptfhost, tbinfo, duthost, ptfadapter, setup_test_env, protocol_type,
reboot_type, localhost, reload_dut_config):
interface_type, setup_info = setup_test_env
setup_data = copy.deepcopy(setup_info)
direction = 'host-tor'
nat_type = 'static_nat'
# Define network data and L4 ports
network_data = get_network_data(ptfadapter, setup_data, direction, interface_type, nat_type=nat_type)
src_port, dst_port = get_l4_default_ports(protocol_type)
# Set NAT configuration for test
apply_static_nat_config(duthost, ptfadapter, ptfhost, setup_data, network_data, direction, interface_type, nat_type,
network_data.public_ip, network_data.private_ip, protocol_type=protocol_type, nat_entry=nat_type, handshake=True)
# Send bidirectional traffic
for path in DIRECTION_PARAMS:
generate_and_verify_traffic(duthost, ptfadapter, setup_data, interface_type, path, protocol_type, nat_type=nat_type)
# Save current configuration
duthost.command('sudo config save -y')
# Reboot
common_reboot(duthost, localhost, reboot_type=reboot_type, delay=10,
timeout=REBOOT_MAP[reboot_type]["timeout"], wait=120)
# Perform handshake from host-tor
perform_handshake(ptfhost, setup_data, protocol_type, direction,
network_data.ip_dst, dst_port,
network_data.ip_src, src_port,
network_data.public_ip)
# Send bidirectional traffic
for path in DIRECTION_PARAMS:
generate_and_verify_traffic(duthost, ptfadapter, setup_data, interface_type, path, protocol_type, nat_type=nat_type)
@pytest.mark.nat_static
@pytest.mark.parametrize("reboot_type", ['cold', 'fast'])
def test_nat_reboot_static_napt(self, ptfhost, tbinfo, duthost, ptfadapter, setup_test_env, protocol_type,
reboot_type, localhost, reload_dut_config):
interface_type, setup_info = setup_test_env
setup_data = copy.deepcopy(setup_info)
direction = 'leaf-tor'
nat_type = 'static_napt'
# Define network data and L4 ports
network_data = get_network_data(ptfadapter, setup_data, direction, interface_type, nat_type=nat_type)
src_port, dst_port = get_l4_default_ports(protocol_type)
# Set NAT configuration for test
apply_static_nat_config(duthost, ptfadapter, ptfhost, setup_data, network_data, direction, interface_type, nat_type,
network_data.public_ip, network_data.private_ip, protocol_type=protocol_type, nat_entry=nat_type, handshake=True)
# Send bidirectional traffic
for path in DIRECTION_PARAMS:
generate_and_verify_traffic(duthost, ptfadapter, setup_data, interface_type, path, protocol_type, nat_type=nat_type)
# Save current configuration
duthost.command('sudo config save -y')
# Reboot
common_reboot(duthost, localhost, reboot_type=reboot_type, delay=10,
timeout=REBOOT_MAP[reboot_type]["timeout"], wait=120)
# Set ARP entries by pinging the peers
check_peers_by_ping(duthost)
# Perform TCP handshake from leaf-tor
perform_handshake(ptfhost, setup_data, protocol_type, direction,
network_data.exp_src_ip, dst_port,
network_data.exp_dst_ip, src_port,
network_data.public_ip)
# Send bidirectional traffic
for path in DIRECTION_PARAMS:
generate_and_verify_traffic(duthost, ptfadapter, setup_data, interface_type, path, protocol_type, nat_type=nat_type)
@pytest.mark.nat_static
def test_nat_static_zones_basic_snat(self, ptfhost, tbinfo, duthost, ptfadapter, setup_test_env,
protocol_type):
# Prepare configuration for NAT zones modify test
interface_type, setup_info = setup_test_env
setup_data = copy.deepcopy(setup_info)
# Prepare configuration for NAT zones negative test
setup_info_negative_zones = copy.deepcopy(setup_info)
for key in setup_info_negative_zones['interfaces_nat_zone']:
setup_info_negative_zones['interfaces_nat_zone'][key]['zone_id'] = 1
direction = 'host-tor'
nat_type = 'static_nat'
# Define network data and L4 ports
network_data = get_network_data(ptfadapter, setup_data, direction, interface_type, nat_type=nat_type)
src_port, dst_port = get_l4_default_ports(protocol_type)
apply_static_nat_config(duthost, ptfadapter, ptfhost, setup_info_negative_zones, network_data, direction, interface_type, nat_type,
network_data.public_ip, network_data.private_ip, protocol_type=protocol_type, nat_entry=nat_type, handshake=True)
# Send traffic and check that NAT translation is not performed for SNAT (host-tor)
generate_and_verify_not_translated_traffic(ptfadapter, setup_info_negative_zones, interface_type, 'host-tor', protocol_type, nat_type=nat_type)
# Check static NAT when all NAT interface zones are correct
nat_zones_config(duthost, setup_data, interface_type)
# Perform TCP handshake from host-tor
perform_handshake(ptfhost, setup_data, protocol_type, direction,
network_data.ip_dst, dst_port,
network_data.ip_src, src_port,
network_data.public_ip)
# Send bidirectional traffic
for path in DIRECTION_PARAMS:
generate_and_verify_traffic(duthost, ptfadapter, setup_data, interface_type, path, protocol_type, nat_type=nat_type)
@pytest.mark.nat_static
def test_nat_static_zones_basic_icmp_snat(self, tbinfo, ptfhost, duthost, ptfadapter, setup_test_env):
# Prepare configuration for NAT zones modify test
interface_type, setup_info = setup_test_env
setup_data = copy.deepcopy(setup_info)
# Prepare configuration for NAT zones negative test
setup_info_negative_zones = copy.deepcopy(setup_info)
for key in setup_info_negative_zones['interfaces_nat_zone']:
setup_info_negative_zones['interfaces_nat_zone'][key]['zone_id'] = 1
direction = 'host-tor'
nat_type = 'static_nat'
# Define network data and L4 ports
network_data = get_network_data(ptfadapter, setup_data, direction, interface_type, nat_type=nat_type)
# Set NAT configuration for test
apply_static_nat_config(duthost, ptfadapter, ptfhost, setup_info_negative_zones, network_data, direction, interface_type, nat_type,
network_data.public_ip, network_data.private_ip, protocol_type="ICMP", nat_entry=nat_type)
# Send traffic and check that NAT translation is not performed for SNAT (host-tor)
generate_and_verify_not_translated_icmp_traffic(ptfadapter, setup_data, interface_type, direction, nat_type)
# Check static NAT when all NAT interface zones are correct
nat_zones_config(duthost, setup_data, interface_type)
# Send bidirectional traffic
for path in DIRECTION_PARAMS:
generate_and_verify_icmp_traffic(ptfadapter, setup_data, interface_type, path, nat_type)
@pytest.mark.nat_static
| |
email_text = '''
<!DOCTYPE html
PUBLIC "-//W3C//DTD XHTML 1.0 Transitional //EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<!--[if IE]><html xmlns="http://www.w3.org/1999/xhtml" class="ie"><![endif]-->
<!--[if !IE]><!-->
<html style="margin: 0;padding: 0;" xmlns="http://www.w3.org/1999/xhtml">
<!--<![endif]-->
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title></title>
<!--[if !mso]><!-->
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<!--<![endif]-->
<meta name="viewport" content="width=device-width" />
<style type="text/css">
@media only screen and (min-width: 620px) {
.wrapper {
min-width: 600px !important
}
.wrapper h1 {}
.wrapper h1 {
font-size: 30px !important;
line-height: 38px !important
}
.wrapper h2 {}
.wrapper h2 {
font-size: 24px !important;
line-height: 32px !important
}
.wrapper h3 {}
.wrapper h3 {
font-size: 20px !important;
line-height: 28px !important
}
.column {}
.wrapper .size-8 {
font-size: 8px !important;
line-height: 14px !important
}
.wrapper .size-9 {
font-size: 9px !important;
line-height: 16px !important
}
.wrapper .size-10 {
font-size: 10px !important;
line-height: 18px !important
}
.wrapper .size-11 {
font-size: 11px !important;
line-height: 19px !important
}
.wrapper .size-12 {
font-size: 12px !important;
line-height: 19px !important
}
.wrapper .size-13 {
font-size: 13px !important;
line-height: 21px !important
}
.wrapper .size-14 {
font-size: 14px !important;
line-height: 21px !important
}
.wrapper .size-15 {
font-size: 15px !important;
line-height: 23px !important
}
.wrapper .size-16 {
font-size: 16px !important;
line-height: 24px !important
}
.wrapper .size-17 {
font-size: 17px !important;
line-height: 26px !important
}
.wrapper .size-18 {
font-size: 18px !important;
line-height: 26px !important
}
.wrapper .size-20 {
font-size: 20px !important;
line-height: 28px !important
}
.wrapper .size-22 {
font-size: 22px !important;
line-height: 31px !important
}
.wrapper .size-24 {
font-size: 24px !important;
line-height: 32px !important
}
.wrapper .size-26 {
font-size: 26px !important;
line-height: 34px !important
}
.wrapper .size-28 {
font-size: 28px !important;
line-height: 36px !important
}
.wrapper .size-30 {
font-size: 30px !important;
line-height: 38px !important
}
.wrapper .size-32 {
font-size: 32px !important;
line-height: 40px !important
}
.wrapper .size-34 {
font-size: 34px !important;
line-height: 43px !important
}
.wrapper .size-36 {
font-size: 36px !important;
line-height: 43px !important
}
.wrapper .size-40 {
font-size: 40px !important;
line-height: 47px !important
}
.wrapper .size-44 {
font-size: 44px !important;
line-height: 50px !important
}
.wrapper .size-48 {
font-size: 48px !important;
line-height: 54px !important
}
.wrapper .size-56 {
font-size: 56px !important;
line-height: 60px !important
}
.wrapper .size-64 {
font-size: 64px !important;
line-height: 63px !important
}
}
</style>
<meta name="x-apple-disable-message-reformatting" />
<style type="text/css">
body {
margin: 0;
padding: 0;
}
table {
border-collapse: collapse;
table-layout: fixed;
}
* {
line-height: inherit;
}
[x-apple-data-detectors] {
color: inherit !important;
text-decoration: none !important;
}
.wrapper .footer__share-button a:hover,
.wrapper .footer__share-button a:focus {
color: #ffffff !important;
}
.btn a:hover,
.btn a:focus,
.footer__share-button a:hover,
.footer__share-button a:focus,
.email-footer__links a:hover,
.email-footer__links a:focus {
opacity: 0.8;
}
.preheader,
.header,
.layout,
.column {
transition: width 0.25s ease-in-out, max-width 0.25s ease-in-out;
}
.preheader td {
padding-bottom: 8px;
}
.layout,
div.header {
max-width: 400px !important;
-fallback-width: 95%% !important;
width: calc(100%% - 20px) !important;
}
div.preheader {
max-width: 360px !important;
-fallback-width: 90%% !important;
width: calc(100%% - 60px) !important;
}
.snippet,
.webversion {
Float: none !important;
}
.stack .column {
max-width: 400px !important;
width: 100%% !important;
}
.fixed-width.has-border {
max-width: 402px !important;
}
.fixed-width.has-border .layout__inner {
box-sizing: border-box;
}
.snippet,
.webversion {
width: 50%% !important;
}
.ie .btn {
width: 100%%;
}
.ie .stack .column,
.ie .stack .gutter {
display: table-cell;
float: none !important;
}
.ie div.preheader,
.ie .email-footer {
max-width: 560px !important;
width: 560px !important;
}
.ie .snippet,
.ie .webversion {
width: 280px !important;
}
.ie div.header,
.ie .layout {
max-width: 600px !important;
width: 600px !important;
}
.ie .two-col .column {
max-width: 300px !important;
width: 300px !important;
}
.ie .three-col .column,
.ie .narrow {
max-width: 200px !important;
width: 200px !important;
}
.ie .wide {
width: 400px !important;
}
.ie .stack.fixed-width.has-border,
.ie .stack.has-gutter.has-border {
max-width: 602px !important;
width: 602px !important;
}
.ie .stack.two-col.has-gutter .column {
max-width: 290px !important;
width: 290px !important;
}
.ie .stack.three-col.has-gutter .column,
.ie .stack.has-gutter .narrow {
max-width: 188px !important;
width: 188px !important;
}
.ie .stack.has-gutter .wide {
max-width: 394px !important;
width: 394px !important;
}
.ie .stack.two-col.has-gutter.has-border .column {
max-width: 292px !important;
width: 292px !important;
}
.ie .stack.three-col.has-gutter.has-border .column,
.ie .stack.has-gutter.has-border .narrow {
max-width: 190px !important;
width: 190px !important;
}
.ie .stack.has-gutter.has-border .wide {
max-width: 396px !important;
width: 396px !important;
}
.ie .fixed-width .layout__inner {
border-left: 0 none white !important;
border-right: 0 none white !important;
}
.ie .layout__edges {
display: none;
}
.mso .layout__edges {
font-size: 0;
}
.layout-fixed-width,
.mso .layout-full-width {
background-color: #ffffff;
}
@media only screen and (min-width: 620px) {
.column,
.gutter {
display: table-cell;
Float: none !important;
vertical-align: top;
}
div.preheader,
.email-footer {
max-width: 560px !important;
width: 560px !important;
}
.snippet,
.webversion {
width: 280px !important;
}
div.header,
.layout,
.one-col .column {
max-width: 600px !important;
width: 600px !important;
}
.fixed-width.has-border,
.fixed-width.x_has-border,
.has-gutter.has-border,
.has-gutter.x_has-border {
max-width: 602px !important;
width: 602px !important;
}
.two-col .column {
max-width: 300px !important;
width: 300px !important;
}
.three-col .column,
.column.narrow,
.column.x_narrow {
max-width: 200px !important;
width: 200px !important;
}
.column.wide,
.column.x_wide {
width: 400px !important;
}
.two-col.has-gutter .column,
.two-col.x_has-gutter .column {
max-width: 290px !important;
width: 290px !important;
}
.three-col.has-gutter .column,
.three-col.x_has-gutter .column,
.has-gutter .narrow {
max-width: 188px !important;
width: 188px !important;
}
.has-gutter .wide {
max-width: 394px !important;
width: 394px !important;
}
.two-col.has-gutter.has-border .column,
.two-col.x_has-gutter.x_has-border .column {
max-width: 292px !important;
width: 292px !important;
}
.three-col.has-gutter.has-border .column,
.three-col.x_has-gutter.x_has-border .column,
.has-gutter.has-border .narrow,
.has-gutter.x_has-border .narrow {
max-width: 190px !important;
width: 190px !important;
}
.has-gutter.has-border .wide,
.has-gutter.x_has-border .wide {
max-width: 396px !important;
width: 396px !important;
}
}
@supports (display: flex) {
@media only screen and (min-width: 620px) {
.fixed-width.has-border .layout__inner {
display: flex !important;
}
}
}
@media only screen and (-webkit-min-device-pixel-ratio: 2),
only screen and (min--moz-device-pixel-ratio: 2),
only screen and (-o-min-device-pixel-ratio: 2/1),
only screen and (min-device-pixel-ratio: 2),
only screen and (min-resolution: 192dpi),
only screen and (min-resolution: 2dppx) {
.fblike {
background-image: url(https://i7.createsend1.com/static/eb/master/13-the-blueprint-3/images/fblike@2x.png) !important;
}
.tweet {
background-image: url(https://i8.createsend1.com/static/eb/master/13-the-blueprint-3/images/tweet@2x.png) !important;
}
.linkedinshare {
background-image: url(https://i9.createsend1.com/static/eb/master/13-the-blueprint-3/images/lishare@2x.png) !important;
}
.forwardtoafriend {
background-image: url(https://i10.createsend1.com/static/eb/master/13-the-blueprint-3/images/forward@2x.png) !important;
}
}
@media (max-width: 321px) {
.fixed-width.has-border .layout__inner {
border-width: 1px 0 !important;
}
.layout,
.stack .column {
min-width: 320px !important;
width: 320px !important;
}
.border {
display: none;
}
.has-gutter .border {
display: table-cell;
}
}
.mso div {
border: 0 none white !important;
}
.mso .w560 .divider {
Margin-left: 260px !important;
Margin-right: 260px !important;
}
.mso .w360 .divider {
Margin-left: 160px !important;
Margin-right: 160px !important;
}
.mso .w260 .divider {
Margin-left: 110px !important;
Margin-right: 110px !important;
}
.mso .w160 .divider {
Margin-left: 60px !important;
Margin-right: 60px !important;
}
.mso .w354 .divider {
Margin-left: 157px !important;
Margin-right: 157px !important;
}
.mso .w250 .divider {
Margin-left: 105px !important;
Margin-right: 105px !important;
}
.mso .w148 .divider {
Margin-left: 54px !important;
Margin-right: 54px !important;
}
.mso .size-8,
.ie .size-8 {
font-size: 8px !important;
line-height: 14px !important;
}
.mso .size-9,
.ie .size-9 {
font-size: 9px !important;
line-height: 16px !important;
}
.mso .size-10,
.ie .size-10 {
font-size: 10px !important;
line-height: 18px !important;
}
.mso .size-11,
.ie .size-11 {
font-size: 11px !important;
line-height: 19px !important;
}
.mso .size-12,
.ie .size-12 {
font-size: 12px !important;
line-height: 19px !important;
}
.mso .size-13,
.ie .size-13 {
font-size: 13px !important;
line-height: 21px !important;
}
.mso .size-14,
.ie .size-14 {
font-size: 14px !important;
line-height: 21px !important;
}
.mso .size-15,
.ie .size-15 {
font-size: 15px !important;
line-height: 23px !important;
}
.mso .size-16,
.ie .size-16 {
font-size: 16px !important;
line-height: 24px !important;
}
.mso .size-17,
.ie .size-17 {
font-size: 17px !important;
line-height: 26px !important;
}
.mso .size-18,
.ie .size-18 {
font-size: 18px !important;
line-height: 26px !important;
}
.mso .size-20,
.ie .size-20 {
font-size: 20px !important;
line-height: 28px !important;
}
.mso .size-22,
.ie .size-22 {
font-size: 22px !important;
line-height: 31px !important;
}
.mso .size-24,
.ie .size-24 {
font-size: 24px !important;
line-height: 32px !important;
}
.mso .size-26,
.ie .size-26 {
font-size: 26px !important;
line-height: 34px !important;
}
.mso .size-28,
.ie .size-28 {
font-size: 28px !important;
line-height: 36px !important;
}
.mso .size-30,
.ie .size-30 {
font-size: 30px !important;
line-height: 38px !important;
}
.mso .size-32,
.ie .size-32 {
font-size: 32px !important;
line-height: 40px !important;
}
.mso .size-34,
.ie .size-34 {
font-size: 34px !important;
line-height: 43px !important;
}
| |
1
)
def __init__(self, status=None,):
self.status = status
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.status = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('shutdown_args')
if self.status != None:
oprot.writeFieldBegin('status', TType.I32, 1)
oprot.writeI32(self.status)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class shutdown_result:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('shutdown_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class create_args:
"""
Attributes:
- path
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'path', (Pathname, Pathname.thrift_spec), None, ), # 1
)
def __init__(self, path=None,):
self.path = path
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.path = Pathname()
self.path.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('create_args')
if self.path != None:
oprot.writeFieldBegin('path', TType.STRUCT, 1)
self.path.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class create_result:
"""
Attributes:
- success
- ouch
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (ThriftHandle, ThriftHandle.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'ouch', (ThriftIOException, ThriftIOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ouch=None,):
self.success = success
self.ouch = ouch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = ThriftHandle()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ouch = ThriftIOException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('create_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, 1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class createFile_args:
"""
Attributes:
- path
- mode
- overwrite
- bufferSize
- block_replication
- blocksize
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'path', (Pathname, Pathname.thrift_spec), None, ), # 1
(2, TType.I16, 'mode', None, None, ), # 2
(3, TType.BOOL, 'overwrite', None, None, ), # 3
(4, TType.I32, 'bufferSize', None, None, ), # 4
(5, TType.I16, 'block_replication', None, None, ), # 5
(6, TType.I64, 'blocksize', None, None, ), # 6
)
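# Layout note (descriptive only): each thrift_spec entry is a tuple of
# (field id, wire type, field name, nested type spec or None, default), indexed by
# field id; fastbinary uses this table for accelerated (de)serialization when available.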
def __init__(self, path=None, mode=None, overwrite=None, bufferSize=None, block_replication=None, blocksize=None,):
self.path = path
self.mode = mode
self.overwrite = overwrite
self.bufferSize = bufferSize
self.block_replication = block_replication
self.blocksize = blocksize
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.path = Pathname()
self.path.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I16:
self.mode = iprot.readI16();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.BOOL:
self.overwrite = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.bufferSize = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I16:
self.block_replication = iprot.readI16();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I64:
self.blocksize = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('createFile_args')
if self.path != None:
oprot.writeFieldBegin('path', TType.STRUCT, 1)
self.path.write(oprot)
oprot.writeFieldEnd()
if self.mode != None:
oprot.writeFieldBegin('mode', TType.I16, 2)
oprot.writeI16(self.mode)
oprot.writeFieldEnd()
if self.overwrite != None:
oprot.writeFieldBegin('overwrite', TType.BOOL, 3)
oprot.writeBool(self.overwrite)
oprot.writeFieldEnd()
if self.bufferSize != None:
oprot.writeFieldBegin('bufferSize', TType.I32, 4)
oprot.writeI32(self.bufferSize)
oprot.writeFieldEnd()
if self.block_replication != None:
oprot.writeFieldBegin('block_replication', TType.I16, 5)
oprot.writeI16(self.block_replication)
oprot.writeFieldEnd()
if self.blocksize != None:
oprot.writeFieldBegin('blocksize', TType.I64, 6)
oprot.writeI64(self.blocksize)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class createFile_result:
"""
Attributes:
- success
- ouch
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (ThriftHandle, ThriftHandle.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'ouch', (ThriftIOException, ThriftIOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ouch=None,):
self.success = success
self.ouch = ouch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = ThriftHandle()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ouch = ThriftIOException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('createFile_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, 1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class open_args:
"""
Attributes:
- path
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'path', (Pathname, Pathname.thrift_spec), None, ), # 1
)
def __init__(self, path=None,):
self.path = path
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.path = Pathname()
self.path.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('open_args')
if self.path != None:
oprot.writeFieldBegin('path', TType.STRUCT, 1)
self.path.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class open_result:
"""
Attributes:
- success
- ouch
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (ThriftHandle, ThriftHandle.thrift_spec), None, ), # | |
Page number of the data to fetch. Takes effect only when passed together with PageSize.
:type PageNum: int
:param PageSize: Page size. Takes effect only when passed together with PageNum.
Value range: any integer between 10 and 100.
:type PageSize: int
"""
self.PackageType = None
self.OrderBy = None
self.PageNum = None
self.PageSize = None
def _deserialize(self, params):
self.PackageType = params.get("PackageType")
self.OrderBy = params.get("OrderBy")
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeLivePackageInfoResponse(AbstractModel):
"""DescribeLivePackageInfo返回参数结构体
"""
def __init__(self):
r"""
:param LivePackageInfoList: Package information.
Note: this field may return null, indicating that no valid values can be obtained.
:type LivePackageInfoList: list of LivePackageInfo
:param PackageBillMode: Current billing mode of the package:
-1: no billing mode or failed to obtain it
0: no billing mode
201: monthly billing by bandwidth
202: monthly billing by traffic
203: daily billing by bandwidth
204: daily billing by traffic
205: daily billing by duration
206: monthly billing by duration
304: daily billing by traffic
Note: this field may return null, indicating that no valid values can be obtained.
:type PackageBillMode: int
:param TotalPage: Total number of pages.
Note: this field may return null, indicating that no valid values can be obtained.
:type TotalPage: int
:param TotalNum: Total number of data entries.
Note: this field may return null, indicating that no valid values can be obtained.
:type TotalNum: int
:param PageNum: Current page number.
Note: this field may return null, indicating that no valid values can be obtained.
:type PageNum: int
:param PageSize: Current number of entries per page.
Note: this field may return null, indicating that no valid values can be obtained.
:type PageSize: int
:param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.LivePackageInfoList = None
self.PackageBillMode = None
self.TotalPage = None
self.TotalNum = None
self.PageNum = None
self.PageSize = None
self.RequestId = None
def _deserialize(self, params):
if params.get("LivePackageInfoList") is not None:
self.LivePackageInfoList = []
for item in params.get("LivePackageInfoList"):
obj = LivePackageInfo()
obj._deserialize(item)
self.LivePackageInfoList.append(obj)
self.PackageBillMode = params.get("PackageBillMode")
self.TotalPage = params.get("TotalPage")
self.TotalNum = params.get("TotalNum")
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.RequestId = params.get("RequestId")
class DescribeLivePlayAuthKeyRequest(AbstractModel):
"""DescribeLivePlayAuthKey请求参数结构体
"""
def __init__(self):
r"""
:param DomainName: Domain name.
:type DomainName: str
"""
self.DomainName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeLivePlayAuthKeyResponse(AbstractModel):
"""DescribeLivePlayAuthKey返回参数结构体
"""
def __init__(self):
r"""
:param PlayAuthKeyInfo: Playback authentication key information.
:type PlayAuthKeyInfo: :class:`tencentcloud.live.v20180801.models.PlayAuthKeyInfo`
:param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.PlayAuthKeyInfo = None
self.RequestId = None
def _deserialize(self, params):
if params.get("PlayAuthKeyInfo") is not None:
self.PlayAuthKeyInfo = PlayAuthKeyInfo()
self.PlayAuthKeyInfo._deserialize(params.get("PlayAuthKeyInfo"))
self.RequestId = params.get("RequestId")
class DescribeLivePullStreamTasksRequest(AbstractModel):
"""DescribeLivePullStreamTasks请求参数结构体
"""
def __init__(self):
r"""
:param TaskId: Task ID.
Source: returned when the CreateLivePullStreamTask API is called.
If left empty, all tasks are queried by default, sorted by update time in descending order.
:type TaskId: str
:param PageNum: Page number to fetch. Default value: 1.
:type PageNum: int
:param PageSize: Page size. Default value: 10.
Value range: any integer between 1 and 20.
:type PageSize: int
"""
self.TaskId = None
self.PageNum = None
self.PageSize = None
def _deserialize(self, params):
self.TaskId = params.get("TaskId")
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeLivePullStreamTasksResponse(AbstractModel):
"""DescribeLivePullStreamTasks返回参数结构体
"""
def __init__(self):
r"""
:param TaskInfos: List of live stream pulling task information.
:type TaskInfos: list of PullStreamTaskInfo
:param PageNum: Page number.
:type PageNum: int
:param PageSize: Page size.
:type PageSize: int
:param TotalNum: Total number of matching entries.
:type TotalNum: int
:param TotalPage: Total number of pages.
:type TotalPage: int
:param LimitTaskNum: Maximum number of tasks that can be created.
:type LimitTaskNum: int
:param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.TaskInfos = None
self.PageNum = None
self.PageSize = None
self.TotalNum = None
self.TotalPage = None
self.LimitTaskNum = None
self.RequestId = None
def _deserialize(self, params):
if params.get("TaskInfos") is not None:
self.TaskInfos = []
for item in params.get("TaskInfos"):
obj = PullStreamTaskInfo()
obj._deserialize(item)
self.TaskInfos.append(obj)
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.TotalNum = params.get("TotalNum")
self.TotalPage = params.get("TotalPage")
self.LimitTaskNum = params.get("LimitTaskNum")
self.RequestId = params.get("RequestId")
class DescribeLivePushAuthKeyRequest(AbstractModel):
"""DescribeLivePushAuthKey请求参数结构体
"""
def __init__(self):
r"""
:param DomainName: Push domain name.
:type DomainName: str
"""
self.DomainName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeLivePushAuthKeyResponse(AbstractModel):
"""DescribeLivePushAuthKey返回参数结构体
"""
def __init__(self):
r"""
:param PushAuthKeyInfo: Push authentication key information.
:type PushAuthKeyInfo: :class:`tencentcloud.live.v20180801.models.PushAuthKeyInfo`
:param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.PushAuthKeyInfo = None
self.RequestId = None
def _deserialize(self, params):
if params.get("PushAuthKeyInfo") is not None:
self.PushAuthKeyInfo = PushAuthKeyInfo()
self.PushAuthKeyInfo._deserialize(params.get("PushAuthKeyInfo"))
self.RequestId = params.get("RequestId")
class DescribeLiveRecordRulesRequest(AbstractModel):
"""DescribeLiveRecordRules请求参数结构体
"""
class DescribeLiveRecordRulesResponse(AbstractModel):
"""DescribeLiveRecordRules返回参数结构体
"""
def __init__(self):
r"""
:param Rules: List of rules.
:type Rules: list of RuleInfo
:param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.Rules = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Rules") is not None:
self.Rules = []
for item in params.get("Rules"):
obj = RuleInfo()
obj._deserialize(item)
self.Rules.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveRecordTemplateRequest(AbstractModel):
"""DescribeLiveRecordTemplate请求参数结构体
"""
def __init__(self):
r"""
:param TemplateId: Template ID obtained through the [DescribeLiveRecordTemplates](/document/product/267/32609) API.
:type TemplateId: int
"""
self.TemplateId = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeLiveRecordTemplateResponse(AbstractModel):
"""DescribeLiveRecordTemplate返回参数结构体
"""
def __init__(self):
r"""
:param Template: Recording template information.
:type Template: :class:`tencentcloud.live.v20180801.models.RecordTemplateInfo`
:param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.Template = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Template") is not None:
self.Template = RecordTemplateInfo()
self.Template._deserialize(params.get("Template"))
self.RequestId = params.get("RequestId")
class DescribeLiveRecordTemplatesRequest(AbstractModel):
"""DescribeLiveRecordTemplates请求参数结构体
"""
def __init__(self):
r"""
:param IsDelayLive: Whether this is a slow live streaming template. Default: 0.
0: standard live streaming.
1: slow live streaming.
:type IsDelayLive: int
"""
self.IsDelayLive = None
def _deserialize(self, params):
self.IsDelayLive = params.get("IsDelayLive")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeLiveRecordTemplatesResponse(AbstractModel):
"""DescribeLiveRecordTemplates返回参数结构体
"""
def __init__(self):
r"""
:param Templates: List of recording template information.
:type Templates: list of RecordTemplateInfo
:param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.Templates = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Templates") is not None:
self.Templates = []
for item in params.get("Templates"):
obj = RecordTemplateInfo()
obj._deserialize(item)
self.Templates.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveSnapshotRulesRequest(AbstractModel):
"""DescribeLiveSnapshotRules请求参数结构体
"""
class DescribeLiveSnapshotRulesResponse(AbstractModel):
"""DescribeLiveSnapshotRules返回参数结构体
"""
def __init__(self):
r"""
:param Rules: List of rules.
:type Rules: list of RuleInfo
:param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.Rules = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Rules") is not None:
self.Rules = []
for item in params.get("Rules"):
obj = RuleInfo()
obj._deserialize(item)
self.Rules.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveSnapshotTemplateRequest(AbstractModel):
"""DescribeLiveSnapshotTemplate请求参数结构体
"""
def __init__(self):
r"""
:param TemplateId: Template ID.
Template ID returned when [CreateLiveSnapshotTemplate](/document/product/267/32624) is called.
:type TemplateId: int
"""
self.TemplateId = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeLiveSnapshotTemplateResponse(AbstractModel):
"""DescribeLiveSnapshotTemplate返回参数结构体
"""
def __init__(self):
r"""
:param Template: Screenshot template information.
:type Template: :class:`tencentcloud.live.v20180801.models.SnapshotTemplateInfo`
:param RequestId: The unique request ID, which is returned for each request. Provide this RequestId when troubleshooting.
:type RequestId: str
"""
self.Template = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Template") is not None:
self.Template = SnapshotTemplateInfo()
self.Template._deserialize(params.get("Template"))
self.RequestId = params.get("RequestId")
class DescribeLiveSnapshotTemplatesRequest(AbstractModel):
"""DescribeLiveSnapshotTemplates请求参数结构体
"""
class DescribeLiveSnapshotTemplatesResponse(AbstractModel):
"""DescribeLiveSnapshotTemplates返回参数结构体
"""
def __init__(self):
r"""
:param Templates: List of screenshot templates.
:type Templates: list of SnapshotTemplateInfo
:param RequestId: The unique request ID, which is returned for each request. Provide this RequestId when troubleshooting.
:type RequestId: str
"""
self.Templates = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Templates") is not None:
self.Templates = []
for item in params.get("Templates"):
obj = SnapshotTemplateInfo()
obj._deserialize(item)
self.Templates.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveStreamEventListRequest(AbstractModel):
"""DescribeLiveStreamEventList请求参数结构体
"""
def __init__(self):
r"""
:param StartTime: Start time.
UTC format, e.g. 2018-12-29T19:00:00Z.
History within the last 60 days can be queried.
:type StartTime: str
:param EndTime: End time.
UTC format, e.g. 2018-12-29T20:00:00Z.
Must not be later than the current time, and must not be more than 30 days after the start time.
:type EndTime: str
:param AppName: Push path, which must match the AppName in the push and playback URLs. Defaults to live.
:type AppName: str
:param DomainName: Push domain name.
:type DomainName: str
:param StreamName: Stream name. Wildcard (*) queries are not supported; fuzzy matching is used by default.
Use the IsStrict field to switch to an exact query.
:type StreamName: str
:param PageNum: Page number to return.
Default: 1.
Note: currently only the first 10,000 records can be queried.
:type PageNum: int
:param PageSize: Page size.
Maximum: 100.
Value range: any integer between 1 and 100.
Default: 10.
Note: currently only the first 10,000 records can be queried.
:type PageSize: int
:param IsFilter: Whether to filter. No filtering by default.
0: no filtering.
1: filter out failed stream starts and return only successful ones.
:type IsFilter: int
:param IsStrict: Whether to use an exact query. Fuzzy matching by default.
0: fuzzy matching.
1: exact query.
Note: this parameter takes effect when StreamName is used.
:type IsStrict: int
:param IsAsc: Whether to display results in ascending order of end time. Descending by default.
0: descending.
1: ascending.
:type IsAsc: int
"""
self.StartTime = None
self.EndTime = None
self.AppName = None
self.DomainName = None
self.StreamName = None
self.PageNum = None
self.PageSize = None
self.IsFilter = None
self.IsStrict = None
self.IsAsc = None
def _deserialize(self, params):
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.AppName = params.get("AppName")
self.DomainName = params.get("DomainName")
self.StreamName = params.get("StreamName")
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.IsFilter = params.get("IsFilter")
self.IsStrict = params.get("IsStrict")
self.IsAsc = params.get("IsAsc")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeLiveStreamEventListResponse(AbstractModel):
"""DescribeLiveStreamEventList返回参数结构体
"""
def __init__(self):
r"""
:param EventList: List of stream push/interruption events.
:type EventList: list of StreamEventInfo
:param PageNum: Page number.
:type PageNum: int
:param PageSize: Page size.
:type PageSize: int
:param TotalNum: Total number of records that match the criteria.
:type TotalNum: int
:param TotalPage: Total number of pages.
:type TotalPage: int
:param RequestId: The unique request ID, which is returned for each request. Provide this RequestId when troubleshooting.
:type RequestId: str
"""
self.EventList = None
self.PageNum = None
self.PageSize = None
self.TotalNum = None
self.TotalPage = None
self.RequestId = None
def _deserialize(self, params):
if params.get("EventList") is not None:
self.EventList = []
for item in params.get("EventList"):
obj = StreamEventInfo()
obj._deserialize(item)
self.EventList.append(obj)
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.TotalNum = params.get("TotalNum")
self.TotalPage = params.get("TotalPage")
self.RequestId = params.get("RequestId")
class DescribeLiveStreamOnlineListRequest(AbstractModel):
"""DescribeLiveStreamOnlineList请求参数结构体
"""
def __init__(self):
r"""
:param DomainName: Push domain name. Multi-domain users must specify DomainName.
:type DomainName: str
:param AppName:
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
import unittest
import numpy as np
import infer_util as iu
import test_util as tu
import os
from tritonclientutils import *
TEST_SYSTEM_SHARED_MEMORY = bool(
int(os.environ.get('TEST_SYSTEM_SHARED_MEMORY', 0)))
TEST_CUDA_SHARED_MEMORY = bool(int(os.environ.get('TEST_CUDA_SHARED_MEMORY',
0)))
CPU_ONLY = (os.environ.get('TRITON_SERVER_CPU_ONLY') is not None)
BACKENDS = os.environ.get(
'BACKENDS', "graphdef savedmodel netdef onnx libtorch plan custom python")
ENSEMBLES = bool(int(os.environ.get('ENSEMBLES', 1)))
np_dtype_string = np.dtype(object)
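# np.dtype(object) stands in for variable-length string tensors in the dtype combinations below.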
class InferTest(tu.TestResultCollector):
def _full_exact(self, input_dtype, output0_dtype, output1_dtype,
output0_raw, output1_raw, swap):
def _infer_exact_helper(tester,
pf,
tensor_shape,
batch_size,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=True,
output1_raw=True,
model_version=None,
swap=False,
outputs=("OUTPUT0", "OUTPUT1"),
use_http=True,
use_grpc=True,
use_http_json_tensors=True,
skip_request_id_check=True,
use_streaming=True,
correlation_id=0):
for bs in (1, batch_size):
# model that does not support batching
if bs == 1:
iu.infer_exact(
tester,
pf + "_nobatch",
tensor_shape,
bs,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw,
output1_raw,
model_version,
swap,
outputs,
use_http,
use_grpc,
use_http_json_tensors,
skip_request_id_check,
use_streaming,
correlation_id,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
# model that supports batching
iu.infer_exact(
tester,
pf, (bs,) + tensor_shape,
bs,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw,
output1_raw,
model_version,
swap,
outputs,
use_http,
use_grpc,
use_http_json_tensors,
skip_request_id_check,
use_streaming,
correlation_id,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
input_size = 16
all_ensemble_prefix = ["simple_", "sequence_", "fan_"]
ensemble_prefix = [""]
if ENSEMBLES and ("custom" in BACKENDS):
for prefix in all_ensemble_prefix:
if tu.validate_for_ensemble_model(prefix, input_dtype,
output0_dtype, output1_dtype,
(input_size,), (input_size,),
(input_size,)):
ensemble_prefix.append(prefix)
if tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype,
(input_size,), (input_size,),
(input_size,)):
for prefix in ensemble_prefix:
for pf in ["graphdef", "savedmodel"]:
if pf in BACKENDS:
_infer_exact_helper(self,
prefix + pf, (input_size,),
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=swap)
if tu.validate_for_c2_model(input_dtype, output0_dtype, output1_dtype,
(input_size,), (input_size,),
(input_size,)):
for prefix in ensemble_prefix:
if 'netdef' in BACKENDS:
_infer_exact_helper(self,
prefix + 'netdef', (input_size,),
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=swap)
if not CPU_ONLY and tu.validate_for_trt_model(
input_dtype, output0_dtype, output1_dtype, (input_size, 1, 1),
(input_size, 1, 1), (input_size, 1, 1)):
for prefix in ensemble_prefix:
if 'plan' in BACKENDS:
if input_dtype == np.int8:
_infer_exact_helper(self,
prefix + 'plan', (input_size, 1, 1),
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=swap)
else:
_infer_exact_helper(self,
prefix + 'plan', (input_size,),
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=swap)
# the custom model is src/custom/addsub... it does not swap
# the inputs so always set to False
if tu.validate_for_custom_model(input_dtype, output0_dtype,
output1_dtype, (input_size,),
(input_size,), (input_size,)):
# No basic ensemble models are created against custom models
if 'custom' in BACKENDS:
_infer_exact_helper(self,
'custom', (input_size,),
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=False)
if tu.validate_for_onnx_model(input_dtype, output0_dtype, output1_dtype,
(input_size,), (input_size,),
(input_size,)):
for prefix in ensemble_prefix:
if 'onnx' in BACKENDS:
_infer_exact_helper(self,
prefix + 'onnx', (input_size,),
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=swap)
if tu.validate_for_libtorch_model(input_dtype, output0_dtype,
output1_dtype, (input_size,),
(input_size,), (input_size,)):
for prefix in ensemble_prefix:
if 'libtorch' in BACKENDS:
_infer_exact_helper(self,
prefix + 'libtorch', (input_size,),
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=swap)
if prefix == "":
if 'python' in BACKENDS:
_infer_exact_helper(self,
prefix + 'python', (input_size,),
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=swap)
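# Test-name suffixes encode the (input, output0, output1) dtypes:
# b=int8, s=int16, i=int32, l=int64, h=float16, f=float32, o=string (np object).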
def test_raw_bbb(self):
self._full_exact(np.int8,
np.int8,
np.int8,
output0_raw=True,
output1_raw=True,
swap=True)
def test_raw_sss(self):
self._full_exact(np.int16,
np.int16,
np.int16,
output0_raw=True,
output1_raw=True,
swap=True)
def test_raw_iii(self):
self._full_exact(np.int32,
np.int32,
np.int32,
output0_raw=True,
output1_raw=True,
swap=True)
def test_raw_lll(self):
self._full_exact(np.int64,
np.int64,
np.int64,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_hhh(self):
self._full_exact(np.float16,
np.float16,
np.float16,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_fff(self):
self._full_exact(np.float32,
np.float32,
np.float32,
output0_raw=True,
output1_raw=True,
swap=True)
def test_raw_hff(self):
self._full_exact(np.float16,
np.float32,
np.float32,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_bii(self):
self._full_exact(np.int8,
np.int32,
np.int32,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_ibb(self):
self._full_exact(np.int32,
np.int8,
np.int8,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_ibs(self):
self._full_exact(np.int32,
np.int8,
np.int16,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_iff(self):
self._full_exact(np.int32,
np.float32,
np.float32,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_fii(self):
self._full_exact(np.float32,
np.int32,
np.int32,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_ihs(self):
self._full_exact(np.int32,
np.float16,
np.int16,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_ooo(self):
self._full_exact(np_dtype_string,
np_dtype_string,
np_dtype_string,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_oii(self):
self._full_exact(np_dtype_string,
np.int32,
np.int32,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_oio(self):
self._full_exact(np_dtype_string,
np.int32,
np_dtype_string,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_ooi(self):
self._full_exact(np_dtype_string,
np_dtype_string,
np.int32,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_ioo(self):
self._full_exact(np.int32,
np_dtype_string,
np_dtype_string,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_iio(self):
self._full_exact(np.int32,
np.int32,
np_dtype_string,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_ioi(self):
self._full_exact(np.int32,
np_dtype_string,
np.int32,
output0_raw=True,
output1_raw=True,
swap=False)
# shared memory does not support class output
if not (TEST_SYSTEM_SHARED_MEMORY or TEST_CUDA_SHARED_MEMORY):
def test_class_bbb(self):
self._full_exact(np.int8,
np.int8,
np.int8,
output0_raw=False,
output1_raw=False,
swap=True)
def test_class_sss(self):
self._full_exact(np.int16,
np.int16,
np.int16,
output0_raw=False,
output1_raw=False,
swap=True)
def test_class_iii(self):
self._full_exact(np.int32,
np.int32,
np.int32,
output0_raw=False,
output1_raw=False,
swap=True)
def test_class_lll(self):
self._full_exact(np.int64,
np.int64,
np.int64,
output0_raw=False,
output1_raw=False,
swap=False)
def test_class_fff(self):
self._full_exact(np.float32,
np.float32,
np.float32,
output0_raw=False,
output1_raw=False,
swap=True)
def test_class_iff(self):
self._full_exact(np.int32,
np.float32,
np.float32,
output0_raw=False,
output1_raw=False,
swap=False)
def test_mix_bbb(self):
self._full_exact(np.int8,
np.int8,
np.int8,
output0_raw=True,
output1_raw=False,
swap=True)
def test_mix_sss(self):
self._full_exact(np.int16,
np.int16,
np.int16,
output0_raw=False,
output1_raw=True,
swap=True)
def test_mix_iii(self):
self._full_exact(np.int32,
np.int32,
np.int32,
output0_raw=True,
output1_raw=False,
swap=True)
def test_mix_lll(self):
self._full_exact(np.int64,
np.int64,
np.int64,
output0_raw=False,
output1_raw=True,
swap=False)
def test_mix_fff(self):
self._full_exact(np.float32,
np.float32,
np.float32,
output0_raw=True,
output1_raw=False,
swap=True)
def test_mix_iff(self):
self._full_exact(np.int32,
np.float32,
np.float32,
output0_raw=False,
output1_raw=True,
swap=False)
def test_raw_version_latest_1(self):
input_size = 16
tensor_shape = (1, input_size)
# There are 3 versions of graphdef_int8_int8_int8 but
# only version 3 should be available
for platform in ('graphdef', 'savedmodel'):
if platform not in BACKENDS:
continue
try:
iu.infer_exact(
self,
platform,
tensor_shape,
1,
np.int8,
np.int8,
np.int8,
model_version=1,
swap=False,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
except InferenceServerException as ex:
self.assertTrue(
ex.message().startswith("Request for unknown model"))
try:
iu.infer_exact(
self,
platform,
tensor_shape,
1,
np.int8,
np.int8,
np.int8,
model_version=2,
swap=True,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
except InferenceServerException as ex:
self.assertTrue(
ex.message().startswith("Request for unknown model"))
iu.infer_exact(self,
platform,
tensor_shape,
1,
np.int8,
np.int8,
np.int8,
model_version=3,
swap=True,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
def test_raw_version_latest_2(self):
input_size = 16
tensor_shape = (1, input_size)
# There are 3 versions of graphdef_int16_int16_int16 but only
# versions 2 and 3 should be available
for platform in ('graphdef', 'savedmodel'):
if platform not in BACKENDS:
continue
try:
iu.infer_exact(
self,
platform,
tensor_shape,
1,
np.int16,
np.int16,
np.int16,
model_version=1,
swap=False,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
except InferenceServerException as ex:
self.assertTrue(
ex.message().startswith("Request for unknown model"))
iu.infer_exact(self,
platform,
tensor_shape,
1,
np.int16,
np.int16,
np.int16,
model_version=2,
swap=True,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
iu.infer_exact(self,
platform,
tensor_shape,
1,
np.int16,
np.int16,
np.int16,
model_version=3,
swap=True,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
def test_raw_version_all(self):
input_size = 16
tensor_shape = (1, input_size)
# There are 3 versions of *_int32_int32_int32 and all should
# be available.
for platform in ('graphdef', 'savedmodel', 'netdef'):
if platform not in BACKENDS:
continue
iu.infer_exact(self,
platform,
tensor_shape,
1,
np.int32,
np.int32,
np.int32,
model_version=1,
swap=False,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
iu.infer_exact(self,
platform,
tensor_shape,
1,
np.int32,
np.int32,
np.int32,
model_version=2,
swap=True,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
iu.infer_exact(self,
platform,
tensor_shape,
1,
np.int32,
np.int32,
np.int32,
model_version=3,
swap=True,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
def test_raw_version_specific_1(self):
input_size = 16
tensor_shape = (1, input_size)
# There are 3 versions of *_float16_float16_float16 but only
# version 1 should be available.
for platform in ('graphdef', 'savedmodel'):
if platform not in BACKENDS:
continue
iu.infer_exact(self,
platform,
tensor_shape,
1,
np.float16,
np.float16,
np.float16,
model_version=1,
swap=False,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
try:
iu.infer_exact(
self,
platform,
tensor_shape,
1,
np.float16,
np.float16,
np.float16,
model_version=2,
swap=True,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
except InferenceServerException as ex:
self.assertTrue(
ex.message().startswith("Request for unknown model"))
try:
iu.infer_exact(
self,
platform,
tensor_shape,
1,
np.float16,
np.float16,
np.float16,
model_version=3,
swap=True,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
except InferenceServerException as ex:
self.assertTrue(
ex.message().startswith("Request for unknown model"))
def test_raw_version_specific_1_3(self):
input_size = 16
# There are 3 versions of *_float32_float32_float32 but only
# versions 1 and 3 should be available.
for platform in ('graphdef', 'savedmodel', 'netdef', 'plan'):
if platform == 'plan' and CPU_ONLY:
continue
if platform not in BACKENDS:
continue
tensor_shape = (1, input_size)
iu.infer_exact(self,
platform,
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
model_version=1,
"""
@author: <NAME>
@editor: <NAME>
"""
import sys
sys.path.insert(0, '../utilities')
# import os
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
from scipy.interpolate import griddata
from pyDOE import lhs
from plotting import newfig, savefig
from mpl_toolkits.mplot3d import Axes3D
import time
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
np.random.seed(1234)
tf.set_random_seed(1234)
class PhysicsInformedNN:
# Initialize the class
def __init__(self, x0, u0, tb, X_f, X_f_val, layers, lb, ub, min_max_f, X_star, u_star):
X0 = np.concatenate((x0, 0 * x0), 1) # (x0, 0)
X_lb = np.concatenate((0 * tb + lb[0], tb), 1) # (lb[0], tb)
X_ub = np.concatenate((0 * tb + ub[0], tb), 1) # (ub[0], tb)
self.lb = lb
self.ub = ub
self.X_star = X_star
self.u_star = u_star
self.min_f = min_max_f[0]
self.max_f = min_max_f[1]
self.x0 = X0[:, 0:1]
self.t0 = X0[:, 1:2]
self.x_lb = X_lb[:, 0:1]
self.t_lb = X_lb[:, 1:2]
self.x_ub = X_ub[:, 0:1]
self.t_ub = X_ub[:, 1:2]
self.x_f = X_f[:, 0:1]
self.t_f = X_f[:, 1:2]
self.x_f_val = X_f_val[:, 0:1]
self.t_f_val = X_f_val[:, 1:2]
self.u0 = u0
self.log_var_u0 = tf.Variable(1.0, dtype='float32')
self.log_var_ub = tf.Variable(1.0, dtype='float32')
self.log_var_f = tf.Variable(1.0, dtype='float32')
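# Per-term log-variance weights, only used by the (currently disabled) auto-adaptive loss further below.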
# Initialize NNs
self.layers = layers
self.weights, self.biases = self.initialize_NN(layers)
# tf Placeholders
self.training = tf.placeholder(tf.bool)
self.penalties = tf.placeholder(tf.float32, shape=(3))
self.x0_tf = tf.placeholder(tf.float32, shape=[None, self.x0.shape[1]])
self.t0_tf = tf.placeholder(tf.float32, shape=[None, self.t0.shape[1]])
self.u0_tf = tf.placeholder(tf.float32, shape=[None, self.u0.shape[1]])
self.x_lb_tf = tf.placeholder(tf.float32, shape=[None, self.x_lb.shape[1]])
self.t_lb_tf = tf.placeholder(tf.float32, shape=[None, self.t_lb.shape[1]])
self.x_ub_tf = tf.placeholder(tf.float32, shape=[None, self.x_ub.shape[1]])
self.t_ub_tf = tf.placeholder(tf.float32, shape=[None, self.t_ub.shape[1]])
self.x_f_tf = tf.placeholder(tf.float32, shape=[None, self.x_f.shape[1]])
self.t_f_tf = tf.placeholder(tf.float32, shape=[None, self.t_f.shape[1]])
self.x_f_tf_val = tf.placeholder(tf.float32, shape=[None, self.x_f_val.shape[1]])
self.t_f_tf_val = tf.placeholder(tf.float32, shape=[None, self.t_f_val.shape[1]])
self.X_star_tf = tf.placeholder(tf.float32, shape=[None, self.X_star.shape[1]])
# tf Graphs
self.u0_pred, self.u0_x_pred = self.net_u(self.x0_tf, self.t0_tf)
self.u_lb_pred, self.u_x_lb_pred = self.net_u(self.x_lb_tf, self.t_lb_tf)
self.u_ub_pred, self.u_x_ub_pred = self.net_u(self.x_ub_tf, self.t_ub_tf)
self.f_pred = self.net_f(self.x_f_tf, self.t_f_tf)
self.s_pred = self.net_s(self.x_f_tf, self.t_f_tf)
self.f_pred_val = self.net_f(self.x_f_tf_val, self.t_f_tf_val)
self.s_pred_val = self.net_s(self.x_f_tf_val, self.t_f_tf_val)
self.u_pred, _ = self.net_u(self.X_star_tf[:, 0:1], self.X_star_tf[:, 1:2])
self.delta_u0 = tf.reduce_mean(self.u0_pred - self.u0_tf)
# MIN-MAX-SCALING BETWEEN [-sf, sf]
# sf = 1 # scale factor
# self.scaled_u0_tf = 2*sf * (self.u0_tf - tf.reduce_min(self.u0_tf)) / \
# (tf.reduce_max(self.u0_tf) - tf.reduce_min(self.u0_tf)) - sf
# self.scaled_u0_pred = 2*sf * (self.u0_pred - tf.reduce_min(self.u0_tf)) / \
# (tf.reduce_max(self.u0_tf) - tf.reduce_min(self.u0_tf)) - sf
# self.scaled_u_x_lb_pred = 2*sf*(self.u_x_lb_pred + 2e4) / (2e4 + 2e4) - sf
# self.scaled_u_x_ub_pred = 2*sf*(self.u_x_ub_pred + 2e4) / (2e4 + 2e4) - sf
# self.scaled_f_pred = 2*sf * (self.f_pred - self.min_f) / \
# (self.max_f - self.min_f) - sf
# self.scaled_s_pred = 2*sf * (self.s_pred - self.min_f) / \
# (self.max_f - self.min_f) - sf
# self.scaled_f_pred_val = self.min_max_scale(self.f_pred_val, self.min_f, self.max_f)
# self.scaled_s_pred_val = self.min_max_scale(self.s_pred_val, self.min_f, self.max_f)
# MAX ABS SCALING
abs_max_f = tf.cast(tf.reduce_max(tf.abs(min_max_f)), 'float32')
abs_max_u0 = tf.cast(tf.reduce_max(tf.abs(self.u0_tf)), 'float32')
self.scaled_u0_tf = self.u0_tf / abs_max_u0
self.scaled_u0_pred = self.u0_pred / abs_max_u0
self.scaled_u_x_lb_pred = self.u_x_lb_pred
self.scaled_u_x_ub_pred = self.u_x_ub_pred
self.scaled_f_pred = self.f_pred / abs_max_f
self.scaled_s_pred = self.s_pred / abs_max_f
self.scaled_f_pred_val = self.f_pred_val / abs_max_f
self.scaled_s_pred_val = self.s_pred_val / abs_max_f
# SCALED LOSSES FOR ADAPTIVE COST FUNCTION
self.loss_u0 = tf.reduce_mean(tf.square(self.scaled_u0_tf - self.scaled_u0_pred))
self.loss_ub = tf.reduce_mean(tf.square(self.scaled_u_x_lb_pred)) +\
tf.reduce_mean(tf.square(self.scaled_u_x_ub_pred))
self.loss_f = tf.reduce_mean(tf.square(self.scaled_f_pred - self.scaled_s_pred))
self.val_loss_f = tf.reduce_mean(tf.square(self.scaled_f_pred_val - self.scaled_s_pred_val))
# STANDARD LOSSES WITH OPTIONAL PENALTY FACTORS (penalties default to 1)
# ACTIVATE FOR OPTIONAL AUTO RESCALING AFTER FIRST ITERATIONS
# self.loss_u0 = self.penalties[0] * tf.reduce_mean(tf.square(self.u0_tf - self.u0_pred))
# self.loss_ub = self.penalties[1] * (tf.reduce_mean(tf.square(self.u_x_lb_pred)) +
# tf.reduce_mean(tf.square(self.u_x_ub_pred)))
# self.loss_f = self.penalties[2] * tf.reduce_mean(tf.square(self.f_pred - self.s_pred))
# self.val_loss_f = tf.reduce_mean(tf.square(self.f_pred_val - self.s_pred_val))
# LOSS FORMULATION FOR AUTO ADAPTIVE LOSS (NOT STABLE)
# self.loss_u0 = tf.sqrt(tf.reduce_sum(tf.square(self.u0_tf - self.u0_pred)))
# self.loss_ub = tf.sqrt(tf.reduce_sum(tf.square(self.u_x_lb_pred)) + tf.reduce_sum(tf.square(self.u_x_ub_pred)))
# self.loss_f = tf.sqrt(tf.reduce_sum(tf.square(self.f_pred - self.s_pred)))
# self.val_loss_f = tf.reduce_mean(tf.square(self.f_pred_val - self.s_pred_val))
self.loss = self.loss_u0 + self.loss_ub + self.loss_f
# ALTERNATIVE LOSS FORMULATIONS
# self.loss = tf.log(self.loss_u0 +1) + tf.log(self.loss_ub + 1) + tf.log(self.loss_f + 1) # TEST OF A SIMPLE LOG LOSS
# self.loss = self.adaptive_loss() # promising weighted loss approach https://arxiv.org/pdf/1705.07115.pdf
# Optimizers
self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss,
method='L-BFGS-B',
options={'maxiter': 50000,
'maxfun': 50000,
'maxcor': 50,
'maxls': 50,
'ftol': 1e0 * np.finfo(float).eps, # ftol
# 'gtol': 1e-12
})  # change gtol here if needed
self.optimizer_Adam = tf.train.AdamOptimizer(0.001) # more data, higher learning rate
self.train_op_Adam = self.optimizer_Adam.minimize(self.loss)
# tf session
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=True))
init = tf.global_variables_initializer()
self.sess.run(init)
self.train_history = []
self.error_u_history = []
# FIRST IMPLEMENTATION OF AUTO ADAPTIVE LOSS (NOT STABLE)
# def adaptive_loss(self):
# pre_u0 = tf.exp(-self.log_var_u0)
# pre_ub = tf.exp(-self.log_var_ub)
# pre_f = tf.exp(-self.log_var_f)
# loss = pre_u0*self.loss_u0 + pre_ub*self.loss_ub + pre_f*self.loss_f + \
# self.log_var_u0 + self.log_var_ub + self.log_var_f
# return loss
def initialize_NN(self, layers):
weights = []
biases = []
num_layers = len(layers)
for l in range(0, num_layers - 1):
W = self.xavier_init(size=[layers[l], layers[l + 1]])
b = tf.Variable(tf.zeros([1, layers[l + 1]], dtype=tf.float32), dtype=tf.float32)
weights.append(W)
biases.append(b)
return weights, biases
# GLOROT NORMAL INITIALIZATION
# def xavier_init(self, size):
# in_dim = size[0]
# out_dim = size[1]
# xavier_stddev = np.sqrt(2 / (in_dim + out_dim))
# return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32)
# GLOROT UNIFORM INITIALIZATION
def xavier_init(self, size):
in_dim = size[0]
out_dim = size[1]
limit = np.sqrt(6 / (in_dim + out_dim))
return tf.Variable(tf.random_uniform([in_dim, out_dim], -limit, limit), dtype=tf.float32)
def neural_net(self, X, weights, biases):
num_layers = len(weights) + 1
# H = (X - self.lb) / (self.ub - self.lb) # INPUT SCALING FOR SWISH / SQUARED RELU
H = 2.0 * (X - self.lb) / (self.ub - self.lb) - 1.0 # STANDARD MIN-MAX INPUT SCALING
for l in range(0, num_layers - 2):
W = weights[l]
b = biases[l]
H = tf.tanh(tf.add(tf.matmul(H, W), b)) # TANH ACTIVATION
# H = tf.square(tf.nn.relu(tf.add(tf.matmul(H, W), b))) # SQUARED RELU ACTIVATION
# H = tf.sin(tf.add(tf.matmul(H, W), b)) # SINE ACTIVATION
# H = tf.nn.swish(tf.add(tf.matmul(H, W), b)) # SWISH ACTIVATION
W = weights[-1]
b = biases[-1]
Y = tf.sinh(tf.add(tf.matmul(H, W), b)) # SINH OUTPUT ACTIVATION
# Y = tf.add(tf.matmul(H, W), b) # LINEAR OUTPUT ACTIVATION
return Y
def net_u(self, x, t):
X = tf.concat([x, t], 1)
u = self.neural_net(X, self.weights, self.biases)
u_x = tf.gradients(u, x)[0]
return u, u_x
def net_f(self, x, t):
# computations for the lhs
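# Residual of the nonlinear diffusion equation c(u) * u_t = d/dx( k(u) * u_x ),
# with temperature-dependent conductivity k(u) and capacity c(u); the network
# output u is non-dimensionalised by u_max.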
u, u_x = self.net_u(x, t)
u_t = tf.gradients(u, t)[0]
u_xx = tf.gradients(u_x, x)[0]
u_max = 800
us = u_max
k = (1.29 * 10 ** -2 * u*us + 6.856)
k_u = 1.29 * 10 ** -2 * us
k_x = k_u * u_x
c = (4.55 * 10 ** -4 * (u*us) ** 2 - 5.78 * 10 ** -3 * u*us + 5.849 * 10 ** 2)
f = c * u_t - k_x * u_x - k * u_xx
return f
def net_s(self, x, t):
t_max = 0.5
sigma = 0.02
u_max = 800
us = u_max
# computations for the rhs
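# Source term from a manufactured solution: a Gaussian of width sigma whose
# centre p(t) oscillates in time, substituted into the same PDE used in net_f.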
p = 0.25 * tf.cos(2 * np.pi * t / t_max) + 0.5
p_t = tf.gradients(p, t)[0]
u_sol = u_max * tf.exp(-(x - p) ** 2 / (2 * sigma ** 2))
k_sol = 1.29 * 10 ** -2 * u_sol + 6.856
k_u_sol = 1.29 * 10 ** -2
c_sol = 4.55 * 10 ** -4 * u_sol ** 2 - 5.78 * 10 ** -3 * u_sol + 5.849 * 10 ** 2
s = 1/us * 1/sigma**2 * k_sol * u_sol + 1/us * u_sol * (x - p) * 1/sigma**2 * (
c_sol * p_t - (x - p) * 1/sigma**2 * (k_sol + u_sol * k_u_sol))
return s
def callback(self, loss, loss_u0, loss_ub, loss_f, val_loss_f, f_pred, scaled_f_pred, s_pred, scaled_s_pred, u_pred):
error_u = np.linalg.norm(self.u_star - u_pred, 2) / np.linalg.norm(self.u_star, 2)
print('f_pred: %.3e, scaled_f_pred: %.3e, s_pred: %.3e, scaled_s_pred: %.3e,' %
(np.max(f_pred), np.max(scaled_f_pred),
np.max(s_pred), np.max(scaled_s_pred)))
print('Loss: %.3e, Loss u0: %.3e, Loss ub: %.3e, Loss f: %.3e, Val. Loss f: %.3e' % (loss, loss_u0, loss_ub, loss_f, val_loss_f))
self.train_history.append([loss, loss_u0, loss_ub, loss_f, val_loss_f])
print('Error u: %e' % (error_u))
self.error_u_history.append(error_u)
def train(self, nIter):
tf_dict = {self.x0_tf: self.x0, self.t0_tf: self.t0,
self.u0_tf: self.u0,
self.x_lb_tf: self.x_lb, self.t_lb_tf: self.t_lb,
self.x_ub_tf: self.x_ub, self.t_ub_tf: self.t_ub,
self.x_f_tf: self.x_f, self.t_f_tf: self.t_f,
self.x_f_tf_val: self.x_f_val, self.t_f_tf_val: self.t_f_val,
self.X_star_tf: self.X_star,
self.penalties: np.array([1.,1.,1.]),
self.training: True}
# OPTIONAL AUTO SCALING BEFORE FIRST ITERATION
# loss_u0, loss_ub, | |
__self__).__init__(
'rancher2:index/clusterTemplate:ClusterTemplate',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, annotations=None, default_revision_id=None, description=None, labels=None, members=None, name=None, template_revisions=None):
"""
Get an existing ClusterTemplate resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] annotations: Annotations for the cluster template (map)
:param pulumi.Input[str] default_revision_id: (Computed) Default cluster template revision ID (string)
:param pulumi.Input[str] description: Cluster template description (string)
:param pulumi.Input[dict] labels: Labels for the cluster template (map)
:param pulumi.Input[list] members: Cluster template members (list)
:param pulumi.Input[str] name: The cluster template name (string)
:param pulumi.Input[list] template_revisions: Cluster template revisions (list)
The **members** object supports the following:
* `accessType` (`pulumi.Input[str]`) - Member access type. Valid values: `["read-only" | "owner"]` (string)
* `group_principal_id` (`pulumi.Input[str]`) - Member group principal id (string)
* `user_principal_id` (`pulumi.Input[str]`) - Member user principal id (string)
The **template_revisions** object supports the following:
* `annotations` (`pulumi.Input[dict]`) - Annotations for the cluster template revision (map)
* `clusterConfig` (`pulumi.Input[dict]`) - Cluster configuration (list maxitem: 1)
* `cluster_auth_endpoint` (`pulumi.Input[dict]`) - Local cluster auth endpoint (list maxitems: 1)
* `ca_certs` (`pulumi.Input[str]`)
* `enabled` (`pulumi.Input[bool]`) - Enable cluster template revision. Default `true` (bool)
* `fqdn` (`pulumi.Input[str]`)
* `defaultClusterRoleForProjectMembers` (`pulumi.Input[str]`) - Default cluster role for project members (string)
* `default_pod_security_policy_template_id` (`pulumi.Input[str]`) - Default pod security policy template ID (string)
* `desired_agent_image` (`pulumi.Input[str]`) - Desired agent image (string)
* `desired_auth_image` (`pulumi.Input[str]`) - Desired auth image (string)
* `docker_root_dir` (`pulumi.Input[str]`) - Desired auth image (string)
* `enable_cluster_alerting` (`pulumi.Input[bool]`) - Enable built-in cluster alerting. Default: `false` (bool)
* `enable_cluster_monitoring` (`pulumi.Input[bool]`) - Enable built-in cluster monitoring. Default: `false` (bool)
* `enable_network_policy` (`pulumi.Input[bool]`) - Enable project network isolation. Default: `false` (bool)
* `rke_config` (`pulumi.Input[dict]`) - Rancher Kubernetes Engine Config (list maxitems: 1)
* `addonJobTimeout` (`pulumi.Input[float]`)
* `addons` (`pulumi.Input[str]`)
* `addonsIncludes` (`pulumi.Input[list]`)
* `authentication` (`pulumi.Input[dict]`)
* `sans` (`pulumi.Input[list]`)
* `strategy` (`pulumi.Input[str]`)
* `authorization` (`pulumi.Input[dict]`)
* `mode` (`pulumi.Input[str]`)
* `options` (`pulumi.Input[dict]`)
* `bastionHost` (`pulumi.Input[dict]`)
* `address` (`pulumi.Input[str]`)
* `port` (`pulumi.Input[str]`)
* `sshAgentAuth` (`pulumi.Input[bool]`)
* `sshKey` (`pulumi.Input[str]`)
* `sshKeyPath` (`pulumi.Input[str]`)
* `user` (`pulumi.Input[str]`)
* `cloudProvider` (`pulumi.Input[dict]`)
* `awsCloudProvider` (`pulumi.Input[dict]`)
* `global` (`pulumi.Input[dict]`)
* `disableSecurityGroupIngress` (`pulumi.Input[bool]`)
* `disableStrictZoneCheck` (`pulumi.Input[bool]`)
* `elbSecurityGroup` (`pulumi.Input[str]`)
* `kubernetesClusterId` (`pulumi.Input[str]`)
* `kubernetesClusterTag` (`pulumi.Input[str]`)
* `roleArn` (`pulumi.Input[str]`)
* `routeTableId` (`pulumi.Input[str]`)
* `subnetId` (`pulumi.Input[str]`)
* `vpc` (`pulumi.Input[str]`)
* `zone` (`pulumi.Input[str]`)
* `serviceOverrides` (`pulumi.Input[list]`)
* `region` (`pulumi.Input[str]`)
* `service` (`pulumi.Input[str]`)
* `signingMethod` (`pulumi.Input[str]`)
* `signingName` (`pulumi.Input[str]`)
* `signingRegion` (`pulumi.Input[str]`)
* `url` (`pulumi.Input[str]`)
* `azureCloudProvider` (`pulumi.Input[dict]`)
* `aadClientCertPassword` (`pulumi.Input[str]`)
* `aadClientCertPath` (`pulumi.Input[str]`)
* `aadClientId` (`pulumi.Input[str]`)
* `aadClientSecret` (`pulumi.Input[str]`)
* `cloud` (`pulumi.Input[str]`)
* `cloudProviderBackoff` (`pulumi.Input[bool]`)
* `cloudProviderBackoffDuration` (`pulumi.Input[float]`)
* `cloudProviderBackoffExponent` (`pulumi.Input[float]`)
* `cloudProviderBackoffJitter` (`pulumi.Input[float]`)
* `cloudProviderBackoffRetries` (`pulumi.Input[float]`)
* `cloudProviderRateLimit` (`pulumi.Input[bool]`)
* `cloudProviderRateLimitBucket` (`pulumi.Input[float]`)
* `cloudProviderRateLimitQps` (`pulumi.Input[float]`)
* `location` (`pulumi.Input[str]`)
* `maximumLoadBalancerRuleCount` (`pulumi.Input[float]`)
* `primaryAvailabilitySetName` (`pulumi.Input[str]`)
* `primaryScaleSetName` (`pulumi.Input[str]`)
* `resourceGroup` (`pulumi.Input[str]`)
* `routeTableName` (`pulumi.Input[str]`)
* `securityGroupName` (`pulumi.Input[str]`)
* `subnetName` (`pulumi.Input[str]`)
* `subscriptionId` (`pulumi.Input[str]`)
* `tenant_id` (`pulumi.Input[str]`)
* `useInstanceMetadata` (`pulumi.Input[bool]`)
* `useManagedIdentityExtension` (`pulumi.Input[bool]`)
* `vmType` (`pulumi.Input[str]`)
* `vnetName` (`pulumi.Input[str]`)
* `vnetResourceGroup` (`pulumi.Input[str]`)
* `customCloudProvider` (`pulumi.Input[str]`)
* `name` (`pulumi.Input[str]`) - The cluster template revision name (string)
* `openstackCloudProvider` (`pulumi.Input[dict]`)
* `blockStorage` (`pulumi.Input[dict]`)
* `bsVersion` (`pulumi.Input[str]`)
* `ignoreVolumeAz` (`pulumi.Input[bool]`)
* `trustDevicePath` (`pulumi.Input[bool]`)
* `global` (`pulumi.Input[dict]`)
* `authUrl` (`pulumi.Input[str]`)
* `caFile` (`pulumi.Input[str]`)
* `domainId` (`pulumi.Input[str]`)
* `domainName` (`pulumi.Input[str]`)
* `password` (`pulumi.Input[str]`)
* `region` (`pulumi.Input[str]`)
* `tenant_id` (`pulumi.Input[str]`)
* `tenantName` (`pulumi.Input[str]`)
* `trustId` (`pulumi.Input[str]`)
* `username` (`pulumi.Input[str]`)
* `loadBalancer` (`pulumi.Input[dict]`)
* `createMonitor` (`pulumi.Input[bool]`)
* `floatingNetworkId` (`pulumi.Input[str]`)
* `lbMethod` (`pulumi.Input[str]`)
* `lbProvider` (`pulumi.Input[str]`)
* `lbVersion` (`pulumi.Input[str]`)
* `manageSecurityGroups` (`pulumi.Input[bool]`)
* `monitorDelay` (`pulumi.Input[str]`)
* `monitorMaxRetries` (`pulumi.Input[float]`)
* `monitorTimeout` (`pulumi.Input[str]`)
* `subnetId` (`pulumi.Input[str]`)
* `useOctavia` (`pulumi.Input[bool]`)
* `metadata` (`pulumi.Input[dict]`)
* `requestTimeout` (`pulumi.Input[float]`)
* `searchOrder` (`pulumi.Input[str]`)
* `route` (`pulumi.Input[dict]`)
* `routerId` (`pulumi.Input[str]`)
* `vsphereCloudProvider` (`pulumi.Input[dict]`)
* `disk` (`pulumi.Input[dict]`)
* `scsiControllerType` (`pulumi.Input[str]`)
* `global` (`pulumi.Input[dict]`)
* `datacenters` (`pulumi.Input[str]`)
* `insecureFlag` (`pulumi.Input[bool]`)
* `password` (`pulumi.Input[str]`)
* `port` (`pulumi.Input[str]`)
* `soapRoundtripCount` (`pulumi.Input[float]`)
* `user` (`pulumi.Input[str]`)
* `network` (`pulumi.Input[dict]`)
* `publicNetwork` (`pulumi.Input[str]`)
* `virtualCenters` (`pulumi.Input[list]`)
* `datacenters` (`pulumi.Input[str]`)
* `name` (`pulumi.Input[str]`) - The cluster template revision name (string)
* `password` (`pulumi.Input[str]`)
* `port` (`pulumi.Input[str]`)
* `soapRoundtripCount` (`pulumi.Input[float]`)
* `user` (`pulumi.Input[str]`)
* `workspace` (`pulumi.Input[dict]`)
* `datacenter` (`pulumi.Input[str]`)
* `defaultDatastore` (`pulumi.Input[str]`)
* `folder` (`pulumi.Input[str]`)
* `resourcepoolPath` (`pulumi.Input[str]`)
* `server` (`pulumi.Input[str]`)
* `dns` (`pulumi.Input[dict]`)
* `nodeSelector` (`pulumi.Input[dict]`)
* `provider` (`pulumi.Input[str]`)
* `reverseCidrs` (`pulumi.Input[list]`)
* `upstreamNameservers` (`pulumi.Input[list]`)
* `ignoreDockerVersion` (`pulumi.Input[bool]`)
* `ingress` (`pulumi.Input[dict]`)
* `dnsPolicy` (`pulumi.Input[str]`)
* `extraArgs` (`pulumi.Input[dict]`)
* `nodeSelector` (`pulumi.Input[dict]`)
* `options` (`pulumi.Input[dict]`)
* `provider` (`pulumi.Input[str]`)
* `kubernetesVersion` (`pulumi.Input[str]`)
* `monitoring` (`pulumi.Input[dict]`)
* `options` (`pulumi.Input[dict]`)
* `provider` (`pulumi.Input[str]`)
* `network` (`pulumi.Input[dict]`)
* `calicoNetworkProvider` (`pulumi.Input[dict]`)
* `cloudProvider` (`pulumi.Input[str]`)
* `canalNetworkProvider` (`pulumi.Input[dict]`)
* `iface` (`pulumi.Input[str]`)
* `flannelNetworkProvider` (`pulumi.Input[dict]`)
* `iface` (`pulumi.Input[str]`)
* `options` (`pulumi.Input[dict]`)
* `plugin` (`pulumi.Input[str]`)
* `weaveNetworkProvider` (`pulumi.Input[dict]`)
* `password` (`pulumi.Input[str]`)
* `nodes` (`pulumi.Input[list]`)
* `address` (`pulumi.Input[str]`)
* `dockerSocket` (`pulumi.Input[str]`)
* `hostnameOverride` (`pulumi.Input[str]`)
* `internalAddress` (`pulumi.Input[str]`)
* `labels` (`pulumi.Input[dict]`) - Labels for the cluster template revision (map)
* `nodeId` (`pulumi.Input[str]`)
* `port` (`pulumi.Input[str]`)
* `roles` (`pulumi.Input[list]`)
* `sshAgentAuth` (`pulumi.Input[bool]`)
* `sshKey` (`pulumi.Input[str]`)
* `sshKeyPath` (`pulumi.Input[str]`)
* `user` (`pulumi.Input[str]`)
* `prefixPath` (`pulumi.Input[str]`)
* `privateRegistries` (`pulumi.Input[list]`)
* `isDefault` (`pulumi.Input[bool]`)
* `password` (`pulumi.Input[str]`)
* `url` (`pulumi.Input[str]`)
* `user` (`pulumi.Input[str]`)
* `services` (`pulumi.Input[dict]`)
* `etcd` (`pulumi.Input[dict]`)
* `backup_config` (`pulumi.Input[dict]`)
* `enabled` (`pulumi.Input[bool]`) - Enable cluster template revision. Default `true` (bool)
* `intervalHours` (`pulumi.Input[float]`)
* `retention` (`pulumi.Input[float]`)
* `s3BackupConfig` (`pulumi.Input[dict]`)
* `access_key` (`pulumi.Input[str]`)
* `bucketName` (`pulumi.Input[str]`)
* `customCa` (`pulumi.Input[str]`)
* `endpoint` (`pulumi.Input[str]`)
* `folder` (`pulumi.Input[str]`)
* `region` (`pulumi.Input[str]`)
* `secret_key` (`pulumi.Input[str]`)
* `safeTimestamp` (`pulumi.Input[bool]`)
* `caCert` (`pulumi.Input[str]`)
* `cert` (`pulumi.Input[str]`)
* `creation` (`pulumi.Input[str]`)
* `externalUrls` (`pulumi.Input[list]`)
* `extraArgs` (`pulumi.Input[dict]`)
* `extraBinds` (`pulumi.Input[list]`)
* `extraEnvs` (`pulumi.Input[list]`)
* `gid` (`pulumi.Input[float]`)
* `image` (`pulumi.Input[str]`)
* `key` (`pulumi.Input[str]`)
* `path` (`pulumi.Input[str]`)
* `retention` (`pulumi.Input[str]`)
* `snapshot` (`pulumi.Input[bool]`)
* `uid` (`pulumi.Input[float]`)
* `kubeApi` (`pulumi.Input[dict]`)
* `admissionConfiguration` (`pulumi.Input[dict]`)
* `alwaysPullImages` (`pulumi.Input[bool]`)
* `auditLog` (`pulumi.Input[dict]`)
* `configuration` (`pulumi.Input[dict]`)
* `format` (`pulumi.Input[str]`)
* `maxAge` (`pulumi.Input[float]`)
* `maxBackup` (`pulumi.Input[float]`)
* `maxSize` (`pulumi.Input[float]`)
* `path` (`pulumi.Input[str]`)
* `policy` (`pulumi.Input[str]`)
* `enabled` (`pulumi.Input[bool]`) - Enable cluster template revision. Default `true` (bool)
* `eventRateLimit` (`pulumi.Input[dict]`)
* `configuration` (`pulumi.Input[dict]`)
* `enabled` (`pulumi.Input[bool]`) - Enable cluster template revision. Default `true` (bool)
* `extraArgs` (`pulumi.Input[dict]`)
* `extraBinds` (`pulumi.Input[list]`)
* `extraEnvs` (`pulumi.Input[list]`)
* `image` (`pulumi.Input[str]`)
* `podSecurityPolicy` (`pulumi.Input[bool]`)
* `secretsEncryptionConfig` (`pulumi.Input[dict]`)
* `customConfig` (`pulumi.Input[dict]`)
* `enabled` (`pulumi.Input[bool]`) - Enable cluster template revision. Default `true` (bool)
* `serviceClusterIpRange` (`pulumi.Input[str]`)
* `serviceNodePortRange` (`pulumi.Input[str]`)
* `kubeController` (`pulumi.Input[dict]`)
* `clusterCidr` (`pulumi.Input[str]`)
* `extraArgs` (`pulumi.Input[dict]`)
* `extraBinds` (`pulumi.Input[list]`)
* `extraEnvs` (`pulumi.Input[list]`)
* `image` (`pulumi.Input[str]`)
* `serviceClusterIpRange` (`pulumi.Input[str]`)
* `kubelet` (`pulumi.Input[dict]`)
* `clusterDnsServer` (`pulumi.Input[str]`)
* `clusterDomain` (`pulumi.Input[str]`)
* `extraArgs` (`pulumi.Input[dict]`)
* `extraBinds` (`pulumi.Input[list]`)
* `extraEnvs` (`pulumi.Input[list]`)
* `failSwapOn` (`pulumi.Input[bool]`)
* `generateServingCertificate` (`pulumi.Input[bool]`)
* `image` (`pulumi.Input[str]`)
* `infraContainerImage` (`pulumi.Input[str]`)
* `kubeproxy` (`pulumi.Input[dict]`)
* `extraArgs` (`pulumi.Input[dict]`)
* `extraBinds` (`pulumi.Input[list]`)
* `extraEnvs` (`pulumi.Input[list]`)
* `image` (`pulumi.Input[str]`)
* `scheduler` (`pulumi.Input[dict]`)
* `extraArgs` (`pulumi.Input[dict]`)
* `extraBinds` (`pulumi.Input[list]`)
* `extraEnvs` (`pulumi.Input[list]`)
* `image` (`pulumi.Input[str]`)
* `sshAgentAuth` (`pulumi.Input[bool]`)
* `sshCertPath` (`pulumi.Input[str]`)
* `sshKeyPath` (`pulumi.Input[str]`)
* `windows_prefered_cluster` (`pulumi.Input[bool]`) - Windows prefered cluster. Default: `false` (bool)
* `cluster_template_id` (`pulumi.Input[str]`) - Cluster template ID (string)
* `default` (`pulumi.Input[bool]`) - Default variable value (string)
* `enabled` (`pulumi.Input[bool]`) - Enable cluster template revision. Default `true` (bool)
* `id` (`pulumi.Input[str]`) - The cluster template revision ID (string)
* `labels` (`pulumi.Input[dict]`) - Labels for the cluster template revision (map)
* `name` (`pulumi.Input[str]`) - The cluster template revision name (string)
* `questions` (`pulumi.Input[list]`) - Cluster template questions (list)
* `default` (`pulumi.Input[str]`) - Default variable value (string)
* `required` (`pulumi.Input[bool]`) - Required variable. Default `false` (bool)
* `type` (`pulumi.Input[str]`) - Variable type. `boolean`, `int` and `string` are allowed. Default `string` (string)
* `variable` (`pulumi.Input[str]`) - Variable name (string)
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["annotations"] = annotations
__props__["default_revision_id"] = default_revision_id
__props__["description"] = description
__props__["labels"] = labels
__props__["members"] = members
__props__["name"] = name
__props__["template_revisions"] = template_revisions
return ClusterTemplate(resource_name, opts=opts, __props__=__props__)
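# Example usage (hypothetical resource name and provider ID):
#   imported = ClusterTemplate.get("imported-template", "ct-abc123")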
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
import logging
import json
import datetime
import typing
import uuid
from flask import g
from driftbase.models.db import Match
from driftbase import flexmatch
from driftbase.lobbies import InvalidRequestException, NotFoundException, UnauthorizedException, ConflictException, _post_lobby_event_to_members, _get_lobby_member_player_ids, _get_lobby_key, _get_lobby_host_player_id, _get_player_lobby_key
from driftbase.utils.redis_utils import JsonLock
MATCH_PROVIDER = "gamelift"
"""
Only lobby matches with the GameLift provider are supported at the time of writing.
"""
# TODO: Stop using protected/private functions from the lobbies module
log = logging.getLogger(__name__)
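# Placements are stored in Redis under a per-placement key plus a per-player pointer key;
# each mutation takes a JsonLock on the placement and then re-reads the player's pointer
# to detect races that occurred while waiting for the lock.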
def get_player_match_placement(player_id: int, expected_match_placement_id: typing.Optional[str] = None) -> dict:
player_match_placement_key = _get_player_match_placement_key(player_id)
placement_id = g.redis.conn.get(player_match_placement_key)
if not placement_id:
log.info(f"Player '{player_id}' attempted to fetch a match placement without having a match placement")
message = f"Match placement {expected_match_placement_id} not found" if expected_match_placement_id else "No match placement found"
raise NotFoundException(message)
if expected_match_placement_id and expected_match_placement_id != placement_id:
log.warning(f"Player '{player_id}' attempted to fetch match placement '{expected_match_placement_id}', but the player didn't issue the match placement")
raise UnauthorizedException(f"You don't have permission to access match placement {expected_match_placement_id}")
with JsonLock(_get_match_placement_key(placement_id)) as match_placement_lock:
if placement_id != g.redis.conn.get(player_match_placement_key):
log.warning(f"Player '{player_id}' attempted to get match placement '{placement_id}', but was no longer assigned to the match placement after getting the lock")
raise ConflictException("You were no longer assigned to the match placement while attempting to fetch it")
placement = match_placement_lock.value
if not placement:
log.warning(f"Player '{player_id}' is assigned to match placement '{placement_id}' but the match placement doesn't exist")
g.redis.conn.delete(player_match_placement_key)
raise NotFoundException("No match placement found")
log.info(f"Returning match placement '{placement_id}' for player '{player_id}'")
return placement
def start_lobby_match_placement(player_id: int, queue: str, lobby_id: str) -> dict:
player_lobby_key = _get_player_lobby_key(player_id)
# Check lobby id
player_lobby_id = g.redis.conn.get(player_lobby_key)
if not player_lobby_id:
log.warning(f"Player '{player_id}' is attempting to start match for lobby '{lobby_id}', but is supposed to be in lobby '{player_lobby_id}'")
raise InvalidRequestException(f"You aren't in any lobby. Only lobby match placements are supported")
if player_lobby_id != lobby_id:
log.warning(f"Player '{player_id}' is attempting to start match for lobby '{lobby_id}', but is supposed to be in lobby '{player_lobby_id}'")
raise UnauthorizedException(f"You don't have permission to access lobby {lobby_id}")
# Check existing placement
player_match_placement_key = _get_player_match_placement_key(player_id)
existing_placement_id = g.redis.conn.get(player_match_placement_key)
if existing_placement_id:
with JsonLock(_get_match_placement_key(existing_placement_id)) as match_placement_lock:
if existing_placement_id != g.redis.conn.get(player_match_placement_key):
log.warning(f"Existing match placement check failed for player '{player_id}'. Player was assigned to match placement'{existing_placement_id}', but was no longer assigned to the match placement after getting the lock")
raise ConflictException("You were no longer assigned to the match placement while attempting to fetch it")
placement = match_placement_lock.value
if not placement:
log.warning(f"Player '{player_id}' is assigned to match placement '{existing_placement_id}' but the match placement doesn't exist")
g.redis.conn.delete(player_match_placement_key)
existing_placement_id = None
elif placement["status"] == "pending":
log.warning(f"Player '{player_id}' attempted to start a match placement while assigned to pending match placement '{existing_placement_id}'")
raise InvalidRequestException("You have a pending match placement in progress")
with JsonLock(_get_lobby_key(lobby_id)) as lobby_lock:
if lobby_id != g.redis.conn.get(player_lobby_key):
log.warning(f"Player '{player_id}' attempted to start lobby match placement for lobby '{lobby_id}', but left the lobby while acquiring the lobby lock")
raise ConflictException(f"You left the lobby while attempting to start the lobby match placement")
if existing_placement_id != g.redis.conn.get(player_match_placement_key):
log.warning(f"Player '{player_id}' attempted to start lobby match placement for lobby '{lobby_id}', but was assigned to a match placement while acquiring the lobby lock")
raise ConflictException("You were assigned to a match placement while attempting to start the lobby match placement")
lobby = lobby_lock.value
if not lobby:
raise RuntimeError(f"Player '{player_id}' is attempting to start match for nonexistent lobby '{lobby_id}'. Player is supposed to be in said lobby")
# Verify host
host_player_id = _get_lobby_host_player_id(lobby)
if player_id != host_player_id:
log.warning(f"Player '{player_id}' attempted to start the match for lobby '{lobby_id}' without being the lobby host")
raise UnauthorizedException(f"You aren't the host of lobby {lobby_id}. Only the lobby host can start the lobby match")
# Prevent issuing another placement request
if lobby["status"] == "starting":
log.warning(f"Player '{player_id}' attempted to start the match for lobby '{lobby_id}' while the match is starting")
raise InvalidRequestException(f"An active match placement is already in progress for the lobby")
# Request a game server
lobby_name = lobby["lobby_name"]
placement_id = str(uuid.uuid4())
max_player_session_count = lobby["team_capacity"] * len(lobby["team_names"])
game_session_name = f"Lobby-{lobby_id}-{lobby_name}"
custom_data = lobby["custom_data"]
lobby["placement_id"] = placement_id
player_latencies = []
for member in lobby["members"]:
for region, latency in flexmatch.get_player_latency_averages(member["player_id"]).items():
player_latencies.append({
"PlayerId": str(member["player_id"]),
"RegionIdentifier": region,
"LatencyInMilliseconds": latency
})
log.info(f"Host player '{player_id}' is starting lobby match for lobby '{lobby_id}' in queue '{queue}'. GameLift placement id: '{placement_id}'")
response = flexmatch.start_game_session_placement(
PlacementId=placement_id,
GameSessionQueueName=queue,
MaximumPlayerSessionCount=max_player_session_count,
GameSessionName=game_session_name,
GameProperties=[
{
"Key": "lobby",
"Value": "true",
},
],
PlayerLatencies=player_latencies,
DesiredPlayerSessions=[
{
"PlayerId": str(member["player_id"]),
"PlayerData": json.dumps({
"player_name": member["player_name"],
"team_name": member["team_name"],
"host": member["host"],
}),
}
for member in lobby["members"] if member["team_name"]
],
GameSessionData=json.dumps({
"lobby_id": lobby_id,
"lobby_name": lobby_name,
"lobby_map": lobby["map_name"],
"lobby_members": [
{
"player_id": str(member["player_id"]),
"player_name": member["player_name"],
"team_name": member["team_name"],
"host": member["host"],
}
for member in lobby["members"]
],
"lobby_custom_data": custom_data,
}),
)
log.debug(f"match_placements::start_lobby_match_placement() start_game_session_placement response: '{_jsonify(response)}'")
# Check if another placement started for the player while waiting for a response
if existing_placement_id != g.redis.conn.get(player_match_placement_key):
log.warning(
f"Player '{player_id}' attempted to start lobby match placement for lobby '{lobby_id}', but was assigned to a match placement while starting the match placement. Stopping created match placement '{placement_id}'")
response = flexmatch.stop_game_session_placement(placement_id)
log.debug(f"match_placements::start_lobby_match_placement() stop_game_session_placement response: '{_jsonify(response)}'")
raise ConflictException("You were assigned to a match placement while attempting to start the lobby match placement")
# Lock used as a convenient way to serialize the JSON value - Locking isn't required here since this is a new placement id
with JsonLock(_get_match_placement_key(placement_id)) as match_placement_lock:
match_placement = {
"placement_id": placement_id,
"player_id": player_id,
"match_provider": MATCH_PROVIDER,
"queue": queue,
"lobby_id": lobby_id,
"status": "pending",
"create_date": datetime.datetime.utcnow().isoformat(),
}
match_placement_lock.value = match_placement
g.redis.conn.set(player_match_placement_key, placement_id)
lobby["status"] = "starting"
lobby["placement_date"] = datetime.datetime.utcnow().isoformat()
lobby_lock.value = lobby
log.info(f"GameLift game session placement issued for lobby '{lobby_id}' by host player '{player_id}'")
# Notify members
receiving_player_ids = _get_lobby_member_player_ids(lobby, [player_id])
_post_lobby_event_to_members(receiving_player_ids, "LobbyMatchStarting", {"lobby_id": lobby_id, "status": lobby["status"]})
return match_placement
def stop_player_match_placement(player_id: int, expected_match_placement_id: str):
player_match_placement_key = _get_player_match_placement_key(player_id)
placement_id = g.redis.conn.get(player_match_placement_key)
if expected_match_placement_id != placement_id:
log.warning(f"Player '{player_id}' attempted to stop match placement '{expected_match_placement_id}', but the player didn't issue the match placement")
raise UnauthorizedException(f"You don't have permission to access match placement {expected_match_placement_id}")
with JsonLock(_get_match_placement_key(placement_id)) as match_placement_lock:
if placement_id != g.redis.conn.get(player_match_placement_key):
log.warning(f"Player '{player_id}' attempted to stop match placement '{placement_id}', but was assigned to a different match placement while acquiring the match placement lock")
raise ConflictException("You were assigned to a different match placement while attempting to stop the match placement")
placement = match_placement_lock.value
if placement:
lobby_id = placement.get("lobby_id", None)
if not lobby_id:
raise RuntimeError(f"Match placement '{placement_id}' doesn't contain a 'lobby_id' field. Only lobby match placements are supported at this time")
match_provider = placement["match_provider"]
if match_provider != MATCH_PROVIDER:
raise RuntimeError(f"Invalid match provider configured, '{match_provider}'. Only the GameLift match provider is supported at this time")
placement_status = placement["status"]
if placement_status != "pending":
log.warning(f"Player '{player_id}' attempted to stop match placement '{expected_match_placement_id}', but the placement is in status '{placement_status}'")
raise InvalidRequestException(f"Cannot stop a match placement in status {placement_status}")
response = flexmatch.stop_game_session_placement(placement_id)
log.debug(f"match_placements::stop_player_match_placement() stop_game_session_placement response: '{_jsonify(response)}'")
log.info(f"Player '{player_id}' stopped match placement '{placement_id}'")
match_placement_lock.value = None
else:
log.warning(f"Player '{player_id}' attempted to stop match placement '{placement_id}', but the match placement doesn't exist")
g.redis.conn.delete(player_match_placement_key)
def process_gamelift_queue_event(queue_name: str, message: dict):
log.debug(f"match-placements::process_gamelift_queue_event() received event in queue '{queue_name}': '{message}'")
event_details = _get_event_details(message)
event_type = event_details.get("type", None)
if event_type is None:
raise RuntimeError(f"No event type found. Message: '{message}'")
log.info(f"Incoming '{event_type}' queue event: '{event_details}'")
if event_type == "PlacementFulfilled":
return _process_fulfilled_queue_event(event_details)
if event_type == "PlacementCancelled":
return _process_cancelled_queue_event(event_details)
if event_type == "PlacementTimedOut":
return _process_timed_out_queue_event(event_details)
if event_type == "PlacementFailed":
return _process_failed_queue_event(event_details)
raise RuntimeError(f"Unknown event '{event_type}'")
def process_match_message(queue_name: str, message: dict):
log.debug(f"match-placements::process_match_message() received event in queue '{queue_name}': '{message}'")
event = message["event"]
if event == "match_status_changed":
match_id = message.get("match_id", None)
if match_id is None:
log.error(f"Malformed '{event}' event; 'match_id' is missing. Message: '{message}'")
return
match_status = message.get("match_status", None)
if match_status is None:
log.error(f"Malformed '{event}' event; 'match_status' is missing. Message: '{message}'")
return
if match_status == "ended":
return _process_match_ended(match_id)
else:
log.error(f"Unexpected event '{event}' published.")
# Helpers
def _get_match_placement_key(placement_id: str) -> str:
return g.redis.make_key(f"match-placement:{placement_id}:")
def _get_player_match_placement_key(player_id: int) -> str:
return g.redis.make_key(f"player:{player_id}:match-placement:")
def _get_tenant_name():
return g.conf.tenant.get('tenant_name')
def _get_event_details(event: dict):
if event.get("detail-type", None) != "GameLift Queue Placement Event":
raise RuntimeError("Event is not a GameLift Queue Placement Event!")
details = event.get("detail", None)
if details is None:
raise RuntimeError("Event is missing details!")
return details
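# Note: the start/end times arrive as ISO-8601 strings with a trailing 'Z'; the
# suffix is stripped below because datetime.fromisoformat() does not accept it
# on older Python versions (support for 'Z' was only added in Python 3.11).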
def _get_placement_duration(event_details: dict) -> float:
start_time = datetime.datetime.fromisoformat(event_details["startTime"].removesuffix("Z"))
end_time = datetime.datetime.fromisoformat(event_details["endTime"].removesuffix("Z"))
delta = end_time - start_time
return delta.total_seconds()
def _validate_gamelift_placement_for_queue_event(placement_id: str, placement: dict)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from six.moves import configparser
from oslo_config import cfg
from dragonflow.common import utils as df_utils
from dragonflow.controller.common import constants as const
from dragonflow.tests.common import utils
from dragonflow.tests.fullstack import test_base
from dragonflow.tests.fullstack import test_objects as objects
ML2_CONF_INI = '/etc/neutron/plugins/ml2/ml2_conf.ini'
PROVIDER_NET_APP_NAME = 'provider_networks_app.ProviderNetworksApp'
TUNNEL_NET_APP_NAME = 'tunneling_app.TunnelingApp'
VLAN_MIN_DEFAULT = 2
VLAN_TAG_BITS = 12
VLAN_MASK = df_utils.get_bitmask(VLAN_TAG_BITS)
OFPVID_PRESENT = 0x1000
class TestL2FLows(test_base.DFTestBase):
def _get_metadata_id(self, flows, ip, mac):
for flow in flows:
if flow['table'] == str(const.L3_PROACTIVE_LOOKUP_TABLE):
if 'nw_dst=' + ip in flow['match'] and mac in flow['actions']:
m = re.search('metadata=0x([0-9a-f]+)', flow['match'])
if m:
return m.group(1)
return None
def test_tunnel_network_flows(self):
if self._check_tunneling_app_enable() is False:
return
network = objects.NetworkTestObj(self.neutron, self.nb_api)
self.addCleanup(network.close)
network_id = network.create()
network_params = network.get_network()
segmentation_id = network_params['network']['provider:segmentation_id']
subnet = {'network_id': network_id,
'cidr': '10.200.0.0/24',
'gateway_ip': '10.200.0.1',
'ip_version': 4,
'name': 'private',
'enable_dhcp': True}
subnet = self.neutron.create_subnet({'subnet': subnet})
self.assertIsNotNone(subnet)
ovs = utils.OvsFlowsParser()
vm = objects.VMTestObj(self, self.neutron)
self.addCleanup(vm.close)
vm.create(network=network)
ip = vm.get_first_ipv4()
self.assertIsNotNone(ip)
self.assertIsNotNone(vm.server.addresses['mynetwork'])
mac = vm.server.addresses['mynetwork'][0]['OS-EXT-IPS-MAC:mac_addr']
self.assertIsNotNone(mac)
metadataid = utils.wait_until_is_and_return(
lambda: self._get_metadata_id(ovs.dump(self.integration_bridge),
ip, mac),
exception=Exception('Metadata id was not found in OpenFlow rules')
)
port = utils.wait_until_is_and_return(
lambda: utils.find_logical_port(self.nb_api, ip, mac),
exception=Exception('No port assigned to VM')
)
tunnel_key = port.unique_key
tunnel_key_hex = hex(tunnel_key)
n_type = network.get_network()['network']['provider:network_type']
port_num = self.vswitch_api.get_vtp_ofport(n_type)
r = self._check_tunnel_flows(ovs.dump(self.integration_bridge),
metadataid,
hex(segmentation_id),
tunnel_key_hex,
mac, port_num)
for key, value in r.items():
self.assertIsNotNone(value, key)
vm.close()
network.close()
def test_vlan_network_flows(self):
if not self._check_providers_net_app_enable():
return
physical_network, vlan_min = self._parse_network_vlan_ranges()
if physical_network is None or vlan_min is None:
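# Force a test failure: the VLAN ranges could not be parsed from the ML2 config.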
self.assertIsNotNone(None)
return
# Create network
network = objects.NetworkTestObj(self.neutron, self.nb_api)
self.addCleanup(network.close)
network_params = {"name": "vlan_1",
"provider:network_type": "vlan",
"provider:physical_network": physical_network,
"provider:segmentation_id": vlan_min}
network_id = network.create(network=network_params)
# Create subnet
subnet_params = {'network_id': network_id,
'cidr': '172.16.31.10/24',
'gateway_ip': '10.64.2.1',
'ip_version': 4,
'name': 'private',
'enable_dhcp': True}
subnet = self.neutron.create_subnet({'subnet': subnet_params})
self.assertIsNotNone(subnet)
# Create VM
ovs = utils.OvsFlowsParser()
vm = objects.VMTestObj(self, self.neutron)
self.addCleanup(vm.close)
vm.create(network=network)
ip = vm.get_first_ipv4()
self.assertIsNotNone(ip)
mac = vm.get_first_mac()
self.assertIsNotNone(mac)
metadataid = utils.wait_until_is_and_return(
lambda: self._get_metadata_id(ovs.dump(self.integration_bridge),
ip, mac),
exception=Exception('Metadata id was not found in OpenFlow rules')
)
port = utils.wait_until_is_and_return(
lambda: utils.find_logical_port(self.nb_api, ip, mac),
exception=Exception('No port assigned to VM')
)
port_key = port.unique_key
port_key_hex = hex(port_key)
r = self._check_vlan_flows(ovs.dump(self.integration_bridge),
metadataid,
vlan_min,
port_key_hex,
mac)
for key, value in r.items():
self.assertIsNotNone(value, key)
vm.server.stop()
vm.close()
network.close()
def _check_tunnel_flows(self, flows, metadtata, segmentation_id,
port_key_hex, mac, tunnel_port_num):
l2_lookup_unicast_match = 'metadata=0x' + metadtata + \
',dl_dst=' + mac
l2_lookup_unicast_action = 'goto_table:' + \
str(const.EGRESS_TABLE)
l2_lookup_multicast_match = 'metadata=0x' + metadtata + ',dl_dst=' + \
'01:00:00:00:00:00/01:00:00:00:00:00'
l2_lookup_multicast_action = 'set_field:' + port_key_hex + \
'->reg7,resubmit(,' + \
str(const.EGRESS_TABLE) + ')' + \
',set_field:0' + \
'->reg7,resubmit(,' + \
str(const.EGRESS_TABLE) + ')'
ingress_match = ('tun_id=' + str(segmentation_id)
+ ",in_port=" + str(tunnel_port_num))
ingress_action = 'set_field:0x' + metadtata + '->metadata,' + \
'goto_table:' + \
str(const.INGRESS_DESTINATION_PORT_LOOKUP_TABLE)
l2_lookup_unicast_check = None
l2_lookup_multicast_check = None
ingress_check = None
for flow in flows:
if flow['table'] == str(const.L2_LOOKUP_TABLE):
if (l2_lookup_multicast_match in flow['match']):
if l2_lookup_multicast_action in flow['actions']:
l2_lookup_multicast_check = True
if (l2_lookup_unicast_match in flow['match']):
if l2_lookup_unicast_action in flow['actions']:
l2_lookup_unicast_check = True
if flow['table'] == str(
const.INGRESS_CLASSIFICATION_DISPATCH_TABLE):
if (ingress_match in flow['match']):
if ingress_action in flow['actions']:
ingress_check = True
return {'l2_lookup_multicast_check': l2_lookup_multicast_check,
'l2_lookup_unicast_check': l2_lookup_unicast_check,
'ingress_check': ingress_check}
def _check_vlan_flows(self, flows, metadtata, segmentation_id,
port_key_hex, mac):
l2_lookup_unicast_match = 'metadata=0x' + metadtata + \
',dl_dst=' + mac
l2_lookup_unicast_action = 'goto_table:' + \
str(const.EGRESS_TABLE)
l2_lookup_unknown_match = 'metadata=0x' + metadtata + \
',dl_dst=00:00:00:00:00:00/01:00:00:00:00:00'
l2_lookup_unkown_action = 'goto_table:' + \
str(const.EGRESS_TABLE)
l2_lookup_multicast_match = 'metadata=0x' + metadtata + ',dl_dst=' + \
'01:00:00:00:00:00/01:00:00:00:00:00'
l2_lookup_multicast_action = 'set_field:' + port_key_hex + \
'->reg7,resubmit(,' + \
str(const.EGRESS_TABLE) + ')' + \
',set_field:0' + \
'->reg7,resubmit(,' + \
str(const.EGRESS_TABLE) + ')'
egress_match = 'metadata=0x' + metadtata
egress_action = 'push_vlan:0x8100,set_field:' + \
str((segmentation_id & VLAN_MASK) | OFPVID_PRESENT) + \
"->vlan_vid,goto_table:" + \
str(const.EGRESS_EXTERNAL_TABLE)
ingress_match = 'dl_vlan=%s' % segmentation_id
ingress_action = 'set_field:0x' + metadtata + '->metadata,' \
'pop_vlan,goto_table:' + \
str(const.L2_LOOKUP_TABLE)
l2_lookup_unicast_check = None
l2_lookup_multicast_check = None
l2_lookup_unkown_check = None
egress_check = None
ingress_check = None
for flow in flows:
if flow['table'] == str(const.L2_LOOKUP_TABLE):
if (l2_lookup_multicast_match in flow['match']):
if l2_lookup_multicast_action in flow['actions']:
l2_lookup_multicast_check = True
continue
if (l2_lookup_unicast_match in flow['match']):
if l2_lookup_unicast_action in flow['actions']:
l2_lookup_unicast_check = True
continue
if (l2_lookup_unknown_match in flow['match']):
if l2_lookup_unkown_action in flow['actions']:
l2_lookup_unkown_check = True
continue
if flow['table'] == str(const.EGRESS_TABLE):
if (egress_match in flow['match']):
if egress_action in flow['actions']:
egress_check = True
continue
if flow['table'] == str(
const.INGRESS_CLASSIFICATION_DISPATCH_TABLE):
if (ingress_match in flow['match']):
if ingress_action in flow['actions']:
ingress_check = True
continue
return {'l2_lookup_multicast_check': l2_lookup_multicast_check,
'l2_lookup_unicast_check': l2_lookup_unicast_check,
'l2_lookup_unkown_check': l2_lookup_unkown_check,
'egress_vlan_tag': egress_check,
'ingress_check': ingress_check}
def test_flat_network_flows(self):
if not self._check_providers_net_app_enable():
return
physical_network = self._parse_flat_network()
if not physical_network:
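# Force a test failure: no flat network could be parsed from the ML2 config.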
self.assertIsNotNone(None)
return
# Create network
network = objects.NetworkTestObj(self.neutron, self.nb_api)
self.addCleanup(network.close)
network_params = {"name": "flat_1",
"provider:network_type": "flat",
"provider:physical_network": physical_network}
network_id = network.create(network=network_params)
# Create subnet
subnet_params = {'network_id': network_id,
'cidr': '192.168.127.12/24',
'gateway_ip': '10.64.1.1',
'ip_version': 4,
'name': 'private',
'enable_dhcp': True}
subnet = self.neutron.create_subnet({'subnet': subnet_params})
self.assertIsNotNone(subnet)
# Create VM
ovs = utils.OvsFlowsParser()
vm = objects.VMTestObj(self, self.neutron)
self.addCleanup(vm.close)
vm.create(network=network)
ip = vm.get_first_ipv4()
self.assertIsNotNone(ip)
mac = vm.get_first_mac()
self.assertIsNotNone(mac)
metadataid = utils.wait_until_is_and_return(
lambda: self._get_metadata_id(ovs.dump(self.integration_bridge),
ip, mac),
exception=Exception('Metadata id was not found in OpenFlow rules')
)
port = utils.wait_until_is_and_return(
lambda: utils.find_logical_port(self.nb_api, ip, mac),
exception=Exception('No port assigned to VM')
)
port_key = port.unique_key
port_key_hex = hex(port_key)
r = self._check_flat_flows(ovs.dump(self.integration_bridge),
metadataid, port_key_hex, mac)
for key, value in r.items():
self.assertIsNotNone(value, key)
vm.server.stop()
vm.close()
network.close()
return None
def _check_flat_flows(self, flows, metadtata,
port_key_hex, mac):
l2_lookup_unicast_match = 'metadata=0x' + metadtata + \
',dl_dst=' + mac
l2_lookup_unicast_action = 'goto_table:' + \
str(const.EGRESS_TABLE)
l2_lookup_unkown_match = 'metadata=0x' + metadtata + \
',dl_dst=00:00:00:00:00:00/01:00:00:00:00:00'
l2_lookup_unkown_action = 'goto_table:' + \
str(const.EGRESS_TABLE)
l2_lookup_multicast_match = 'metadata=0x' + metadtata + ',dl_dst=' + \
'01:00:00:00:00:00/01:00:00:00:00:00'
l2_lookup_multicast_action = 'set_field:' + port_key_hex + \
'->reg7,resubmit(,' + \
str(const.EGRESS_TABLE) + ')' + \
',set_field:0' + \
'->reg7,resubmit(,' + \
str(const.EGRESS_TABLE) + ')'
egress_match = 'metadata=0x' + metadtata
egress_action = 'goto_table:' + \
str(const.EGRESS_EXTERNAL_TABLE)
ingress_match = 'vlan_tci=0x0000/0x1fff'
ingress_action = 'set_field:0x' + metadtata + \
'->metadata,goto_table:' + \
str(const.L2_LOOKUP_TABLE)
l2_lookup_unicast_check = None
l2_lookup_multicast_check = None
l2_lookup_unkown_check = None
ingress_check = None
egress_check = None
for flow in flows:
if flow['table'] == str(const.L2_LOOKUP_TABLE):
if (l2_lookup_multicast_match in flow['match']):
if l2_lookup_multicast_action in flow['actions']:
l2_lookup_multicast_check = True
continue
if (l2_lookup_unicast_match in flow['match']):
if l2_lookup_unicast_action in flow['actions']:
l2_lookup_unicast_check = True
continue
if (l2_lookup_unkown_match in flow['match']):
if l2_lookup_unkown_action in flow['actions']:
l2_lookup_unkown_check = True
continue
if flow['table'] == str(const.EGRESS_TABLE):
if (egress_match in flow['match']):
if egress_action in flow['actions']:
egress_check = True
continue
if flow['table'] == str(
const.INGRESS_CLASSIFICATION_DISPATCH_TABLE):
if (ingress_match in flow['match']):
if ingress_action in flow['actions']:
ingress_check = True
continue
return {'l2_lookup_multicast_check': l2_lookup_multicast_check,
'l2_lookup_unicast_check': l2_lookup_unicast_check,
'l2_lookup_unkown_check': l2_lookup_unkown_check,
'egress_check': egress_check,
'ingress_check': ingress_check}
def _get_config_values(self, section, key):
readhandle = None
value = None
try:
config = configparser.ConfigParser()
readhandle = open(ML2_CONF_INI, 'r')
config.readfp(readhandle)
value = config.get(section, key)
except Exception:
value = None
if readhandle is not None:
try:
readhandle.close()
except Exception:
return value
return value
def _check_tunneling_app_enable(self):
return self._check_if_app_enabled(TUNNEL_NET_APP_NAME)
def _check_providers_net_app_enable(self):
return self._check_if_app_enabled(PROVIDER_NET_APP_NAME)
def _check_if_app_enabled(self, app_name):
if app_name in cfg.CONF.df.apps_list:
return True
return False
def _parse_network_vlan_ranges(self):
network_vlan_ranges = self._get_config_values('ml2_type_vlan',
'network_vlan_ranges')
if network_vlan_ranges is None:
return None
network_vlan_range_list = network_vlan_ranges.split(',')
if not network_vlan_range_list:
return None
network_vlan_range = network_vlan_range_list[0]
if ':' in network_vlan_range:
try:
physical_network, vlan_min, vlan_max = \
network_vlan_range.split(':')
except ValueError:
return None
else:
physical_network = network_vlan_range
vlan_min = VLAN_MIN_DEFAULT
return physical_network, vlan_min
def _parse_flat_network(self):
flat_networks = self._get_config_values('ml2_type_flat',
'flat_networks')
if flat_networks is None:
return None
flat_networks_list = flat_networks.split(',')
if not flat_networks_list:
return None
flat_network = flat_networks_list[0]
physical_network = 'phynet1'
if flat_network != '*':
physical_network = flat_network
return physical_network
"""
Ethernet frames with a value of 1 in the least-significant bit of the first
octet of the destination address are treated as multicast frames and are
flooded to all points on the network.
https://en.wikipedia.org/wiki/Multicast_address
"""
def _check_multicast_rule(self, flows, metadataid, tunnel_key_hex):
check = 'set_field:' + tunnel_key_hex + '->reg7,resubmit(,' + \
str(const.EGRESS_TABLE)
upgrades on
the cluster (UTC).
:type upgrade_pause_end_timestamp_utc: ~datetime.datetime
:param wave_upgrade_paused: Boolean to pause automatic runtime version upgrades to the cluster.
:type wave_upgrade_paused: bool
:param notifications: Indicates a list of notification channels for cluster events.
:type notifications: list[~azure.mgmt.servicefabric.models.Notification]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'add_on_features': {'key': 'properties.addOnFeatures', 'type': '[str]'},
'certificate': {'key': 'properties.certificate', 'type': 'CertificateDescription'},
'certificate_common_names': {'key': 'properties.certificateCommonNames', 'type': 'ServerCertificateCommonNames'},
'client_certificate_common_names': {'key': 'properties.clientCertificateCommonNames', 'type': '[ClientCertificateCommonName]'},
'client_certificate_thumbprints': {'key': 'properties.clientCertificateThumbprints', 'type': '[ClientCertificateThumbprint]'},
'cluster_code_version': {'key': 'properties.clusterCodeVersion', 'type': 'str'},
'event_store_service_enabled': {'key': 'properties.eventStoreServiceEnabled', 'type': 'bool'},
'fabric_settings': {'key': 'properties.fabricSettings', 'type': '[SettingsSectionDescription]'},
'node_types': {'key': 'properties.nodeTypes', 'type': '[NodeTypeDescription]'},
'reliability_level': {'key': 'properties.reliabilityLevel', 'type': 'str'},
'reverse_proxy_certificate': {'key': 'properties.reverseProxyCertificate', 'type': 'CertificateDescription'},
'upgrade_description': {'key': 'properties.upgradeDescription', 'type': 'ClusterUpgradePolicy'},
'application_type_versions_cleanup_policy': {'key': 'properties.applicationTypeVersionsCleanupPolicy', 'type': 'ApplicationTypeVersionsCleanupPolicy'},
'upgrade_mode': {'key': 'properties.upgradeMode', 'type': 'str'},
'sf_zonal_upgrade_mode': {'key': 'properties.sfZonalUpgradeMode', 'type': 'str'},
'vmss_zonal_upgrade_mode': {'key': 'properties.vmssZonalUpgradeMode', 'type': 'str'},
'infrastructure_service_manager': {'key': 'properties.infrastructureServiceManager', 'type': 'bool'},
'upgrade_wave': {'key': 'properties.upgradeWave', 'type': 'str'},
'upgrade_pause_start_timestamp_utc': {'key': 'properties.upgradePauseStartTimestampUtc', 'type': 'iso-8601'},
'upgrade_pause_end_timestamp_utc': {'key': 'properties.upgradePauseEndTimestampUtc', 'type': 'iso-8601'},
'wave_upgrade_paused': {'key': 'properties.waveUpgradePaused', 'type': 'bool'},
'notifications': {'key': 'properties.notifications', 'type': '[Notification]'},
}
def __init__(
self,
**kwargs
):
super(ClusterUpdateParameters, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.add_on_features = kwargs.get('add_on_features', None)
self.certificate = kwargs.get('certificate', None)
self.certificate_common_names = kwargs.get('certificate_common_names', None)
self.client_certificate_common_names = kwargs.get('client_certificate_common_names', None)
self.client_certificate_thumbprints = kwargs.get('client_certificate_thumbprints', None)
self.cluster_code_version = kwargs.get('cluster_code_version', None)
self.event_store_service_enabled = kwargs.get('event_store_service_enabled', None)
self.fabric_settings = kwargs.get('fabric_settings', None)
self.node_types = kwargs.get('node_types', None)
self.reliability_level = kwargs.get('reliability_level', None)
self.reverse_proxy_certificate = kwargs.get('reverse_proxy_certificate', None)
self.upgrade_description = kwargs.get('upgrade_description', None)
self.application_type_versions_cleanup_policy = kwargs.get('application_type_versions_cleanup_policy', None)
self.upgrade_mode = kwargs.get('upgrade_mode', "Automatic")
self.sf_zonal_upgrade_mode = kwargs.get('sf_zonal_upgrade_mode', None)
self.vmss_zonal_upgrade_mode = kwargs.get('vmss_zonal_upgrade_mode', None)
self.infrastructure_service_manager = kwargs.get('infrastructure_service_manager', None)
self.upgrade_wave = kwargs.get('upgrade_wave', None)
self.upgrade_pause_start_timestamp_utc = kwargs.get('upgrade_pause_start_timestamp_utc', None)
self.upgrade_pause_end_timestamp_utc = kwargs.get('upgrade_pause_end_timestamp_utc', None)
self.wave_upgrade_paused = kwargs.get('wave_upgrade_paused', None)
self.notifications = kwargs.get('notifications', None)
class ClusterUpgradeDeltaHealthPolicy(msrest.serialization.Model):
"""Describes the delta health policies for the cluster upgrade.
All required parameters must be populated in order to send to Azure.
:param max_percent_delta_unhealthy_nodes: Required. The maximum allowed percentage of nodes
health degradation allowed during cluster upgrades.
The delta is measured between the state of the nodes at the beginning of upgrade and the state
of the nodes at the time of the health evaluation.
The check is performed after every upgrade domain upgrade completion to make sure the global
state of the cluster is within tolerated limits.
:type max_percent_delta_unhealthy_nodes: int
:param max_percent_upgrade_domain_delta_unhealthy_nodes: Required. The maximum allowed
percentage of upgrade domain nodes health degradation allowed during cluster upgrades.
The delta is measured between the state of the upgrade domain nodes at the beginning of
upgrade and the state of the upgrade domain nodes at the time of the health evaluation.
The check is performed after every upgrade domain upgrade completion for all completed upgrade
domains to make sure the state of the upgrade domains is within tolerated limits.
:type max_percent_upgrade_domain_delta_unhealthy_nodes: int
:param max_percent_delta_unhealthy_applications: Required. The maximum allowed percentage of
applications health degradation allowed during cluster upgrades.
The delta is measured between the state of the applications at the beginning of upgrade and
the state of the applications at the time of the health evaluation.
The check is performed after every upgrade domain upgrade completion to make sure the global
state of the cluster is within tolerated limits. System services are not included in this.
:type max_percent_delta_unhealthy_applications: int
:param application_delta_health_policies: Defines the application delta health policy map used
to evaluate the health of an application or one of its child entities when upgrading the
cluster.
:type application_delta_health_policies: dict[str,
~azure.mgmt.servicefabric.models.ApplicationDeltaHealthPolicy]
"""
_validation = {
'max_percent_delta_unhealthy_nodes': {'required': True, 'maximum': 100, 'minimum': 0},
'max_percent_upgrade_domain_delta_unhealthy_nodes': {'required': True, 'maximum': 100, 'minimum': 0},
'max_percent_delta_unhealthy_applications': {'required': True, 'maximum': 100, 'minimum': 0},
}
_attribute_map = {
'max_percent_delta_unhealthy_nodes': {'key': 'maxPercentDeltaUnhealthyNodes', 'type': 'int'},
'max_percent_upgrade_domain_delta_unhealthy_nodes': {'key': 'maxPercentUpgradeDomainDeltaUnhealthyNodes', 'type': 'int'},
'max_percent_delta_unhealthy_applications': {'key': 'maxPercentDeltaUnhealthyApplications', 'type': 'int'},
'application_delta_health_policies': {'key': 'applicationDeltaHealthPolicies', 'type': '{ApplicationDeltaHealthPolicy}'},
}
def __init__(
self,
**kwargs
):
super(ClusterUpgradeDeltaHealthPolicy, self).__init__(**kwargs)
self.max_percent_delta_unhealthy_nodes = kwargs['max_percent_delta_unhealthy_nodes']
self.max_percent_upgrade_domain_delta_unhealthy_nodes = kwargs['max_percent_upgrade_domain_delta_unhealthy_nodes']
self.max_percent_delta_unhealthy_applications = kwargs['max_percent_delta_unhealthy_applications']
self.application_delta_health_policies = kwargs.get('application_delta_health_policies', None)
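# Illustrative usage sketch (not part of the generated SDK file; the helper name
# and values are made up): the three required percentages are plain integers in
# [0, 100], passed as keyword arguments.
def _example_cluster_upgrade_delta_health_policy():
    return ClusterUpgradeDeltaHealthPolicy(
        max_percent_delta_unhealthy_nodes=10,
        max_percent_upgrade_domain_delta_unhealthy_nodes=15,
        max_percent_delta_unhealthy_applications=10,
    )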
class ClusterUpgradePolicy(msrest.serialization.Model):
"""Describes the policy used when upgrading the cluster.
All required parameters must be populated in order to send to Azure.
:param force_restart: If true, then processes are forcefully restarted during upgrade even when
the code version has not changed (the upgrade only changes configuration or data).
:type force_restart: bool
:param upgrade_replica_set_check_timeout: Required. The maximum amount of time to block
processing of an upgrade domain and prevent loss of availability when there are unexpected
issues. When this timeout expires, processing of the upgrade domain will proceed regardless of
availability loss issues. The timeout is reset at the start of each upgrade domain. The timeout
can be in either hh:mm:ss or in d.hh:mm:ss.ms format.
:type upgrade_replica_set_check_timeout: str
:param health_check_wait_duration: Required. The length of time to wait after completing an
upgrade domain before performing health checks. The duration can be in either hh:mm:ss or in
d.hh:mm:ss.ms format.
:type health_check_wait_duration: str
:param health_check_stable_duration: Required. The amount of time that the application or
cluster must remain healthy before the upgrade proceeds to the next upgrade domain. The
duration can be in either hh:mm:ss or in d.hh:mm:ss.ms format.
:type health_check_stable_duration: str
:param health_check_retry_timeout: Required. The amount of time to retry health evaluation when
the application or cluster is unhealthy before the upgrade rolls back. The timeout can be in
either hh:mm:ss or in d.hh:mm:ss.ms format.
:type health_check_retry_timeout: str
:param upgrade_timeout: Required. The amount of time the overall upgrade has to complete before
the upgrade rolls back. The timeout can be in either hh:mm:ss or in d.hh:mm:ss.ms format.
:type upgrade_timeout: str
:param upgrade_domain_timeout: Required. The amount of time each upgrade domain has to complete
before the upgrade rolls back. The timeout can be in either hh:mm:ss or in d.hh:mm:ss.ms
format.
:type upgrade_domain_timeout: str
:param health_policy: Required. The cluster health policy used when upgrading the cluster.
:type health_policy: ~azure.mgmt.servicefabric.models.ClusterHealthPolicy
:param delta_health_policy: The cluster delta health policy used when upgrading the cluster.
:type delta_health_policy: ~azure.mgmt.servicefabric.models.ClusterUpgradeDeltaHealthPolicy
"""
_validation = {
'upgrade_replica_set_check_timeout': {'required': True},
'health_check_wait_duration': {'required': True},
'health_check_stable_duration': {'required': True},
'health_check_retry_timeout': {'required': True},
'upgrade_timeout': {'required': True},
'upgrade_domain_timeout': {'required': True},
'health_policy': {'required': True},
}
_attribute_map = {
'force_restart': {'key': 'forceRestart', 'type': 'bool'},
'upgrade_replica_set_check_timeout': {'key': 'upgradeReplicaSetCheckTimeout', 'type': 'str'},
'health_check_wait_duration': {'key': 'healthCheckWaitDuration', 'type': 'str'},
'health_check_stable_duration': {'key': 'healthCheckStableDuration', 'type': 'str'},
'health_check_retry_timeout': {'key': 'healthCheckRetryTimeout', 'type': 'str'},
'upgrade_timeout': {'key': 'upgradeTimeout', 'type': 'str'},
'upgrade_domain_timeout': {'key': 'upgradeDomainTimeout', 'type': 'str'},
'health_policy': {'key': 'healthPolicy', 'type': 'ClusterHealthPolicy'},
'delta_health_policy': {'key': 'deltaHealthPolicy', 'type': 'ClusterUpgradeDeltaHealthPolicy'},
}
def __init__(
self,
**kwargs
):
super(ClusterUpgradePolicy, self).__init__(**kwargs)
self.force_restart = kwargs.get('force_restart', None)
self.upgrade_replica_set_check_timeout = kwargs['upgrade_replica_set_check_timeout']
self.health_check_wait_duration = kwargs['health_check_wait_duration']
self.health_check_stable_duration = kwargs['health_check_stable_duration']
self.health_check_retry_timeout = kwargs['health_check_retry_timeout']
self.upgrade_timeout = kwargs['upgrade_timeout']
self.upgrade_domain_timeout = kwargs['upgrade_domain_timeout']
self.health_policy = kwargs['health_policy']
self.delta_health_policy = kwargs.get('delta_health_policy', None)
class ClusterVersionDetails(msrest.serialization.Model):
"""The detail of the Service Fabric runtime version result.
:param code_version: The Service Fabric runtime version of the cluster.
:type code_version: str
:param support_expiry_utc: The date of expiry of support of the version.
:type support_expiry_utc: str
:param environment: Indicates if this version is for Windows or Linux operating system.
Possible values include: "Windows", "Linux".
:type environment: str or ~azure.mgmt.servicefabric.models.ClusterEnvironment
"""
_attribute_map = {
'code_version': {'key': 'codeVersion', 'type': 'str'},
'support_expiry_utc': {'key': 'supportExpiryUtc', 'type': 'str'},
'environment': {'key': 'environment', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ClusterVersionDetails, self).__init__(**kwargs)
self.code_version = kwargs.get('code_version', None)
self.support_expiry_utc = kwargs.get('support_expiry_utc', None)
self.environment = kwargs.get('environment', None)
class DiagnosticsStorageAccountConfig(msrest.serialization.Model):
"""The storage account information for storing Service Fabric diagnostic logs.
All required parameters must be populated in order to send to Azure.
:param storage_account_name: Required. The Azure storage account name.
:type storage_account_name: str
:param protected_account_key_name: Required. The protected diagnostics storage key name.
:type protected_account_key_name: str
:param protected_account_key_name2: The secondary protected diagnostics storage key name. If
one of the storage account keys is rotated the cluster will fallback to using the other.
:type protected_account_key_name2: str
:param blob_endpoint: Required. The blob endpoint of the azure storage account.
:type blob_endpoint: str
:param queue_endpoint: Required. The queue endpoint of the azure storage account.
:type queue_endpoint: str
:param table_endpoint: Required. The table endpoint of the azure storage account.
:type table_endpoint: str
"""
_validation = {
'storage_account_name': {'required': True},
'protected_account_key_name': {'required': True},
'blob_endpoint': {'required': True},
'queue_endpoint': {'required': True},
'table_endpoint': {'required': True},
}
_attribute_map = {
'storage_account_name': {'key': 'storageAccountName', 'type': 'str'},
'protected_account_key_name': {'key': 'protectedAccountKeyName', 'type':
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
'test_shell_windows_resource_files': [
'resources/test_shell.rc',
'resources/pan_east.cur',
'resources/pan_middle.cur',
'resources/pan_north.cur',
'resources/pan_north_east.cur',
'resources/pan_north_west.cur',
'resources/pan_south.cur',
'resources/pan_south_east.cur',
'resources/pan_south_west.cur',
'resources/pan_west.cur',
'resources/small.ico',
'resources/test_shell.ico',
'resource.h',
],
},
'targets': [
{
'target_name': 'test_shell_common',
'type': '<(library)',
'dependencies': [
'../../../app/app.gyp:app_base',
'../../../base/base.gyp:base',
'../../../base/base.gyp:base_i18n',
'../../../media/media.gyp:media',
'../../../net/net.gyp:net',
'../../../skia/skia.gyp:skia',
'../../../testing/gmock.gyp:gmock',
'../../../testing/gtest.gyp:gtest',
'../../../third_party/npapi/npapi.gyp:npapi',
'../../../third_party/WebKit/WebCore/WebCore.gyp/WebCore.gyp:webcore',
'../../../third_party/WebKit/WebKit/chromium/WebKit.gyp:webkit',
'../../webkit.gyp:appcache',
'../../webkit.gyp:database',
'../../webkit.gyp:glue',
'../../webkit.gyp:inspector_resources',
'npapi_layout_test_plugin',
],
'msvs_guid': '77C32787-1B96-CB84-B905-7F170629F0AC',
'sources': [
'mac/DumpRenderTreePasteboard.h',
'mac/DumpRenderTreePasteboard.m',
'mac/test_shell_webview.h',
'mac/test_shell_webview.mm',
'mac/test_webview_delegate.mm',
'mac/webview_host.mm',
'mac/webwidget_host.mm',
'accessibility_controller.cc',
'accessibility_controller.h',
'accessibility_ui_element.cc',
'accessibility_ui_element.h',
'drag_delegate.cc',
'drag_delegate.h',
'drop_delegate.cc',
'drop_delegate.h',
'event_sending_controller.cc',
'event_sending_controller.h',
'foreground_helper.h',
'layout_test_controller.cc',
'layout_test_controller.h',
'mock_webclipboard_impl.cc',
'mock_webclipboard_impl.h',
'plain_text_controller.cc',
'plain_text_controller.h',
'resource.h',
'simple_appcache_system.cc',
'simple_appcache_system.h',
'simple_clipboard_impl.cc',
'simple_database_system.cc',
'simple_database_system.h',
'simple_resource_loader_bridge.cc',
'simple_resource_loader_bridge.h',
'simple_socket_stream_bridge.cc',
'simple_socket_stream_bridge.h',
'test_navigation_controller.cc',
'test_navigation_controller.h',
'test_shell.cc',
'test_shell.h',
'test_shell_devtools_agent.cc',
'test_shell_devtools_agent.h',
'test_shell_devtools_callargs.cc',
'test_shell_devtools_callargs.h',
'test_shell_devtools_client.cc',
'test_shell_devtools_client.h',
'test_shell_gtk.cc',
'test_shell_x11.cc',
'test_shell_mac.mm',
'test_shell_platform_delegate.h',
'test_shell_platform_delegate_gtk.cc',
'test_shell_platform_delegate_mac.mm',
'test_shell_platform_delegate_win.cc',
'test_shell_request_context.cc',
'test_shell_request_context.h',
'test_shell_switches.cc',
'test_shell_switches.h',
'test_shell_win.cc',
'test_shell_webkit_init.h',
'test_shell_webthemecontrol.h',
'test_shell_webthemecontrol.cc',
'test_shell_webthemeengine.h',
'test_shell_webthemeengine.cc',
'test_web_worker.h',
'test_webview_delegate.cc',
'test_webview_delegate.h',
'test_webview_delegate_gtk.cc',
'test_webview_delegate_win.cc',
'text_input_controller.cc',
'text_input_controller.h',
'webview_host.h',
'webview_host_gtk.cc',
'webview_host_win.cc',
'webwidget_host.h',
'webwidget_host_gtk.cc',
'webwidget_host_win.cc',
],
'export_dependent_settings': [
'../../../base/base.gyp:base',
'../../../net/net.gyp:net',
'../../../third_party/WebKit/WebCore/WebCore.gyp/WebCore.gyp:webcore',
'../../../third_party/WebKit/WebKit/chromium/WebKit.gyp:webkit',
'../../webkit.gyp:glue',
],
'conditions': [
# http://code.google.com/p/chromium/issues/detail?id=18337
['target_arch!="x64" and target_arch!="arm"', {
'dependencies': [
'npapi_test_plugin',
],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
'dependencies': [
'test_shell_resources',
'../../../build/linux/system.gyp:gtk',
'../../../tools/xdisplaycheck/xdisplaycheck.gyp:xdisplaycheck',
],
# for: test_shell_gtk.cc
'cflags': ['-Wno-multichar'],
}, { # else: OS!=linux
'sources/': [
['exclude', '_gtk\\.cc$'],
['exclude', '_x11\\.cc$'],
],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
# See below TODO in the Windows branch.
'copies': [
{
'destination': '<(PRODUCT_DIR)/plugins',
'files': ['<(PRODUCT_DIR)/libnpapi_layout_test_plugin.so'],
},
],
}],
['OS!="mac"', {
'sources/': [
['exclude', 'mac/[^/]*\\.(cc|mm?)$'],
['exclude', '_mac\\.(cc|mm?)$'],
]
}],
['OS=="win"', {
'msvs_disabled_warnings': [ 4800 ],
'link_settings': {
'libraries': [
'-lcomctl32.lib',
],
},
'include_dirs': [
'../../../chrome/third_party/wtl/include',
'.',
],
'dependencies': [
'../../../breakpad/breakpad.gyp:breakpad_handler',
'../../default_plugin/default_plugin.gyp:default_plugin',
],
# TODO(bradnelson):
# This should really be done in the 'npapi_layout_test_plugin'
# target, but the current VS generator handles 'copies'
# settings as AdditionalDependencies, which means that
# when it's over there, it tries to do the copy *before*
# the file is built, instead of after. We work around this
# by attaching the copy here, since it depends on that
# target.
'copies': [
{
'destination': '<(PRODUCT_DIR)/plugins',
'files': ['<(PRODUCT_DIR)/npapi_layout_test_plugin.dll'],
},
],
}, { # else: OS!=win
'sources/': [
['exclude', '_win\\.cc$'],
['exclude', '_webtheme(control|engine)\\.(cc|h)$'],
],
'sources!': [
'drag_delegate.cc',
'drop_delegate.cc',
],
}],
],
},
{
'target_name': 'test_shell_pak',
'type': 'none',
'variables': {
'repack_path': '../../../tools/data_pack/repack.py',
'pak_path': '<(INTERMEDIATE_DIR)/repack/test_shell.pak',
},
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
'actions': [
{
'action_name': 'test_shell_repack',
'variables': {
'pak_inputs': [
'<(SHARED_INTERMEDIATE_DIR)/net/net_resources.pak',
'<(SHARED_INTERMEDIATE_DIR)/test_shell/test_shell_resources.pak',
'<(SHARED_INTERMEDIATE_DIR)/webkit/webkit_resources.pak',
'<(SHARED_INTERMEDIATE_DIR)/webkit/webkit_strings_en-US.pak',
],
},
'inputs': [
'<(repack_path)',
'<@(pak_inputs)',
],
'outputs': [
'<(pak_path)',
],
'action': ['python', '<(repack_path)', '<@(_outputs)', '<@(pak_inputs)'],
},
],
'copies': [
{
'destination': '<(PRODUCT_DIR)',
'files': ['<(pak_path)'],
},
],
}],
],
},
{
'target_name': 'test_shell',
'type': 'executable',
'mac_bundle': 1,
'msvs_guid': 'FA39524D-3067-4141-888D-28A86C66F2B9',
'dependencies': [
'test_shell_common',
'../../../tools/imagediff/image_diff.gyp:image_diff',
],
'defines': [
# Technically not a unit test but require functions available only to
# unit tests.
'UNIT_TEST'
],
'sources': [
'test_shell_main.cc',
],
'mac_bundle_resources': [
'../../data/test_shell/',
'mac/English.lproj/InfoPlist.strings',
'mac/English.lproj/MainMenu.nib',
'mac/Info.plist',
'mac/test_shell.icns',
'resources/AHEM____.TTF',
],
'mac_bundle_resources!': [
# TODO(mark): Come up with a fancier way to do this (mac_info_plist?)
# that automatically sets the correct INFOPLIST_FILE setting and adds
# the file to a source group.
'mac/Info.plist',
],
'xcode_settings': {
'INFOPLIST_FILE': 'mac/Info.plist',
},
'conditions': [
['OS=="win"', {
'dependencies': ['layout_test_helper'],
'resource_include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)/webkit',
],
'sources': [
'<@(test_shell_windows_resource_files)',
# TODO: It would be nice to have these pulled in
# automatically from direct_dependent_settings in
# their various targets (net.gyp:net_resources, etc.),
# but that causes errors in other targets when
# resulting .res files get referenced multiple times.
'<(SHARED_INTERMEDIATE_DIR)/net/net_resources.rc',
'<(SHARED_INTERMEDIATE_DIR)/webkit/webkit_resources.rc',
'<(SHARED_INTERMEDIATE_DIR)/webkit/webkit_strings_en-US.rc',
],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
'conditions': [
['linux_use_tcmalloc==1', {
'dependencies': [
'../../../base/allocator/allocator.gyp:allocator',
],
}],
],
'dependencies': [
'../../../build/linux/system.gyp:gtk',
'test_shell_resources',
'test_shell_pak',
],
}],
['OS=="mac"', {
'product_name': 'TestShell',
'dependencies': ['layout_test_helper'],
'variables': {
'repack_path': '../../../tools/data_pack/repack.py',
},
'actions': [
{
# TODO(mark): Make this work with more languages than the
# hardcoded en-US.
'action_name': 'repack_locale',
'variables': {
'pak_inputs': [
'<(SHARED_INTERMEDIATE_DIR)/webkit/webkit_strings_en-US.pak',
'<(SHARED_INTERMEDIATE_DIR)/webkit/webkit_resources.pak',
],
},
'inputs': [
'<(repack_path)',
'<@(pak_inputs)',
],
'outputs': [
'<(INTERMEDIATE_DIR)/repack/test_shell.pak',
],
'action': ['python', '<(repack_path)', '<@(_outputs)', '<@(pak_inputs)'],
'process_outputs_as_mac_bundle_resources': 1,
},
],
'copies': [
{
'destination': '<(PRODUCT_DIR)/TestShell.app/Contents/PlugIns/',
'files': [
'<(PRODUCT_DIR)/TestNetscapePlugIn.plugin/',
],
},
# TODO(ajwong): This, and the parallel chromium stanza below
# really should find a way to share file paths with
# ffmpeg.gyp so they don't diverge. (BUG=23602)
{
'destination': '<(PRODUCT_DIR)/TestShell.app/Contents/MacOS/',
'files': [
'<(PRODUCT_DIR)/libffmpegsumo.dylib',
],
},
],
}, { # OS != "mac"
'dependencies': [
'../../../net/net.gyp:net_resources',
'../../webkit.gyp:webkit_resources',
'../../webkit.gyp:webkit_strings',
]
}],
],
},
{
'target_name': 'test_shell_tests',
'type': 'executable',
'msvs_guid': 'E6766F81-1FCD-4CD7-BC16-E36964A14867',
'dependencies': [
'test_shell_common',
'../../../skia/skia.gyp:skia',
'../../../testing/gmock.gyp:gmock',
'../../../testing/gtest.gyp:gtest',
],
'sources': [
'../../../skia/ext/convolver_unittest.cc',
'../../../skia/ext/image_operations_unittest.cc',
'../../../skia/ext/platform_canvas_unittest.cc',
'../../../skia/ext/vector_canvas_unittest.cc',
'../../appcache/manifest_parser_unittest.cc',
'../../appcache/appcache_unittest.cc',
'../../appcache/appcache_database_unittest.cc',
'../../appcache/appcache_group_unittest.cc',
'../../appcache/appcache_host_unittest.cc',
'../../appcache/appcache_request_handler_unittest.cc',
'../../appcache/appcache_response_unittest.cc',
'../../appcache/appcache_storage_unittest.cc',
'../../appcache/appcache_storage_impl_unittest.cc',
'../../appcache/appcache_update_job_unittest.cc',
'../../appcache/appcache_url_request_job_unittest.cc',
'../../appcache/mock_appcache_service.h',
'../../appcache/mock_appcache_storage_unittest.cc',
'../../database/databases_table_unittest.cc',
'../../database/database_tracker_unittest.cc',
'../../database/database_util_unittest.cc',
'../../database/quota_table_unittest.cc',
'../../glue/bookmarklet_unittest.cc',
'../../glue/context_menu_unittest.cc',
'../../glue/cpp_bound_class_unittest.cc',
'../../glue/cpp_variant_unittest.cc',
'../../glue/dom_operations_unittest.cc',
'../../glue/dom_serializer_unittest.cc',
'../../glue/glue_serialize_unittest.cc',
'../../glue/iframe_redirect_unittest.cc',
'../../glue/media/buffered_data_source_unittest.cc',
'../../glue/media/media_resource_loader_bridge_factory_unittest.cc',
'../../glue/media/mock_media_resource_loader_bridge_factory.h',
'../../glue/media/simple_data_source_unittest.cc',
'../../glue/mimetype_unittest.cc',
'../../glue/mock_resource_loader_bridge.h',
'../../glue/multipart_response_delegate_unittest.cc',
'../../glue/plugins/plugin_lib_unittest.cc',
'../../glue/regular_expression_unittest.cc',
'../../glue/resource_fetcher_unittest.cc',
'../../glue/unittest_test_server.h',
'../../glue/webcursor_unittest.cc',
'../../glue/webframe_unittest.cc',
'../../glue/webkit_glue_unittest.cc',
'../../glue/webpasswordautocompletelistener_unittest.cc',
'../../glue/webplugin_impl_unittest.cc',
'../../glue/webview_unittest.cc',
'../webcore_unit_tests/BMPImageDecoder_unittest.cpp',
'../webcore_unit_tests/GKURL_unittest.cpp',
'../webcore_unit_tests/ICOImageDecoder_unittest.cpp',
'../webcore_unit_tests/UniscribeHelper_unittest.cpp',
'../webcore_unit_tests/XBMImageDecoder_unittest.cpp',
'../webcore_unit_tests/TransparencyWin_unittest.cpp',
'image_decoder_unittest.cc',
'image_decoder_unittest.h',
'keyboard_unittest.cc',
'layout_test_controller_unittest.cc',
'listener_leak_test.cc',
'media_leak_test.cc',
'node_leak_test.cc',
'plugin_tests.cc',
'run_all_tests.cc',
'test_shell_test.cc',
'test_shell_test.h',
'text_input_controller_unittest.cc',
],
'conditions': [
['OS=="win"', {
'resource_include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)/webkit',
],
'sources': [ '<@(test_shell_windows_resource_files)' ],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
'dependencies': [
'test_shell_pak',
'../../../build/linux/system.gyp:gtk',
],
'sources!': [
# TODO(port)
'../../../skia/ext/platform_canvas_unittest.cc',
],
}],
['OS=="mac"', {
# mac tests load the resources from the built test_shell beside the
# test
'dependencies': ['test_shell'],
'sources!': [
# Disable the image decoder tests because we use CoreGraphics
# code on mac and these tests are for the Skia image-decoders.
'../webcore_unit_tests/BMPImageDecoder_unittest.cpp',
'../webcore_unit_tests/ICOImageDecoder_unittest.cpp',
'../webcore_unit_tests/XBMImageDecoder_unittest.cpp',
'image_decoder_unittest.cc',
'image_decoder_unittest.h',
],
'sources': [
'../../../skia/ext/skia_utils_mac_unittest.mm',
],
}],
['OS=="win"', {
'msvs_disabled_warnings': [ 4800 ],
}, { # else: OS!=win
'sources!': [
'../../../skia/ext/vector_canvas_unittest.cc',
'../webcore_unit_tests/UniscribeHelper_unittest.cpp',
'../webcore_unit_tests/TransparencyWin_unittest.cpp',
],
}],
['OS=="linux" or OS=="freebsd"', {
'conditions': [
['linux_use_tcmalloc==1', {
'dependencies': [
'../../../base/allocator/allocator.gyp:allocator',
],
}],
],
}],
],
},
{
'target_name': 'npapi_layout_test_plugin',
'type': 'loadable_module',
'mac_bundle': 1,
'msvs_guid': 'BE6D5659-A8D5-4890-A42C-090DD10EF62C',
'sources': [
'../npapi_layout_test_plugin/PluginObject.cpp',
'../npapi_layout_test_plugin/TestObject.cpp',
'../npapi_layout_test_plugin/main.cpp',
'../npapi_layout_test_plugin/npapi_layout_test_plugin.def',
'../npapi_layout_test_plugin/npapi_layout_test_plugin.rc',
],
'include_dirs': [
'../../..',
],
'dependencies': [
'../../../third_party/npapi/npapi.gyp:npapi',
'../../../third_party/WebKit/JavaScriptCore/JavaScriptCore.gyp/JavaScriptCore.gyp:wtf',
],
'msvs_disabled_warnings': [ 4996 ],
'mac_bundle_resources': [
'../npapi_layout_test_plugin/Info.r',
],
'xcode_settings': {
'INFOPLIST_FILE': '../npapi_layout_test_plugin/Info.plist',
},
'conditions': [
['OS!="win"', {
'sources!': [
'../npapi_layout_test_plugin/npapi_layout_test_plugin.def',
'../npapi_layout_test_plugin/npapi_layout_test_plugin.rc',
],
# TODO(bradnelson):
# This copy should really live here, as a post-build step,
# but it's currently being implemented via
# AdditionalDependencies, which tries to do the copy before
# the file is built...
#
}, { # OS == "win"
# # The old VS build would explicitly copy the .dll into the
# # plugins subdirectory like this. It might be possible to
# # use the 'product_dir' setting to build directly into
# # plugins/ (as is done on Linux), but we'd need to verify
# # that nothing breaks first.
# 'copies': [
# {
# 'destination': '<(PRODUCT_DIR)/plugins',
# 'files': ['<(PRODUCT_DIR)/npapi_layout_test_plugin.dll'],
# },
# ],
'link_settings': {
'libraries': [
"winmm.lib",
],
},
}],
['OS=="mac"', {
'product_name': 'TestNetscapePlugIn',
'product_extension': 'plugin',
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Carbon.framework',
],
},
}],
['(OS=="linux" or OS=="freebsd" or OS=="openbsd") and (target_arch=="x64" or target_arch=="arm")', {
# Shared libraries need -fPIC on x86-64
'cflags': ['-fPIC']
}],
],
},
],
'conditions': [
['target_arch!="x64" and target_arch!="arm"', {
'targets': [
{
'target_name': 'npapi_test_plugin',
'type': 'loadable_module',
'mac_bundle': 1,
'msvs_guid': '0D04AEC1-6B68-492C-BCCF-808DFD69ABC6',
'dependencies': [
'../../../base/base.gyp:base',
'../../../third_party/icu/icu.gyp:icuuc',
'../../../third_party/npapi/npapi.gyp:npapi',
],
'sources': [
'../../glue/plugins/test/npapi_constants.cc',
'../../glue/plugins/test/npapi_constants.h',
'../../glue/plugins/test/npapi_test.cc',
'../../glue/plugins/test/npapi_test.def',
'../../glue/plugins/test/npapi_test.rc',
'../../glue/plugins/test/plugin_arguments_test.cc',
'../../glue/plugins/test/plugin_arguments_test.h',
'../../glue/plugins/test/plugin_client.cc',
'../../glue/plugins/test/plugin_client.h',
'../../glue/plugins/test/plugin_create_instance_in_paint.cc',
'../../glue/plugins/test/plugin_create_instance_in_paint.h',
'../../glue/plugins/test/plugin_delete_plugin_in_stream_test.cc',
'../../glue/plugins/test/plugin_delete_plugin_in_stream_test.h',
'../../glue/plugins/test/plugin_get_javascript_url_test.cc',
'../../glue/plugins/test/plugin_get_javascript_url_test.h',
'../../glue/plugins/test/plugin_get_javascript_url2_test.cc',
'../../glue/plugins/test/plugin_get_javascript_url2_test.h',
'../../glue/plugins/test/plugin_geturl_test.cc',
'../../glue/plugins/test/plugin_geturl_test.h',
'../../glue/plugins/test/plugin_javascript_open_popup.cc',
'../../glue/plugins/test/plugin_javascript_open_popup.h',
'../../glue/plugins/test/plugin_new_fails_test.cc',
'../../glue/plugins/test/plugin_new_fails_test.h',
'../../glue/plugins/test/plugin_npobject_lifetime_test.cc',
'../../glue/plugins/test/plugin_npobject_lifetime_test.h',
'../../glue/plugins/test/plugin_npobject_proxy_test.cc',
'../../glue/plugins/test/plugin_npobject_proxy_test.h',
'../../glue/plugins/test/plugin_schedule_timer_test.cc',
'../../glue/plugins/test/plugin_schedule_timer_test.h',
'../../glue/plugins/test/plugin_thread_async_call_test.cc',
'../../glue/plugins/test/plugin_thread_async_call_test.h',
'../../glue/plugins/test/plugin_windowed_test.cc',
'../../glue/plugins/test/plugin_windowed_test.h',
'../../glue/plugins/test/plugin_private_test.cc',
'../../glue/plugins/test/plugin_private_test.h',
'../../glue/plugins/test/plugin_test.cc',
'../../glue/plugins/test/plugin_test.h',
'../../glue/plugins/test/plugin_window_size_test.cc',
'../../glue/plugins/test/plugin_window_size_test.h',
'../../glue/plugins/test/plugin_windowless_test.cc',
'../../glue/plugins/test/plugin_windowless_test.h',
'../../glue/plugins/test/resource.h',
],
'include_dirs': [
'../../..',
],
'xcode_settings': {
'INFOPLIST_FILE': '../../glue/plugins/test/Info.plist',
},
'conditions': [
['OS!="win"', {
'sources!': [
# TODO(port): Port these.
# plugin_npobject_lifetime_test.cc has win32-isms
# (HWND, CALLBACK).
'../../glue/plugins/test/plugin_npobject_lifetime_test.cc',
# The window APIs are necessarily platform-specific.
'../../glue/plugins/test/plugin_window_size_test.cc',
'../../glue/plugins/test/plugin_windowed_test.cc',
# Seems windows specific.
'../../glue/plugins/test/plugin_create_instance_in_paint.cc',
'../../glue/plugins/test/plugin_create_instance_in_paint.h',
# windows-specific resources
'../../glue/plugins/test/npapi_test.def',
'../../glue/plugins/test/npapi_test.rc',
],
}],
['OS=="mac"', {
'product_extension': 'plugin',
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Carbon.framework',
],
},
}],
['OS=="linux" or OS=="freebsd"', {
'sources!': [
# Needs simple event record type porting
'../../glue/plugins/test/plugin_windowless_test.cc',
],
}],
['(OS=="linux" or OS=="freebsd" or OS=="openbsd") and (target_arch=="x64" or target_arch=="arm")', {
# Shared libraries need -fPIC on x86-64
'cflags': ['-fPIC']
}],
],
},
],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
'targets': [
{
'target_name':
from __future__ import division
import numpy as np
from matplotlib.ticker import AutoMinorLocator
from matplotlib.ticker import MultipleLocator
from matplotlib.ticker import MaxNLocator
from scipy.interpolate import LinearNDInterpolator
import matplotlib.pyplot as plt
import time
import sys
import os
sys.path.append("../")
import NMSSM_potential_CT as CTmod
###############################################################
inname = '../data/Scan_EXAMPLE'
###############################################################
# parameters for classification
###############################################################
SameXThreshold = 5. # minimum (Euclidean) distance [GeV] for two minima to be considered distinct
StrongThreshold = 1. # min v_c/T_c (for Tcrit results) or v_n/T_n (for Tnucl results)
SFOEWPT_lTThreshold = 1. # criterion for sphaleron suppression in low-T phase
SFOEWPT_hTThreshold = 0.5 # criterion for sphaleron suppression in high-T phase
nuclCond = 140. # S_3/T threshold for nucleation
nuclCondThreshold = 0.1*nuclCond # tolerance for successful calculation of nucleation condition
Tmax = 1e3 # max temperature [GeV], should match the CosmoTransitions setting
dtFracMax = .25 # allowed tolerance for relative temperature difference when matching phases
CNSMThreshold = 0.1 # max allowed H^NSM mixing angle of SM-like Higgs
CSThreshold = 0.2 # max allowed H^S mixing angle of SM-like Higgs
muLEP=100 # chargino bound from LEP on mu/GeV
v2 = 172.**2 # squared vev of H^SM [GeV^2]
###############################################################
# load parameter lists
###############################################################
failed_params = np.loadtxt(inname+'/points_failed_general.txt')
good_params = np.loadtxt(inname+'/points_good_general.txt')
# remove points violating the mixing limits:
inds_mixing_fail = list(set(np.where(np.abs(good_params[:,14])*good_params[:,3] > CNSMThreshold)[0]) | set(np.where(np.abs(good_params[:,15]) > CSThreshold)[0]))
for i in inds_mixing_fail:
failed_params = np.concatenate((failed_params, [np.append(np.append(good_params[i,:-1], 0), good_params[i,-1])] ))
good_params = np.delete(good_params, inds_mixing_fail, axis=0)
header = ['index', 'lambda', 'kappa', 'tanbeta', 'mu/GeV', 'Alam/GeV', 'Akap/GeV', 'DeltaLambda2', 'M1', 'M2', 'mh125/GeV', 'mH/GeV', 'mhS/GeV', 'C_h125^SM', 'C_h125^NSM', 'C_h125^S', 'C_H^SM', 'C_H^NSM', 'C_H^S', 'C_hS^SM', 'C_hS^NSM', 'C_hS^S']
header_failed = header+['failFlag']
###############################################################
# get vs'
###############################################################
def add_vsprimeCW(param_list):
param_list_out = np.zeros((param_list.shape[0], param_list.shape[1]+1))
param_list_out[:,:-1] = param_list
for i in range(param_list.shape[0]):
NMSSMparams = param_list[i,1:10]
mod = CTmod.model1(NMSSMparams)
param_list_out[i,-1] = mod.get_vsprimeCW()
return param_list_out
failed_params = add_vsprimeCW(failed_params)
good_params = add_vsprimeCW(good_params)
header_failed += ['vspCW/GeV']
header_good = header+['vspCW/GeV']
###############################################################
# functions for categorization
###############################################################
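# The transition rows loaded from '<point>_Tc_out.txt' are indexed below as
# follows (inferred from how the helpers use them; not documented in the
# original script):
#   transition[0:3]  low-T phase location (H^SM, H^NSM, H^S) [GeV]
#   transition[3]    key of the low-T phase
#   transition[4:7]  high-T phase location [GeV]
#   transition[7]    key of the high-T phase
#   transition[8]    transition temperature [GeV]
#   transition[9]    nucleation action S_3 (zero for 2nd-order transitions)
#   transition[-1]   order of the transition (1 = first order, 2 = second order)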
def trans_start_from_trivial(transition, ind):
"""
returns True if the transition starts from the trivial phase,
False otherwise
"""
hT_phase = np.loadtxt(inname+'/good_'+str(int(good_params[ind,0]))+'_phase_'+str(int(transition[7]))+'.txt')
if (np.linalg.norm(hT_phase[-1,0:3]-np.array([0.,0.,0.])) < SameXThreshold
and hT_phase[-1,3] == Tmax):
return True
else:
return False
def trans_end_physical(transition, ind):
"""
returns True if the transition ends in the physical phase,
False otherwise
"""
lT_phase = np.loadtxt(inname+'/good_'+str(int(good_params[ind,0]))+'_phase_'+str(int(transition[3]))+'.txt')
Xphys = np.array([np.sqrt(2.*v2), 0., np.sqrt(2.)*good_params[ind,4]/good_params[ind,1]])
if (np.linalg.norm(lT_phase[0,0:3]-Xphys) < SameXThreshold
and lT_phase[0,3] == 0.):
return True
else:
return False
def trans_end_HSonly(transition, ind):
"""
returns True if the transition ends in a phase where H^SM = H^NSM = 0
False otherwise
"""
lT_phase = np.loadtxt(inname+'/good_'+str(int(good_params[ind,0]))+'_phase_'+str(int(transition[3]))+'.txt')
if (np.linalg.norm(lT_phase[0,0:2]) < SameXThreshold
and np.abs(lT_phase[0,2]) > SameXThreshold):
return True
else:
return False
def trans_end_doublet_only(transition, ind):
"""
returns True if the transition ends in a phase where H^SM or H^NSM is non-zero,
False otherwise
"""
lT_phase = np.loadtxt(inname+'/good_'+str(int(good_params[ind,0]))+'_phase_'+str(int(transition[3]))+'.txt')
if (np.linalg.norm(lT_phase[0,0:2]) > SameXThreshold
and np.abs(lT_phase[0,2]) < SameXThreshold):
return True
else:
return False
def trans_strongly_first_order(transition):
"""
returns True if the transition is strongly first order,
False otherwise
"""
if transition[-1] == 1:
if np.linalg.norm(transition[0:3]-transition[4:7])/transition[8] > StrongThreshold:
return True
else:
return False
def trans_strongly_EW_first_order(transition):
"""
returns True if the EW part of the transition is strongly first order, i.e. the
doublet vev over T_c exceeds SFOEWPT_lTThreshold in the low-T phase and stays
below SFOEWPT_hTThreshold in the high-T phase; False otherwise
"""
if transition[-1] == 1:
if np.linalg.norm(transition[0:2])/transition[8] > SFOEWPT_lTThreshold and np.linalg.norm(transition[4:6])/transition[8] < SFOEWPT_hTThreshold:
return True
else:
return False
def transition_real(transition, ind):
"""
checks that the transition is not just a numerical glitch
Returns False if the transition appears to be a glitch (the two phases coincide),
True otherwise
"""
if transition[-1] == 1:
if np.linalg.norm(transition[0:3]-transition[4:7]) < SameXThreshold:
return False
else:
return True
if transition[-1] == 2:
# this is a second order phase transition. Need to get low-T location from phase info
T = transition[8]
lT_phase = np.loadtxt(inname+'/good_'+str(int(good_params[ind,0]))+'_phase_'+str(int(transition[3]))+'.txt')
Tind = np.argmin(np.abs(lT_phase[:,3]-T))
if np.linalg.norm(lT_phase[Tind,0:3]-transition[4:7]) < SameXThreshold:
return False
else:
return True
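# A first-order transition counts as successfully nucleated if S_3(T_n)/T_n lies
# within nuclCondThreshold of nuclCond; second-order transitions have no bounce
# action, so the corresponding entry is expected to be zero.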
def check_nucl_calc_success(transition):
if transition[-1] == 1 and np.abs(transition[9]/transition[8] - nuclCond) < nuclCondThreshold:
return True
elif transition[-1] == 2 and transition[9] == 0:
return True
else:
return False
def check_ltphase_T(transition, ind):
"""
returns True if the low-temperature phase extends up to the transition temperature, False otherwise
"""
lT_phase = np.loadtxt(inname+'/good_'+str(int(good_params[ind,0]))+'_phase_'+str(int(transition[3]))+'.txt')
if np.min(np.abs(lT_phase[:,3]-transition[8])) < dtFracMax*transition[8]:
return True
else:
return False
###############################################################
# do the categorization
# the indices of the categories of the points in good_params
# are stored in cat_inds_Tcrit (categorization based on critical
# temperature calculation) and cat_inds_Tnucl (categorization
# based on nucleation temperature calculation)
###############################################################
# categories
cat_labels = [r'no transitions',
r'1-a', # 1-step SFOEW
r'1-b', # 1-step FO
r'1-c', # 1-step 2nd
r'2(I)-a', # from (0,0,0) > (0,0,v_S'') > SFOEW (physical)
r'2(I)-b', # from (0,0,0) > (0,0,v_S'') > FO (physical)
r'2(I)-c', # from (0,0,0) > (0,0,v_S'') > 2nd (physical)
r'2(II)-aa', # from (0,0,0) > SFOEW (v1,v2,v_S'') > SFOEW (physical)
r'2(II)-ab', # from (0,0,0) > SFOEW (v1,v2,v_S'') > FO (physical)
r'2(II)-ac', # from (0,0,0) > SFOEW (v1,v2,v_S'') > 2nd (physical)
r'2(II)-ba', # from (0,0,0) > FO (v1,v2,v_S'') > SFOEW (physical)
r'2(II)-bb', # from (0,0,0) > FO (v1,v2,v_S'') > FO (physical)
r'2(II)-bc', # from (0,0,0) > FO (v1,v2,v_S'') > 2nd (physical)
r'2(II)-ca', # from (0,0,0) > 2nd (v1,v2,v_S'') > SFOEW (physical)
r'2(II)-cb', # from (0,0,0) > 2nd (v1,v2,v_S'') > FO (physical)
r'2(II)-cc', # from (0,0,0) > 2nd (v1,v2,v_S'') > 2nd (physical)
r'nucleation calculation failed',
r'something else...',
r'read error...']
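# Note (added for clarity): each cat_inds_* list below is parallel to cat_labels,
# i.e. cat_inds_Tcrit[k] collects the row indices of good_params that fall into
# category cat_labels[k] (for example, index 1 corresponds to the one-step
# SFOEWPT category '1-a').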
cat_inds_Tcrit = []
cat_inds_Tnucl = []
for entry in cat_labels:
cat_inds_Tcrit.append([])
cat_inds_Tnucl.append([])
for i in range(good_params.shape[0]):
# check the critical-temperature calculation results
try:
out_Tcrit = np.loadtxt(inname+'/good_'+str(int(good_params[i,0]))+'_Tc_out.txt')
if out_Tcrit.shape[0] == 0: # empty transition file
cat_inds_Tcrit[0].append(i)
elif len(out_Tcrit.shape) == 1: # only one entry in transition file
transition = out_Tcrit
# check that the only transition starts from the trivial phase and ends in the physical phase
if trans_start_from_trivial(transition, i) and trans_end_physical(transition, i) and check_ltphase_T(transition, i):
if trans_strongly_EW_first_order(transition): # check if the transition is an SFOEWPT
cat_inds_Tcrit[1].append(i)
elif transition[-1] == 1: # or FO
cat_inds_Tcrit[2].append(i)
elif transition[-1] == 2: # or 2nd order
cat_inds_Tcrit[3].append(i)
else:
cat_inds_Tcrit[-2].append(i)
else:
cat_inds_Tcrit[-2].append(i)
else: # multiple transitions
            # order the transitions by temperature (highest first)
j_Tc = np.argsort(out_Tcrit[:,8])[::-1]
# find the first transition from the high-T minimum
j_try = 0
transition = out_Tcrit[j_Tc[j_try],:]
while trans_start_from_trivial(transition, i) == False and j_try < len(j_Tc)-1 and check_ltphase_T(transition, i) == False:
j_try += 1
transition = out_Tcrit[j_Tc[j_try],:]
            # throw the point out if there is no transition starting from the high-T minimum
if j_try == len(j_Tc):
cat_inds_Tcrit[-2].append(i)
# collect further transitions
transition_list = [transition]
active_key = transition[3]
while j_try < len(j_Tc)-1:
j_try += 1
if out_Tcrit[j_Tc[j_try],7] == active_key and check_ltphase_T(out_Tcrit[j_Tc[j_try],:], i):
transition_list.append(out_Tcrit[j_Tc[j_try],:])
active_key = out_Tcrit[j_Tc[j_try],3]
# if there is only 1 step in the chain:
if len(transition_list) == 1:
transition = transition_list[0]
# check that the only transition starts from the trivial phase and ends in the physical phase
if trans_start_from_trivial(transition, i) and trans_end_physical(transition, i):
if trans_strongly_EW_first_order(transition): # check if the transition is an SFOEWPT
cat_inds_Tcrit[1].append(i)
elif transition[-1] == 1: # or FO
cat_inds_Tcrit[2].append(i)
elif transition[-1] == 2: # or 2nd order
cat_inds_Tcrit[3].append(i)
else:
cat_inds_Tcrit[-2].append(i)
else:
cat_inds_Tcrit[-2].append(i)
else: # there are multiple steps in the chain
transition_list_cleaned = []
j = 0
while j < len(transition_list):
if transition_real(transition_list[j], i):
transition_list_cleaned.append(transition_list[j])
else:
if len(transition_list_cleaned) > 0:
transition_list_cleaned[-1][3] = transition_list[j][3]
j += 1
# check if this is still a multi-step transition:
if len(transition_list_cleaned) == 1:
transition = transition_list_cleaned[0]
# check that the only transition starts from the trivial phase and ends in the physical phase
if trans_start_from_trivial(transition, i) and trans_end_physical(transition, i):
if trans_strongly_EW_first_order(transition): # check if the transition is an SFOEWPT
cat_inds_Tcrit[1].append(i)
elif transition[-1] == 1: # or FO
cat_inds_Tcrit[2].append(i)
elif transition[-1] == 2: # or 2nd order
cat_inds_Tcrit[3].append(i)
else:
cat_inds_Tcrit[-2].append(i)
else:
cat_inds_Tcrit[-2].append(i)
elif len(transition_list_cleaned) == 2:
# check that the first step starts from the trivial phase
if not trans_start_from_trivial(transition_list_cleaned[0], i):
cat_inds_Tcrit[-2].append(i)
# check that the last step ends in the physical phase
elif not trans_end_physical(transition_list_cleaned[-1], i):
cat_inds_Tcrit[-2].append(i)
else:
# is the intermediate phase singlet only?
if trans_end_HSonly(transition_list_cleaned[0], i):
if trans_strongly_EW_first_order(transition_list_cleaned[1]):
cat_inds_Tcrit[4].append(i)
elif transition_list_cleaned[1][-1] == 1:
cat_inds_Tcrit[5].append(i)
elif transition_list_cleaned[1][-1] == 2:
cat_inds_Tcrit[6].append(i)
else:
cat_inds_Tcrit[-2].append(i)
else:
if trans_strongly_EW_first_order(transition_list_cleaned[0]):
if trans_strongly_EW_first_order(transition_list_cleaned[1]):
cat_inds_Tcrit[7].append(i)
elif transition_list_cleaned[1][-1] == 1:
cat_inds_Tcrit[8].append(i)
elif transition_list_cleaned[1][-1] == 2:
cat_inds_Tcrit[9].append(i)
else:
cat_inds_Tcrit[-2].append(i)
elif transition_list_cleaned[0][-1] == 1:
if trans_strongly_EW_first_order(transition_list_cleaned[1]):
cat_inds_Tcrit[10].append(i)
elif transition_list_cleaned[1][-1] == 1:
cat_inds_Tcrit[11].append(i)
elif transition_list_cleaned[1][-1] == 2:
cat_inds_Tcrit[12].append(i)
else:
cat_inds_Tcrit[-2].append(i)
elif transition_list_cleaned[0][-1] == 2:
if trans_strongly_EW_first_order(transition_list_cleaned[1]):
cat_inds_Tcrit[13].append(i)
elif transition_list_cleaned[1][-1] == 1:
cat_inds_Tcrit[14].append(i)
elif transition_list_cleaned[1][-1] == 2:
cat_inds_Tcrit[15].append(i)
else:
cat_inds_Tcrit[-2].append(i)
else:
cat_inds_Tcrit[-2].append(i)
else:
cat_inds_Tcrit[-2].append(i)
except:
cat_inds_Tcrit[-1].append(i)
# and the same for the nucleation results
try:
out_Tnucl = np.loadtxt(inname+'/good_'+str(int(good_params[i,0]))+'_Tn_out.txt')
if out_Tnucl.shape[0] == 0: # empty transition file
            cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
            ret = max(ret, cand)
    print('Case #%d: %.10lf' % (cc + 1, ret))
return needed_budget
def func_654db418d7c34fa1a93ae5fab58b01ee(budget, cc, infile):
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
            cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
            ret = max(ret, cand)
    print('Case #%d: %.10lf' % (cc + 1, ret))
return exclude
def func_15fc4e8a0c284cec909c468addd83437(budget, cc, infile):
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
            cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
            ret = max(ret, cand)
    print('Case #%d: %.10lf' % (cc + 1, ret))
return remaining_budget
def func_29a5b238cecb4ad694a77b7534b2ce4f(budget, cc, infile):
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
            cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
            ret = max(ret, cand)
    print('Case #%d: %.10lf' % (cc + 1, ret))
return lowest_cnt
def func_eda06dcfdce646bbbf629d834be4b797(budget, cc, infile):
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
            cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
            ret = max(ret, cand)
    print('Case #%d: %.10lf' % (cc + 1, ret))
return ret
def func_20448dc8039c41d9b857401a4f19a9b2(budget, cc, infile):
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
            cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
            ret = max(ret, cand)
    print('Case #%d: %.10lf' % (cc + 1, ret))
return placed
def func_42aee3ed8aca42caaa61131c07e085ed(budget, cc, infile):
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
            cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
            ret = max(ret, cand)
    print('Case #%d: %.10lf' % (cc + 1, ret))
return seen
def func_a6dc24c1f29e42478b59ec268fdd0519(budget, cc, infile):
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
        partial = len([p for p in placed if p <= lowest])
(Note: you can use libraries that implement the state machine concept)"
elif self.generic_yesno_input("Do you need to enforce pre or post conditions in a hierarchy", ref):
self.pattern = "Template Method (implemented as NVI Idiom in C++)"
elif self.generic_yesno_input("Do you need method chaining in a hierarchy", ref):
self.pattern = "CRTP Idiom"
elif self.generic_yesno_input("Do you need to create a set of components out of a set of orthogonal concepts", ref):
self.pattern = "Policy design, Mixins"
elif self.generic_yesno_input("Do you need to group a list of overloads (e.g. for visiting)", ref):
self.pattern = "Variadic Base Class with using directive"
elif self.generic_yesno_input("Do you need to regroup natural dependent properties of a type", ref):
self.pattern = "Traits Class"
else:
self.pattern = "Check GoF patterns and typical C++ idioms"
class ClassRecipeCook(RecipeCook):
'''Recipe to generate a class'''
def __init__(self, desc, template_path="templates"):
super().__init__(desc, template_path)
self.classattr = {}
self.has_impl = False
def show_dish_console_impl(self):
tpl_h = self.env.get_template(f"{self.classattr['type']}_class.h")
self.classattr["annotations"] = self.annotations
print(tpl_h.render(self.classattr))
if self.has_impl:
tpl_cpp = self.env.get_template(f"{self.classattr['type']}_class.cpp")
print("\n" + tpl_cpp.render(self.classattr))
def write_dish(self, odir: str):
header = f"{self.classattr['type']}_class.h"
tpl_h = self.env.get_template(header)
self.classattr["annotations"] = self.annotations
with open(Path(odir)/header, "w", encoding="utf8") as f:
f.write(tpl_h.render(self.classattr))
if not self.has_impl:
return
impl = f"{self.classattr['type']}_class.cpp"
tpl_cpp = self.env.get_template(impl)
with open(Path(odir)/impl, "w", encoding="utf8") as f:
f.write(tpl_cpp.render(self.classattr))
def class_root_step_name(self, ref):
self.classattr["classname"] = self.generic_identifier_input(
"Enter class name", ref)
def class_root_step_responsability(self, ref):
self.classattr["responsability"] = RecipeCook.custom_input(
"Enter class single role/responsability", ref)
def class_root_step_invariant(self, ref):
self.classattr["invariant"] = RecipeCook.custom_input(
"Enter class invariants description", ref)
@strong_input(InputType.LIST, ["thread-safe", "thread-compatible", "thread-incompatible"], "thread-incompatible")
def root_step_thread_safe_cb(self, ref):
return RecipeCook.custom_input("Enter thread-safety contrat (thread-safe: no api race, thread-compatible: no api race if not mutated, thread-incompatible, default: thread-incompatible)", ref)
def class_root_step_thread_safe(self, ref):
self.classattr["thread_safety"] = self.root_step_thread_safe_cb(ref)
@strong_input(InputType.LIST, ["concrete", "hierarchy"], "concrete")
def class_root_step_type_cb(self, ref):
return RecipeCook.custom_input("Enter class type (concrete, hierarchy, default: concrete)", ref)
def class_root_step_type(self, ref):
self.classattr["type"] = self.class_root_step_type_cb(ref)
@strong_input(InputType.INT)
def tpl_parameters_repeat(self):
return RecipeCook.custom_input("Enter template parameters count")
def tpl_parameters_initialize(self):
self.tparam = {}
def tpl_parameters_step_name(self, ref):
self.tparam["name"] = self.generic_misc_input(
"Enter template parameter full desc e.g. typename T, ...", ref)
def tpl_parameters_finalize(self):
self.classattr.setdefault("tparams", []).append(
copy.deepcopy(self.tparam))
@strong_input(InputType.LIST, ["thin", "thick", "verythick"], "thin")
def concrete_step_abstraction_cb(self, ref):
return RecipeCook.custom_input("Enter class abstraction (verythick for pimpl, thick for no-inline, thin otherwise, default: thin)", ref)
def concrete_step_abstraction(self, ref):
if not self.classattr["type"] in ["concrete"]:
return
if "tparams" in self.classattr:
self.classattr["abstraction"] = "thin"
else:
self.classattr["abstraction"] = self.concrete_step_abstraction_cb(
ref)
self.has_impl = (self.classattr["abstraction"] == "verythick")
def concrete_step_raii(self, ref):
if not self.classattr["type"] in ["concrete"]:
return
if self.classattr["abstraction"] == "verythick":
# Force raii attribute
self.classattr["raii"] = False
else:
self.classattr["raii"] = self.generic_yesno_input(
"Does the class acquire-release a resource at contruction/destruction", ref)
def concrete_step_specials(self, ref):
if not self.classattr["type"] in ["concrete"]:
return
if self.classattr["abstraction"] == "verythick" or self.classattr["raii"]:
self.classattr["specials"] = True
else:
self.classattr["specials"] = self.generic_yesno_input(
"Should one of the special members be defined", ref)
def concrete_step_alloc(self, ref):
if not self.classattr["type"] in ["concrete"]:
return
self.classattr["custom_allocators"] = self.generic_yesno_input(
"Does the class need custom allocation/deallocation overloads", ref)
def concrete_step_init_list_ctor(self, ref):
if not self.classattr["type"] in ["concrete"]:
return
self.classattr["init_list_ctor"] = self.generic_yesno_input(
"Does the class need an initializer list ctor", ref)
def hierarchy_step_base(self, ref):
if not self.classattr["type"] in ["hierarchy"]:
return
self.classattr["base"] = self.generic_yesno_input(
"Is it the base class in hierarchy", ref)
if not self.classattr["base"]:
self.classattr["basename"] = RecipeCook.custom_input(
"Enter the main base class name", ref)
def hierarchy_step_clonable(self, ref):
if not self.classattr["type"] in ["hierarchy"]:
return
self.classattr["clonable"] = self.generic_yesno_input(
"Is is part of a clonable hierarchy", ref)
def hierarchy_step_interface(self, ref):
if not self.classattr["type"] in ["hierarchy"]:
return
if self.classattr["base"]:
self.classattr["interface"] = self.generic_yesno_input(
"Does the hierarchy need a complete separation of interface", ref)
def hierarchy_step_inherited_init(self, ref):
if not self.classattr["type"] in ["hierarchy"]:
return
if not self.classattr["base"]:
self.classattr["init_with_base"] = not self.generic_yesno_input(
"Does the class need specific initialization (data members, ...)", ref)
class FunctionRecipeCook(RecipeCook):
'''Recipe to generate a function'''
def __init__(self, desc, template_path="templates"):
super().__init__(desc, template_path)
self.funcattr = {}
self.param = {}
self.tparam = {}
self.ret = {}
self.attr = {}
self.virtual = False
def show_dish_console_impl(self):
tpl_h = self.env.get_template("function.h")
self.funcattr["annotations"] = self.annotations
print(tpl_h.render(self.funcattr))
def write_dish(self, odir: str):
tpl_h = self.env.get_template("function.h")
self.funcattr["annotations"] = self.annotations
with open(Path(odir)/"function.h", "w", encoding="utf8") as f:
f.write(tpl_h.render(self.funcattr))
def func_root_step_name(self, ref):
self.funcattr["name"] = self.generic_identifier_input(
"Enter function name", ref)
def func_root_step_type(self, ref):
if self.generic_yesno_input("Is it a virtual method", ref):
self.funcattr["type"] = "method"
self.virtual = True
elif self.generic_yesno_input("Does it need access to a class internal representation aka a method", ref):
self.funcattr["type"] = "method"
self.virtual = False
else:
self.funcattr["type"] = "free"
def func_root_step_responsability(self, ref):
self.funcattr["responsability"] = RecipeCook.custom_input(
"Enter function single role/responsability", ref)
def func_root_step_thread_safe(self, ref):
self.funcattr["thread_hostile"] = self.generic_yesno_input(
"Can it cause API races at sites other than its inputs (e.g. static local variable)", ref)
@strong_input(InputType.INT)
def tpl_parameters_repeat(self):
return "0" if self.virtual else RecipeCook.custom_input("Enter template parameters count")
def tpl_parameters_initialize(self):
self.tparam = {}
def tpl_parameters_step_name(self, ref):
self.tparam["name"] = self.generic_misc_input(
"Enter template parameter full desc e.g. typename T, ...", ref)
def tpl_parameters_finalize(self):
self.funcattr.setdefault("tparams", []).append(
copy.deepcopy(self.tparam))
@strong_input(InputType.INT)
def func_parameters_repeat(self):
return RecipeCook.custom_input("Enter regular parameters count")
def func_parameters_initialize(self):
self.param = {}
self.param.setdefault("comments", [])
def func_parameters_step_name(self, ref):
self.param["name"] = self.generic_identifier_input(
"Enter parameter name (without type)", ref)
def func_parameters_step_type_react(self, ref, msg):
self.print_live_annotation("TIPS", ref, msg)
if self.generic_yesno_input("Would you like to update param type according to tips", ref):
self.param["type"] = self.generic_type_input(
f"Enter parameter type to replace {self.param['type']}", ref)
def func_parameters_step_type(self, ref):
self.param["type"] = self.generic_type_input(
"Enter parameter type e.g. int, const Obj&, ...", ref)
if not self.generic_yesno_input("Would you like to trigger advanced mode for parameter type fine-tuning", ref):
return
param_type = "const X&"
if self.generic_yesno_input("Is it an in-out param", ref):
param_type = "X&"
elif self.generic_yesno_input("Will the arg be passed onward to other code and not directly used by this function", ref):
param_type = "T&& + std::forward"
elif self.generic_yesno_input("Is it cheap to copy e.g. int, pointer, smart pointers", ref) or \
"unique_ptr" in self.param["type"]:
param_type = "X"
elif not self.generic_yesno_input("Is it going to be stored or moved", ref):
param_type = "const X&"
elif self.generic_yesno_input("Will it be unconditionnaly moved", ref):
param_type = "X&&"
elif self.generic_yesno_input("Will it be copied", ref):
if self.generic_yesno_input("Is it expansive to move (e.g. bigPOD, large array)", ref):
param_type = "const X&"
elif self.generic_yesno_input("Is it copyable, copied on all paths, cheap to move (e.g. std::string, std::vector) and move constructed", ref):
param_type = "X"
else:
param_type = "const X& + X&& overloads or T&& + std::forward"
self.func_parameters_step_type_react(ref, f"pass arg of type X as {param_type}")
def func_parameters_step_weaktype(self, ref):
if "void" in self.param["type"]:
self.func_parameters_step_type_react(ref, "void is a weak type")
def func_parameters_step_array(self, ref):
if any(p in self.param["type"] for p in ["*", "[]"]) and \
self.generic_yesno_input("Does the param represent an array", ref):
self.func_parameters_step_type_react(
ref, "array should be passed as std::array or span")
def func_parameters_step_smartpointer(self, ref):
if any(p in self.param["type"] for p in ["shared_ptr", "unique_ptr"]) and \
not self.generic_yesno_input("Will ownership be transferred or shared", ref):
self.func_parameters_step_type_react(
ref, "T* or T& is better for general use")
def func_parameters_step_cstring(self, ref):
if re.match(".*char\s*\*", self.param["type"]):
self.func_parameters_step_type_react(
ref, "consider alternative like string_view")
def func_parameters_step_collision(self, ref):
if "params" in self.funcattr and \
self.param["type"] == self.funcattr["params"][-1]["type"]:
self.func_parameters_step_type_react(
ref, "same type as previous param can be confusing")
def func_parameters_step_boollist(self, ref):
if "params" in self.funcattr and \
"bool" == self.param["type"] and \
any("bool" == p["type"] for p in self.funcattr["params"]):
self.param["comments"].append(RecipeCook.format_input(
"more than one bool param, consider using flags", ref))
def func_parameters_step_argscount(self, ref):
if "params" in self.funcattr and len(self.funcattr["params"]) >= 4:
self.param["comments"].append(RecipeCook.format_input(
"more than 4 params, maybe missing an abstration or function has too many responsabilities", ref))
def func_parameters_finalize(self):
self.param["comments"] = ",".join(self.param["comments"])
self.funcattr.setdefault("params", []).append(
copy.deepcopy(self.param))
def func_ret_step_type_react(self, ref, msg):
self.print_live_annotation("TIPS", ref, msg)
if self.generic_yesno_input("Would you like to update return type according to tips", ref):
self.ret["rtype"] = self.generic_type_input(
"Enter return type", ref)
else:
self.ret["comments"].append(RecipeCook.format_input(
msg, ref))
def func_ret_initialize(self):
self.ret = {}
self.ret.setdefault("comments", [])
def func_ret_step_type(self, ref):
self.ret["rtype"] = self.generic_type_input(
"Enter return type", ref)
def func_ret_step_ownership(self, ref):
if "*" in self.ret["rtype"] and self.generic_yesno_input("Is it a transfer of ownership", ref):
self.func_ret_step_type_react(
ref, "beware ownership transferred with raw pointer, consider smart pointer")
def func_ret_step_byvalue(self, ref):
if self.ret["rtype"] == "void":
return
force_not_byvalue = self.generic_yesno_input(
"Does it return a container item, enable write access to some representation, or enable chain calls", ref)
if force_not_byvalue and (self.ret["rtype"] == "auto" or
not any(ret in self.ret["rtype"] for ret in ["*", "&"])):
self.func_ret_step_type_react(
ref, "you should not return by value (e.g. | |
                reward = torch.tensor(bleu_rewards).float().mean()
quantities_to_log["bleu"].append(reward)
if self._include_perplexity is True:
nll_reward = (
self._compute_nll_reward(
sentences=generated_texts))
reward = reward + nll_reward
quantities_to_log["nll"].append(nll_reward)
rewards.append(reward)
# all_classifier_outputs.append(classifier_outputs)
except ValueError as err:
# This happens when the generated text itself includes the
# `</s>` token, which does happen and will cause the classifier to fail.
# So we just ignore this error and give a score of zero for this batch.
if str(err) != "All examples must have the same number of <eos> tokens.":
raise err
click.secho("Encountered an error, skipping ...", bg="red")
rewards.append(torch.tensor(0.).to(device))
rewards_tensor = torch.stack(rewards)
rewards_log = dict(
(reward_key, torch.stack(reward_vals, dim=0).mean())
for reward_key, reward_vals in quantities_to_log.items())
if self._return_intermediate_outputs is True:
rewards_log["quantities_to_log"] = quantities_to_log # type: ignore
rewards_log["formatted_prompts"] = formatted_prompts # type: ignore
rewards_log["generator_outputs"] = generator_outputs # type: ignore
# rewards_log["all_classifier_outputs"] = all_classifier_outputs # type: ignore
if to_tensor is True:
return rewards_tensor, rewards_log
else:
return rewards_tensor.tolist(), rewards_log
def __call__(
self,
sources: List[str],
targets: List[str],
predictions: List[str],
to_tensor: bool,
mode: str,
) -> Tuple[Union[List[float], FloatTensor], Dict[str, Any]]:
return self.forward(
sources=sources,
prompts=predictions,
to_tensor=to_tensor,
mode=mode)
class GPT2BLEUNoInputReward(object):
TST_TEMPLATES_FILE_NAME = "/workspace/soft-Q-learning-for-text-generation/experiments/tst-templates-no-task.txt"
def __init__(
self,
max_length: int = 60,
num_return_sequences_train: int = 2,
num_return_sequences_infer: int = 100,
# topic_scores_aggregator: Optional[Callable[[List[float]], Union[float, np.number]]] = None,
include_perplexity: bool = True,
return_intermediate_outputs: bool = False,
) -> None:
if include_perplexity is True:
sql_utils.colorful_warning("Adding Perplexity-based Reward", bg="blue")
sql_utils.colorful_warning(f"max_length={max_length}", bg="blue")
# https://huggingface.co/gpt2
# https://huggingface.co/facebook/bart-large-mnli
self._generator = pipeline(
"text-generation",
model="distilgpt2",
device=0)
self._max_length = max_length
self._num_return_sequences_train = num_return_sequences_train
self._num_return_sequences_infer = num_return_sequences_infer
self._tst_templates = self.load_tst_templates()
self._tst_inputs = self._load_tst_inputs()
self._tst_inputs_idx = {'train': 0, 'infer': 0}
# Technically, adding perplexity-based reward will break
# the scale, but we will ignore this for now since
# this number is relatively small.
self._include_perplexity = include_perplexity
        # Do not set this to `True` during training; use it only for debugging.
self._return_intermediate_outputs = return_intermediate_outputs
def load_tst_templates(self) -> List[str]:
with open(self.TST_TEMPLATES_FILE_NAME) as f:
tst_templates = [d.strip() for d in f.readlines()]
return tst_templates
def _load_tst_inputs(self) -> Dict[Tuple[str], List[str]]:
tst_inputs = {}
# tokenizer = self._generator.tokenizer
filepath_train_0 = "/workspace/soft-Q-learning-for-text-generation/data/yelp-gpt2-control-only/raw/sentiment.train.0"
filepath_train_1 = "/workspace/soft-Q-learning-for-text-generation/data/yelp-gpt2-control-only/raw/sentiment.train.1"
filepath_dev_0 = "/workspace/soft-Q-learning-for-text-generation/data/yelp-gpt2-control-only/raw/sentiment.dev.0"
filepath_dev_1 = "/workspace/soft-Q-learning-for-text-generation/data/yelp-gpt2-control-only/raw/sentiment.dev.1"
with open(filepath_train_0) as f:
sentences_train_0 = [line.strip() for line in f]
with open(filepath_train_1) as f:
sentences_train_1 = [line.strip() for line in f]
with open(filepath_dev_0) as f:
sentences_dev_0 = [line.strip() for line in f]
with open(filepath_dev_1) as f:
sentences_dev_1 = [line.strip() for line in f]
import random
sentences_train = sentences_train_0 + sentences_train_1
random.seed(0)
random.shuffle(sentences_train)
tst_inputs['train'] = sentences_train
tst_inputs['infer'] = sentences_dev_0[:5] + sentences_dev_1[:5]
return tst_inputs
def _convert_tokens_to_string(self, tokens: List[str]) -> List[str]:
return [self._generator.tokenizer
.convert_tokens_to_string(s.split())
for s in tokens]
def _format_prompts(self, source_strings: List[str], prompt_strings: List[str]) -> List[str]:
# templates = np.random.choice(
# self._tst_templates,
# size=len(prompt_strings),
# # we use with-replacement here
# replace=True,).tolist()
# print(templates)
template = self._tst_templates[0]
return [
template.format(sentence_1=s_1, prompt=p) for s_1, p
in zip(source_strings, prompt_strings)]
def _compute_nll_reward(self, sentences: List[str]) -> FloatTensor:
nlls, _ = compute_perplexities(
sentences=sentences,
model=self._generator.model,
tokenizer=self._generator.tokenizer)
# When the sentence has just one token,
# the NLL/perplexity will be `NaN`.
# Further, we use the negative NLL as the reward
return -torch.nan_to_num(nlls, nan=10.0).mean()
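    # Note (added for clarity): nan_to_num replaces a NaN per-sentence NLL with
    # 10.0 before averaging, so a single-token generation enters the averaged
    # reward as a fixed -10.0 contribution (after negation) instead of
    # propagating NaN through the whole batch.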
def _get_inputs(self, mode: str, batch_size: int):
data = self._tst_inputs[mode]
idx = self._tst_inputs_idx[mode]
inputs = []
for _ in range(batch_size):
inputs.append(data[idx])
idx += 1
idx %= len(data)
self._tst_inputs_idx[mode] = idx
return inputs
def forward(self, control_codes: List[str], prompts: List[str], to_tensor: bool, mode: str) -> Tuple[Union[List[float], FloatTensor], Dict[str, Any]]:
if mode not in ["train", "infer"]:
raise ValueError
if mode == "train":
num_return_sequences = self._num_return_sequences_train
if mode == "infer":
num_return_sequences = self._num_return_sequences_infer
# - List of length `len(prompts)`
# - List of length `num_return_sequences`
# - Dict of {"generated_text": str}
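        # For illustration only (hypothetical values): with len(prompts) == 2 and
        # num_return_sequences == 2, `generator_outputs` has the shape
        #   [[{"generated_text": "..."}, {"generated_text": "..."}],
        #    [{"generated_text": "..."}, {"generated_text": "..."}]]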
batch_size = len(control_codes)
source_strings = self._get_inputs(mode, batch_size)
prompt_strings = self._convert_tokens_to_string(prompts)
formatted_prompts = self._format_prompts(source_strings, prompt_strings)
generator_outputs: List[List[Dict[str, Any]]] = self._generator(
formatted_prompts,
max_length=self._max_length,
num_return_sequences=num_return_sequences,
# Only return generated text, without the prompt
return_full_text=False)
rewards: List[FloatTensor] = []
quantities_to_log: Dict[str, List[FloatTensor]] = defaultdict(list)
for batch_index in range(len(prompts)):
# generated_texts = [
# output["generated_text"] for output in
# generator_outputs[batch_index]]
generated_texts = []
for output in generator_outputs[batch_index]:
text = output["generated_text"]
try:
end = text.index('"')
except ValueError:
end = len(text)
generated_texts.append(text[:end])
if mode == "infer":
print(f"Formatted Prompt: {formatted_prompts[batch_index]};",
f"Output: {generated_texts[0]}")
# - List of length `len(generated_texts)`
# - Dict of {
# "labels": List of length `num_topics`,
# "scores": List of length `num_topics`,
# "sequence": str,
# }
try:
reference_texts = [source_strings[batch_index] for _ in generator_outputs[batch_index]]
check_Xs_Ys_sizes(generated_texts, reference_texts)
# Using a faster BLEU implementation during training
# `sacrebleu` is ~3X faster than `lightning`
# `sacrebleu-parallel` is ~3X faster than `sacrebleu`
bleus = [
scb.sentence_bleu(
hypothesis=x,
references=[y])
for x, y in zip(
generated_texts,
reference_texts)
]
bleu_rewards = [b.score for b in bleus]
reward = torch.tensor(bleu_rewards).float().mean()
quantities_to_log["bleu"].append(reward)
if self._include_perplexity is True:
nll_reward = (
self._compute_nll_reward(
sentences=generated_texts))
reward = reward + nll_reward
quantities_to_log["nll"].append(nll_reward)
rewards.append(reward)
except ValueError as err:
# This happens when the generated text itself includes the
# `</s>` token, which does happen and will cause the classifier to fail.
# So we just ignore this error and give a score of zero for this batch.
if str(err) != "All examples must have the same number of <eos> tokens.":
raise err
click.secho("Encountered an error, skipping ...", bg="red")
rewards.append(torch.tensor(0.).to(device))
rewards_tensor = torch.stack(rewards)
rewards_log = dict(
(reward_key, torch.stack(reward_vals, dim=0).mean())
for reward_key, reward_vals in quantities_to_log.items())
if self._return_intermediate_outputs is True:
rewards_log["quantities_to_log"] = quantities_to_log # type: ignore
rewards_log["formatted_prompts"] = formatted_prompts # type: ignore
rewards_log["generator_outputs"] = generator_outputs # type: ignore
# rewards_log["all_classifier_outputs"] = all_classifier_outputs # type: ignore
if to_tensor is True:
return rewards_tensor, rewards_log
else:
return rewards_tensor.tolist(), rewards_log
def __call__(
self,
sources: List[str],
targets: List[str],
predictions: List[str],
to_tensor: bool,
mode: str,
) -> Tuple[Union[List[float], FloatTensor], Dict[str, Any]]:
return self.forward(
control_codes=sources,
prompts=predictions,
to_tensor=to_tensor,
mode=mode)
class GPT2SentimentNoInputReward(object):
TST_TEMPLATES_FILE_NAME = "/workspace/soft-Q-learning-for-text-generation/experiments/tst-templates-no-task.txt"
# TST_TEMPLATES_FILE_NAME = "/workspace/soft-Q-learning-for-text-generation/experiments/tst-templates-no-task-no-source.txt"
TST_CLF_CONFIG = dict(model=("/workspace/soft-Q-learning-for-text-generation/experiments/yelp_sentiment_classifier/"
"results-bert-base/checkpoint-10410/"),
tokenizer='bert-base-uncased')
def __init__(
self,
max_length: int = 60,
num_return_sequences_train: int = 2,
num_return_sequences_infer: int = 100,
# topic_scores_aggregator: Optional[Callable[[List[float]], Union[float, np.number]]] = None,
include_perplexity: bool = True,
return_intermediate_outputs: bool = False,
) -> None:
if include_perplexity is True:
sql_utils.colorful_warning("Adding Perplexity-based Reward", bg="blue")
sql_utils.colorful_warning(f"max_length={max_length}", bg="blue")
# https://huggingface.co/gpt2
# https://huggingface.co/facebook/bart-large-mnli
self._generator = pipeline(
"text-generation",
model="distilgpt2",
device=0)
self._classifier = pipeline(
"sentiment-analysis",
model=self.TST_CLF_CONFIG['model'],
tokenizer=self.TST_CLF_CONFIG['tokenizer'],
device=0)
self._max_length = max_length
self._num_return_sequences_train = num_return_sequences_train
self._num_return_sequences_infer = num_return_sequences_infer
self._tst_templates = self.load_tst_templates()
self._tst_inputs = self._load_tst_inputs()
self._tst_inputs_idx = {('train', 'LABEL_0'): 0,
('train', 'LABEL_1'): 0,
('infer', 'LABEL_0'): 0,
('infer', 'LABEL_1'): 0}
# Technically, adding perplexity-based reward will break
# the scale, but we will ignore this for now since
# this number is relatively small.
self._include_perplexity = include_perplexity
        # Do not set this to `True` during training; use it only for debugging.
self._return_intermediate_outputs = return_intermediate_outputs
def load_tst_templates(self) -> List[str]:
with open(self.TST_TEMPLATES_FILE_NAME) as f:
tst_templates = [d.strip() for d in f.readlines()]
return tst_templates
def _load_tst_inputs(self) -> Dict[Tuple[Any], List[str]]:
tst_inputs = {}
# tokenizer = self._generator.tokenizer
filepath_train_0 = "/workspace/soft-Q-learning-for-text-generation/data/yelp-gpt2-control-only/raw/sentiment.train.0"
filepath_train_1 = "/workspace/soft-Q-learning-for-text-generation/data/yelp-gpt2-control-only/raw/sentiment.train.1"
filepath_dev_0 = "/workspace/soft-Q-learning-for-text-generation/data/yelp-gpt2-control-only/raw/sentiment.dev.0"
filepath_dev_1 = "/workspace/soft-Q-learning-for-text-generation/data/yelp-gpt2-control-only/raw/sentiment.dev.1"
with open(filepath_train_0) as f:
sentences_train_0 = [line.strip() for line in f]
with open(filepath_train_1) as f:
sentences_train_1 = [line.strip() for line in f]
with open(filepath_dev_0) as f:
sentences_dev_0 = [line.strip() for line in f]
with open(filepath_dev_1) as f:
sentences_dev_1 = [line.strip() for line in f]
tst_inputs[('train', 'LABEL_0')] = sentences_train_1
tst_inputs[('train', 'LABEL_1')] = sentences_train_0
tst_inputs[('infer', 'LABEL_0')] = sentences_dev_1[:5]
tst_inputs[('infer', 'LABEL_1')] = sentences_dev_0[:5]
return tst_inputs
def _convert_tokens_to_string(self, tokens: List[str]) -> List[str]:
return [self._generator.tokenizer
.convert_tokens_to_string(s.split())
for s in tokens]
def _format_prompts(self, source_strings: List[str], prompt_strings: List[str]) -> List[str]:
template = self._tst_templates[0]
# return [
# template.format(prompt=p) for s_1, p
# in zip(source_strings, prompt_strings)]
return [
template.format(sentence_1=s_1, prompt=p) for s_1, p
in zip(source_strings, prompt_strings)]
def _compute_nll_reward(self, sentences: List[str]) -> FloatTensor:
nlls, _ = compute_perplexities(
sentences=sentences,
model=self._generator.model,
tokenizer=self._generator.tokenizer)
# When the sentence has just one token,
# the NLL/perplexity will be `NaN`.
# Further, we use the negative NLL as the reward
return -torch.nan_to_num(nlls, nan=10.0).mean()
def _get_inputs(self, mode: str, target_labels: List[str]):
# data_0 = self._tst_inputs[(mode, 'LABEL_0')]
        # data_1 = self._tst_inputs[(mode, 'LABEL_1')]
added
render_context.update( {"full_report" : 'yes'} )
createReport( html_report, 'report.html', render_context )
def createBlockReport():
'''Called at the end of run to create a block.html report. Use 'pass' if not wanted.'''
if pluginParams['cmdOptions'].cmdline: return
printtime("Creating block report...")
if pluginParams['barcoded']:
render_context = {
"run_name" : pluginParams['prefix'],
"barcode_results" : simplejson.dumps(barcodeSummary)
}
tplate = 'barcode_block.html'
else:
render_context = pluginResult.copy()
render_context.update(pluginReport)
tplate = 'report_block.html'
createReport( pluginParams['block_report'], tplate, render_context )
def createProgressReport(progessMsg,last=False):
    '''General method to write a message directly to the block report, e.g. when starting processing of a new barcode.'''
createReport( pluginParams['block_report'], "progress_block.html", { "progress_text" : progessMsg, "refresh" : "" if last else "refresh" } )
#
# --------------- Base code for standard plugin runs -------------
#
def getOrderedBarcodes():
if NONBARCODED in barcodeInput:
return []
barcodes = {}
for barcode in barcodeInput:
if barcode == NOMATCH: continue
barcodes[barcode] = barcodeInput[barcode]["barcode_index"]
return sorted(barcodes,key=barcodes.get)
def parseCmdArgs():
'''Process standard command arguments. Customized for additional debug and other run options.'''
# standard run options here - do not remove
parser = OptionParser()
parser.add_option('-V', '--version', help='Version string for tracking in output', dest='version', default='')
parser.add_option('-X', '--min_bc_bam_size', help='Minimum file size required for barcode BAM processing', type="int", dest='minbamsize', default=0)
parser.add_option('-c', '--cmdline', help='Run command line only. Reports will not be generated using the HTML templates.', action="store_true", dest='cmdline')
parser.add_option('-d', '--scraper', help='Create a scraper folder of links to output files using name prefix (-P).', action="store_true", dest='scraper')
parser.add_option('-k', '--keep_temp', help='Keep intermediate files. By default these are deleted after a successful run.', action="store_true", dest='keep_temp')
parser.add_option('-l', '--log', help='Output extra progress Log information to STDERR during a run.', action="store_true", dest='logopt')
parser.add_option('-p', '--purge_results', help='Remove all folders and most files from output results folder.', action="store_true", dest='purge_results')
parser.add_option('-s', '--skip_analysis', help='Skip re-generation of existing files but make new report.', action="store_true", dest='skip_analysis')
parser.add_option('-x', '--stop_on_error', help='Stop processing barcodes after one fails. Otherwise continue to the next.', action="store_true", dest='stop_on_error')
parser.add_option('-i', '--isDx', help='platform specific environmental variable', dest='isDx', default='')
(cmdOptions, args) = parser.parse_args()
if( len(args) != 2 ):
printerr('Takes two file arguments: startplugin.json barcodes.json')
raise TypeError(os.path.basename(__file__)+" takes exactly two arguments (%d given)."%len(args))
with open(args[0]) as jsonFile:
jsonParams = json.load(jsonFile)
global pluginParams, barcodeInput
with open(args[1]) as jsonFile:
barcodeInput = json.load(jsonFile)
pluginParams['cmdOptions'] = cmdOptions
pluginParams['jsonInput'] = args[0]
pluginParams['barcodeInput'] = args[1]
pluginParams['jsonParams'] = jsonParams
def emptyResultsFolder():
'''Purge everything in output folder except for specifically named files.'''
if not pluginParams['cmdOptions'].purge_results: return
# Dangerous - replace with something safer if it becomes obvious (e.g. putting output in subfolder?)
results_dir = pluginParams['results_dir']
if results_dir == '/': return
logopt = pluginParams['cmdOptions'].logopt
cwd = os.path.realpath(os.getcwd())
if logopt or os.path.exists( os.path.join(results_dir,pluginParams['report_name']) ):
printlog("Purging old results...")
for root,dirs,files in os.walk(results_dir,topdown=False):
for name in files:
            # these are the exceptions - partial names and in the top-level results
if root == results_dir:
start = os.path.basename(name)[:10]
if start == "drmaa_stdo" or start == "ion_plugin" or start == "startplugi" or start == 'barcodes.j':
continue
fname = os.path.join(root,name)
if logopt and root == results_dir:
printlog("Removing file %s"%fname)
os.system('rm -f "%s"'%fname)
for name in dirs:
fname = os.path.realpath(os.path.join(root,name))
if fname.startswith(cwd):
printlog("Warning: Leaving folder %s as in cwd path."%fname)
continue
if logopt:
printlog("Removing directory %s"%fname)
os.system('rm -rf "%s"'%fname)
if logopt: printlog("")
def parseToDict(filein,sep=None):
ret = {}
if os.path.exists(filein):
with open(filein) as fin:
for line in fin:
line = line.strip()
                # ignore lines beginning with non-alphanum (for comments, etc)
if line == "" or not line[0].isalnum():
continue
kvp = line.split(sep,1)
if len(kvp) > 1:
ret[kvp[0].strip()] = kvp[1].strip()
else:
printerr("parseToDict() could not open "+filein)
return ret
def printerr(msg):
cmd = os.path.basename(__file__)
sys.stderr.write( '%s: ERROR: %s\n' % (cmd,msg) )
sys.stderr.flush()
def printlog(msg):
sys.stderr.write(msg)
sys.stderr.write('\n')
sys.stderr.flush()
def printtime(msg):
# use unix 'date' command so output format is identical to called script
runtime = Popen( ["date"], stdout=PIPE, shell=False )
dtm = runtime.communicate()[0]
printlog( '(%s) %s'%(dtm.strip(),msg) )
def createlink(srcPath,destPath):
# using system call as os.symlink() only seems to handle one file at a time and has other limitations
if not srcPath:
printlog("WARNING: Failed to create symlink as source path is empty.")
return False
elif not os.path.exists(srcPath):
printlog("WARNING: Failed to create symlink as source path '%s' was not found."%srcPath)
return False
elif not destPath:
printlog("WARNING: Failed to create symlink as destination path is empty.")
return False
# -nf prevents both warning and odd behavior where target exists and is a directory
os.system('ln -snf "%s" "%s"'%(srcPath,destPath))
if pluginParams['cmdOptions'].logopt:
printlog("Created symlink %s -> %s"%(destPath,srcPath))
return True
def deleteTempFiles(tmpFiles):
if tmpFiles == None or pluginParams['cmdOptions'].keep_temp: return
output_dir = pluginParams['output_dir']
for filename in tmpFiles:
flist = glob( os.path.join(output_dir,filename) )
for f in flist:
if pluginParams['cmdOptions'].logopt:
printlog("Deleting file %s"%f)
os.system('rm -f "%s"'%f)
def createReport(reportName,reportTemplate,reportData):
# configure django to use the templates folder and various installed apps
if not settings.configured:
plugin_dir = pluginParams['plugin_dir'] if 'plugin_dir' in pluginParams else os.path.realpath(__file__)
settings.configure( DEBUG=False, TEMPLATE_DEBUG=False,
INSTALLED_APPS=('django.contrib.humanize',),
TEMPLATE_DIRS=(os.path.join(plugin_dir,'templates'),) )
with open(reportName,'w') as bcsum:
bcsum.write( render_to_string(reportTemplate,safeKeys(reportData)) )
def loadPluginParams():
'''Process default command args and json parameters file to extract TSS plugin environment.'''
global pluginParams
parseCmdArgs()
# copy typical environment data needed for analysis
jsonParams = pluginParams['jsonParams']
pluginParams['plugin_name'] = jsonParams['runinfo'].get('plugin_name','')
pluginParams['plugin_dir'] = jsonParams['runinfo'].get('plugin_dir','.')
pluginParams['genome_id'] = jsonParams['runinfo'].get('library','')
pluginParams['run_name'] = jsonParams['expmeta'].get('run_name','')
pluginParams['analysis_name'] = jsonParams['expmeta'].get('results_name',pluginParams['plugin_name'])
pluginParams['analysis_dir'] = jsonParams['runinfo'].get('analysis_dir','.')
pluginParams['results_dir'] = jsonParams['runinfo'].get('results_dir','.')
pluginParams['logopt'] = '-l' if pluginParams['cmdOptions'].logopt else ''
# get FILEPATH_OUTPUT_STEM or create old default if not available
pluginParams['prefix'] = jsonParams['expmeta'].get('output_file_name_stem','')
if not pluginParams['prefix']:
pluginParams['prefix'] = jsonParams['expmeta'].get('run_name','auto')
if 'results_name' in jsonParams['expmeta']:
pluginParams['prefix'] += "_" + jsonParams['expmeta']['results_name']
# TODO: replace this with url_plugindir when available from startplugin.json
resurl = jsonParams['runinfo'].get('results_dir','.')
if pluginParams['cmdOptions'].isDx:
plgpos = resurl.find('plugins')
else:
plgpos = resurl.find('plugin_out')
if plgpos >= 0:
resurl = os.path.join( jsonParams['runinfo'].get('url_root','.'), resurl[plgpos:] )
pluginParams['results_url'] = resurl
pluginParams['barcoded'] = NONBARCODED not in barcodeInput
# disable run skip if no report exists => plugin has not been run before
pluginParams['report_name'] = pluginParams['plugin_name']+'.html'
pluginParams['block_report'] = os.path.join(pluginParams['results_dir'],pluginParams['plugin_name']+'_block.html')
if not os.path.exists( os.path.join(pluginParams['results_dir'],pluginParams['report_name']) ):
if pluginParams['cmdOptions'].skip_analysis and not pluginParams['cmdOptions'].cmdline:
printlog("Warning: Skip analysis option ignorred as previous output appears to be missing.")
pluginParams['cmdOptions'].skip_analysis = False
# set up plugin specific options depending on auto-run vs. plan vs. GUI
config = pluginParams['config'] = jsonParams['pluginconfig'].copy() if 'pluginconfig' in jsonParams else {}
launchmode = config.get('launch_mode','')
if launchmode == 'Manual':
furbishPluginParams()
elif 'plan' in jsonParams:
# assume that either plan.html or config.html has partially defined the config if launch_mode is defined
if launchmode:
furbishPluginParams()
else:
config['launch_mode'] = 'Autostart with plan configuration'
addAutorunParams(jsonParams['plan'])
else:
config['launch_mode'] = 'Autostart with default configuration'
addAutorunParams()
# check for non-supported reference alignments
refid = pluginParams['genome_id']
if refid.startswith("hg19") or refid.startswith("GRCh37"):
pluginParams['track_set'] = "KIDDAME"
elif refid.startswith("GRCh38") or refid.startswith("hg38"):
pluginParams['track_set'] = "hg38_KIDDAME"
else:
pluginParams['track_set'] = "KIDDAME"
printlog("WARNING: This plugin is intended to be used with reads compatible with a hg19 or GRCh38 reference.")
# plugin configuration becomes basis of results.json file
global pluginResult, pluginReport
pluginResult = configReport()
if pluginParams['barcoded']:
pluginResult['barcodes'] = {}
pluginReport['barcodes'] = {}
def fileName(filepath):
filepath = os.path.basename(filepath)
return os.path.splitext(filepath)[0]
def writeDictToJsonFile(data,filename):
with open(filename,'w') as outfile:
json.dump(data,outfile,indent=2,sort_keys=True)
if pluginParams['cmdOptions'].logopt:
printlog("Created JSON file '%s'"%filename)
def safeKeys(indict):
    # Recursive method to return a dictionary with non alpha-numeric characters in dictionary key names replaced with underscores.
    # Expects indict to be a json-compatible dictionary or array of dictionaries.
    # A non-dictionary object (reference) is returned, i.e. no copy is made as with arrays and dictionaries.
    # lists and tuple (subclass) objects are returned as ordinary lists
if isinstance(indict,(list,tuple)):
nlist = []
for item in indict:
nlist.append(safeKeys(item))
return nlist
if not isinstance(indict,dict):
return indict
retdict = {}
for key,value in indict.iteritems():
retdict[re.sub(r'[^0-9A-Za-z]','_',key)] = safeKeys(value)
return retdict
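# Illustrative sketch (not part of the original plugin code): safeKeys would map
#   {"read depth": 10, "on-target %": [{"chr 1": 0.5}]}
# to
#   {"read_depth": 10, "on_target__": [{"chr_1": 0.5}]}
# since every non-alphanumeric character in a key is replaced by an underscore.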
def ensureFilePrefix(prependLen=0):
global pluginParams
prefix = pluginParams['prefix']
maxfn = prependLen + len(prefix) + max_fileext_len
if maxfn <= max_filename_len: return
    # clip prefix to the maximum allowed size (before prepend/append)
prefix = prefix[:max_filename_len-maxfn]
maxfn = len(prefix)
    # cut back to the nearest '_' if that does not shorten the name by more than 30%
uslen = prefix.rfind('_')
if uslen >= 0.7*maxfn:
prefix = prefix[:uslen]
printlog("WARNING: Output file name stem shortened to ensure output file name length <= %d characters.\nNew stem = %s\n" % (max_filename_len,prefix))
pluginParams['prefix'] = prefix
def runNonBarcoded():
global pluginResult, pluginReport
try:
ensureFilePrefix()
barcodeData = barcodeInput[NONBARCODED]
sample = barcodeData.get('sample','')
sampleTag = ' (%s)'%sample if sample else ''
printlog("\nProcessing nonbarcoded%s...\n" % sampleTag)
createIncompleteReport()
pluginParams['output_dir'] = pluginParams['results_dir']
pluginParams['output_url'] = pluginParams['results_url']
        pluginParams['output_prefix'] =
from imported module. Error was "{}."', e.args[0])
def _ensure_files_exist(exit_on_failure):
failure = False
if not _case_sensitive_regular_file_exists(VERSION_FILENAME):
_error_output('Version file {} was not found!', RE_FILE_EXTENSION.sub('.(py|txt)', VERSION_FILENAME))
failure = True
if not _case_sensitive_regular_file_exists(CHANGELOG_FILENAME):
_error_output('Changelog file {} was not found!', RE_FILE_EXTENSION.sub('.(txt|md|rst)', CHANGELOG_FILENAME))
failure = True
if failure:
_error_output(
'This project is not correctly configured to use `invoke release`! (File names are case sensitive!)'
)
if exit_on_failure:
sys.exit(1)
def _ensure_configured(command):
if not PARAMETERS_CONFIGURED:
_error_output_exit('Cannot `invoke {}` before calling `configure_release_parameters`.', command)
_ensure_files_exist(True)
def _set_map(map_function, iterable):
ret = set()
for i in iterable:
r = map_function(i)
if r:
if getattr(r, '__iter__', None):
ret.update(r)
else:
ret.add(r)
return ret
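# Illustration (hypothetical plugin method): _set_map(lambda p: p.changed_files(), plugins)
# merges each plugin's iterable of results into one de-duplicated set; a plugin
# returning a single value adds just that value, and falsy results are skipped.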
def _get_extra_files_to_commit():
return list(_set_map(lambda plugin: plugin.get_extra_files_to_commit(ROOT_DIRECTORY), RELEASE_PLUGINS))
def _get_version_errors():
return _set_map(lambda plugin: plugin.version_error_check(ROOT_DIRECTORY), RELEASE_PLUGINS)
def _pre_release(old_version):
for plugin in RELEASE_PLUGINS:
plugin.pre_release(ROOT_DIRECTORY, old_version)
def _pre_commit(old_version, new_version):
for plugin in RELEASE_PLUGINS:
plugin.pre_commit(ROOT_DIRECTORY, old_version, new_version)
def _pre_push(old_version, new_version):
for plugin in RELEASE_PLUGINS:
plugin.pre_push(ROOT_DIRECTORY, old_version, new_version)
def _post_release(old_version, new_version, pushed):
for plugin in RELEASE_PLUGINS:
plugin.post_release(ROOT_DIRECTORY, old_version, new_version, pushed)
def _pre_rollback(current_version):
for plugin in RELEASE_PLUGINS:
plugin.pre_rollback(ROOT_DIRECTORY, current_version)
def _post_rollback(current_version, rollback_to_version):
for plugin in RELEASE_PLUGINS:
plugin.post_rollback(ROOT_DIRECTORY, current_version, rollback_to_version)
def configure_release_parameters(module_name, display_name, python_directory=None, plugins=None,
use_pull_request=False, use_tag=True):
global MODULE_NAME, MODULE_DISPLAY_NAME, RELEASE_MESSAGE_TEMPLATE, VERSION_FILENAME, CHANGELOG_FILENAME
global ROOT_DIRECTORY, RELEASE_PLUGINS, PARAMETERS_CONFIGURED, VERSION_FILE_IS_TXT
global USE_PULL_REQUEST, USE_TAG
if PARAMETERS_CONFIGURED:
_error_output_exit('Cannot call configure_release_parameters more than once.')
if not module_name:
_error_output_exit('module_name is required')
if not display_name:
_error_output_exit('display_name is required')
MODULE_NAME = module_name
MODULE_DISPLAY_NAME = display_name
RELEASE_MESSAGE_TEMPLATE = 'Released {} version {{}}'.format(MODULE_DISPLAY_NAME)
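    # For example (illustrative value): display_name 'My Service' produces the
    # template 'Released My Service version {}', with the version filled in later.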
ROOT_DIRECTORY = os.path.normpath(_get_root_directory())
if python_directory:
import_directory = os.path.normpath(os.path.join(ROOT_DIRECTORY, python_directory))
version_file_prefix = os.path.join(
ROOT_DIRECTORY,
'{python}/{module}/version'.format(python=python_directory, module=MODULE_NAME)
)
else:
import_directory = ROOT_DIRECTORY
version_file_prefix = os.path.join(
ROOT_DIRECTORY,
'{module}/version'.format(module=MODULE_NAME)
)
changelog_file_prefix = os.path.join(ROOT_DIRECTORY, 'CHANGELOG')
if _case_sensitive_regular_file_exists('{}.txt'.format(version_file_prefix)):
VERSION_FILE_IS_TXT = True
VERSION_FILENAME = '{}.txt'.format(version_file_prefix)
else:
VERSION_FILE_IS_TXT = False
VERSION_FILENAME = '{}.py'.format(version_file_prefix)
CHANGELOG_FILENAME = '{}.txt'.format(changelog_file_prefix)
if not _case_sensitive_regular_file_exists('{}.txt'.format(changelog_file_prefix)):
if _case_sensitive_regular_file_exists('{}.md'.format(changelog_file_prefix)):
CHANGELOG_FILENAME = '{}.md'.format(changelog_file_prefix)
elif _case_sensitive_regular_file_exists('{}.rst'.format(changelog_file_prefix)):
CHANGELOG_FILENAME = '{}.rst'.format(changelog_file_prefix)
if import_directory not in sys.path:
sys.path.insert(0, import_directory)
if getattr(plugins, '__iter__', None):
RELEASE_PLUGINS = plugins
USE_PULL_REQUEST = use_pull_request
USE_TAG = use_tag
PARAMETERS_CONFIGURED = True
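# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of how a project's tasks.py might call configure_release_parameters.
# The import path `invoke_release.tasks` and the example module/package names are
# assumptions for illustration only; the keyword arguments match the signature above.
#
#     from invoke_release.tasks import *  # exposes version/branch/release/rollback_release tasks (assumed)
#
#     configure_release_parameters(
#         module_name='my_library',      # hypothetical package containing version.py/version.txt
#         display_name='My Library',     # human-readable name used in the release commit message
#         python_directory='python',     # only needed if the package lives under python/my_library
#         use_pull_request=False,        # push release commits directly instead of via a PR branch
#         use_tag=True,                  # tag the release commit
#     )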
@task
def version(_):
"""
Prints the "Invoke Release" version and the version of the current project.
"""
if not PARAMETERS_CONFIGURED:
_error_output_exit('Cannot `invoke version` before calling `configure_release_parameters`.')
_standard_output('Python {}', sys.version.split('\n')[0].strip())
from invoke import __version__ as invoke_version
_standard_output('Invoke {}', invoke_version)
from invoke_release.version import __version__
_standard_output('Invoke Release {}', __version__)
_ensure_files_exist(False)
for error in _get_version_errors():
_error_output(error)
_standard_output('{module} {version}', module=MODULE_DISPLAY_NAME, version=_import_version_or_exit())
_standard_output('Detected Git branch: {}', _get_branch_name(False))
_standard_output('Detected version file: {}', VERSION_FILENAME)
_standard_output('Detected changelog file: {}', CHANGELOG_FILENAME)
@task(help={
'verbose': 'Specify this switch to include verbose debug information in the command output.',
'no-stash': 'Specify this switch to disable stashing any uncommitted changes (by default, changes that have '
'not been committed are stashed before the branch is created).',
})
def branch(_, verbose=False, no_stash=False):
"""
Creates a branch from a release tag for creating a new patch or minor release from that branch.
"""
_ensure_configured('release')
from invoke_release.version import __version__
_standard_output('Invoke Release {}', __version__)
_setup_task(no_stash, verbose)
try:
_fetch_tags(verbose)
tags = _get_tag_list(verbose)
branch_version = _prompt('Enter a version tag from which to create a new branch (or "exit"):').lower()
if not branch_version or branch_version == INSTRUCTION_EXIT:
raise ReleaseExit()
if branch_version not in tags:
raise ReleaseFailure('Version number {} not in the list of available tags.'.format(branch_version))
_v = LooseVersion(branch_version)
minor_branch = '.'.join(list(map(six.text_type, _v.version[:2])) + ['x'])
major_branch = '.'.join(list(map(six.text_type, _v.version[:1])) + ['x', 'x'])
proceed_instruction = _prompt(
'Using tag {tag}, would you like to create a minor branch for patch versions (branch {minor}, '
'recommended), or a major branch for minor versions (branch {major})? (MINOR/major/exit):',
tag=branch_version,
minor=minor_branch,
major=major_branch,
)
if proceed_instruction == INSTRUCTION_EXIT:
raise ReleaseExit()
new_branch = major_branch if proceed_instruction == INSTRUCTION_MAJOR else minor_branch
if USE_PULL_REQUEST:
if _is_branch_on_remote(verbose, new_branch):
_standard_output(
'Branch {branch} exists on remote. Creating local tracking branch.',
branch=new_branch,
)
created = _create_local_tracking_branch(verbose, new_branch)
if not created:
raise ReleaseFailure(
'Could not create local tracking branch {branch}.\n'
'Does a local branch named {branch} already exist?\n'
'Delete or rename your local branch {branch} and try again.'.format(branch=new_branch),
)
else:
_standard_output(
'Branch {branch} does not exist on remote.\n'
'Creating branch, and pushing to remote.',
branch=new_branch,
)
_create_branch_from_tag(verbose, branch_version, new_branch)
_push_branch(verbose, new_branch)
cherry_pick_branch_suffix = _prompt(
'Now we will create the branch where you will apply your fixes. We\n'
'need a name to uniquely identify your feature branch. I suggest using\n'
'the JIRA ticket id, e.g. EB-120106, of the issue you are working on:'
)
if not cherry_pick_branch_suffix:
raise ReleaseFailure('You must enter a name to identify your feature branch.')
_create_branch(
verbose,
'cherry-pick-{hotfix_branch_name}-{suffix}'.format(
hotfix_branch_name=new_branch,
suffix=cherry_pick_branch_suffix,
)
)
else:
_create_branch_from_tag(verbose, branch_version, new_branch)
push_instruction = _prompt(
'Branch {} created. Would you like to go ahead and push it to remote? (y/N):',
new_branch,
).lower()
if push_instruction and push_instruction == INSTRUCTION_YES:
_push_branch(verbose, new_branch)
_standard_output('Branch process is complete.')
except ReleaseFailure as e:
_error_output(e.args[0])
except subprocess.CalledProcessError as e:
_error_output(
'Command {command} failed with error code {error_code}. Command output:\n{output}',
command=e.cmd,
error_code=e.returncode,
output=e.output.decode('utf8'),
)
except (ReleaseExit, KeyboardInterrupt):
_standard_output('Canceling branch!')
finally:
_cleanup_task(verbose)
@task(help={
'verbose': 'Specify this switch to include verbose debug information in the command output.',
'no-stash': 'Specify this switch to disable stashing any uncommitted changes (by default, changes that have '
'not been committed are stashed before the release is executed).',
})
def release(_, verbose=False, no_stash=False):
"""
Increases the version, adds a changelog message, and tags a new version of this project.
"""
_ensure_configured('release')
from invoke_release.version import __version__
_standard_output('Invoke Release {}', __version__)
__version__ = _import_version_or_exit()
version_regular_expression = RE_VERSION
branch_name = _get_branch_name(verbose)
if branch_name != BRANCH_MASTER:
if not RE_VERSION_BRANCH_MAJOR.match(branch_name) and not RE_VERSION_BRANCH_MINOR.match(branch_name):
_error_output(
'You are currently on branch "{}" instead of "master." You should only release from master or version '
'branches, and this does not appear to be a version branch (must match \\d+\\.x\\.x or \\d+.\\d+\\.x). '
'\nCanceling release!',
branch_name,
)
return
instruction = _prompt(
'You are currently on branch "{branch}" instead of "master." Are you sure you want to continue releasing '
'from "{branch}?" You should only do this from version branches, and only when higher versions have been '
'released from the parent branch. (y/N):',
branch=branch_name,
).lower()
if instruction != INSTRUCTION_YES:
_standard_output('Canceling release!')
return
version_regular_expression = re.compile(
r'^' + branch_name.replace('.x', r'.\d+').replace('.', r'\.') + r'([a-zA-Z\d.-]*[a-zA-Z\d]+)?$',
)
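# For illustration (derived from the two replace() calls above): on a minor branch
# named "2.3.x" this builds the pattern r'^2\.3\.\d+([a-zA-Z\d.-]*[a-zA-Z\d]+)?$',
# so only 2.3.* versions (optionally with a suffix such as a pre-release tag) are
# accepted when releasing from that branch.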
try:
_pre_release(__version__)
except ReleaseFailure as e:
_error_output_exit(e.args[0])
_setup_task(no_stash, verbose)
try:
_standard_output('Releasing {}...', MODULE_DISPLAY_NAME)
_standard_output('Current version: {}', __version__)
release_version = _prompt('Enter a new version (or "exit"):').lower()
if not release_version or release_version == INSTRUCTION_EXIT:
raise ReleaseExit()
if not version_regular_expression.match(release_version):
raise ReleaseFailure(
'Invalid version specified: {version}. Must match "{regex}".'.format(
version=release_version,
regex=version_regular_expression.pattern,
),
)
# Deconstruct and reconstruct the version, to make sure it is consistent everywhere
version_info = release_version.split('.', 2)
end_parts = list(filter(None, RE_SPLIT_AFTER_DIGITS.split(version_info[2], 1)))
if len(end_parts) > 1:
version_info[0] = int(version_info[0])
version_info[1] = int(version_info[1])
version_info[2] = int(end_parts[0])
version_info.append(end_parts[1].strip(' .-_'))
else:
version_info = list(map(int, version_info))
release_version = '-'.join(
filter(None, ['.'.join(map(six.text_type, version_info[:3])), (version_info[3:] or [None])[0]])
) # This must match the code in VERSION_VARIABLE_TEMPLATE at the top of this file
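# For example (assuming RE_SPLIT_AFTER_DIGITS, defined elsewhere in this file, splits
# the trailing patch component after its leading digits): "1.2.3" -> [1, 2, 3] -> "1.2.3",
# and "1.2.3rc1" -> [1, 2, 3, 'rc1'] -> "1.2.3-rc1".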
if not (LooseVersion(release_version) > LooseVersion(__version__)):
raise ReleaseFailure(
'New version number {new_version} is not greater than current version {old_version}.'.format(
new_version=release_version,
old_version=__version__,
),
)
if _does_tag_exist_locally(release_version, verbose) or _is_tag_on_remote(release_version, verbose):
raise ReleaseFailure(
'Tag {} already exists locally or remotely (or both). Cannot create version.'.format(release_version),
)
cl_header, cl_message, cl_footer = _prompt_for_changelog(verbose)
instruction = _prompt('The release has not yet been committed. Are you ready to commit it? (Y/n):').lower()
if instruction and instruction != INSTRUCTION_YES:
raise ReleaseExit()
_standard_output('Releasing {module} version: {version}', module=MODULE_DISPLAY_NAME, version=release_version)
_write_to_version_file(release_version, version_info, verbose)
_write_to_changelog_file(release_version, cl_header, cl_message, cl_footer, verbose)
_pre_commit(__version__, release_version)
if USE_PULL_REQUEST:
current_branch_name = _get_branch_name(verbose)
branch_name = 'invoke-release-{}-{}'.format(current_branch_name, release_version)
_create_branch(verbose, branch_name)
_commit_release_changes(release_version, cl_message, verbose)
_pre_push(__version__, release_version)
if USE_TAG:
_tag_branch(release_version, cl_message, verbose)
pushed_or_rolled_back = _push_release_changes(release_version, branch_name, verbose)
if USE_PULL_REQUEST:
_checkout_branch(verbose, current_branch_name)
_post_release(__version__, release_version, pushed_or_rolled_back)
if USE_PULL_REQUEST:
_standard_output("You're almost done! The release process will be complete when you create "
"a pull request and it is merged.")
else:
_standard_output('Release process is complete.')
except ReleaseFailure as e:
_error_output(e.args[0])
except subprocess.CalledProcessError as e:
_error_output(
'Command {command} failed with error code {error_code}. Command output:\n{output}',
command=e.cmd,
error_code=e.returncode,
output=e.output.decode('utf8'),
)
except (ReleaseExit, KeyboardInterrupt):
_standard_output('Canceling release!')
finally:
_cleanup_task(verbose)
@task(help={
'verbose': 'Specify this switch to include verbose debug information in the command output.',
'no-stash': 'Specify this switch to disable stashing any uncommitted changes (by default, changes that have '
'not been committed are stashed before the release is rolled back).',
})
def rollback_release(_,
# trackingTools.py -- from the enwudz/pytracker repository
#!/usr/bin/python
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import glob
import sys
import os
from datetime import datetime
import analysisTools
import csv
'''
stuff to tweak to change tracking parameters:
grayBlur: the degree of blurring
(3,3) is low, used for clear Daphnia movies
(7,7) is good for fish
DiffImage: the pixel threshold for what is a real difference between frames.
Typically 12
smoothByAveraging: the buffer size (# of frames to average)
Typically 4
'''
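# --- Illustrative sketch (not part of the original module) ---
# A minimal frame-differencing loop built from the helpers defined below. It assumes a
# movie path (or 0 for a camera) is passed on the command line and that consecutive
# blurred frames are compared with diffImage at the default pixel threshold.
#
#     videoStream, videoType = getVideoStream(sys.argv)
#     cap = cv2.VideoCapture(videoStream)
#     ret, frame = cap.read()
#     stored = grayBlur(frame)
#     while True:
#         ret, frame = cap.read()
#         if not ret:
#             break
#         current = grayBlur(frame)
#         diff = diffImage(stored, current, getPixThreshold())
#         # diff is a 0/1 image: sum it (or bincount it per ROI) to measure motion
#         stored = current
#     cap.release()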
#### things to modify / tweak to change tracking parameters
def getPixThreshold(pixThreshold = 25):
return pixThreshold
def getSmoothBufferSize(buffer = 4):
return buffer
#### Image and Video tools
# retrieve first frame of movie
def getFirstFrame(mov):
cap = cv2.VideoCapture(mov)
ret, img = cap.read()
cap.release()
return img
# set up video stream
def getVideoStream(systemArguments):
if len(systemArguments) < 2:
exit('usage: python ' + systemArguments[0] + ' [movFile OR 0]')
if systemArguments[1] == '0':
print('Starting camera')
videoStream = 0 # camera
videoType = 0
else:
videoStream = systemArguments[1] # saved movie
print('reading ' + videoStream)
videoType = 1
return videoStream, videoType
# find difference between current frame and stored frame
def diffImage(storedFrame,currentFrame,pixThreshold=0,showIt=0):
if pixThreshold == 0:
pixThreshold = getPixThreshold()
diff = cv2.absdiff(storedFrame,currentFrame)
_,diff = cv2.threshold(diff,pixThreshold,255,cv2.THRESH_BINARY)
if showIt == 1:
cv2.imshow('Press q to exit',diff) # check difference image
diff = diff / 255
return diff
# convert to grayscale and blur
def grayBlur(image):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# gaussian kernel size
# needs to be positive, and odd. Higher values = more blurry
#kernelSize = (3,3) # for zebrafish, daphnia
kernelSize = (11,11)
return cv2.GaussianBlur(gray,kernelSize,0)
# quickly show an image in matplotlib
def quickShow(img):
plt.imshow(img,cmap='gray')
plt.xticks([]),plt.yticks([])
plt.show()
return
# load the saved first frame and the saved mask.
def loadImageAndMask():
# Load the setup image from this experiment
i = cv2.imread('frame1.png')
m = cv2.imread('mask.png',0)
# cv2.imread returns None (it does not raise) when a file cannot be read
if i is None or m is None:
exit('Cannot open image and/or mask file')
return(i,m)
# return a subset of an image based on corner coordinates
def getImageSliceFromCorners(img,corner):
lowerLeft = corner[0]
upperRight = corner[1]
imageSlice = img[upperRight[1]:lowerLeft[1],lowerLeft[0]:upperRight[0]]
return imageSlice
# adjust camera resolution
def adjustResolution(cap,cameraType):
if (cameraType == 1):
xres=1280
yres=720
elif (cameraType == 2):
xres=825
yres=480
else:
# no preset for this camera type; keep the camera's default resolution
print('Using default resolution')
return cap
print('Adjusting camera resolution to %s x %s' % (str(xres), str(yres)))
cap.set(3,xres)
cap.set(4,yres)
return cap
# from a color map, make N evenly spaced colors
def make_N_colors(cmap_name, N):
cmap = cm.get_cmap(cmap_name, N)
cmap = cmap(np.arange(N))[:,0:3]
return [tuple(i*255) for i in cmap]
# calculate or acquire elapsed time
def getElapsedTime(videoType,vidInfo,startTime,endTime):
if videoType == 0:
vidInfo['vidLength'] = (endTime-startTime).days * (24*60*60) + (endTime-startTime).seconds
vidInfo['startTime'] = startTime
vidInfo['endTime'] = endTime
else:
vidInfo['vidLength'] = float(input('Enter movie length (in seconds): '))
return vidInfo
#### ROI tools
# find ROIs on the mask file
# this is here because I wanted to minimize instances of cv2.findContours
def findROIsOnMask(m):
contours = cv2.findContours(m.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# OpenCV 3.x returns (image, contours, hierarchy); 2.x and 4.x return (contours, hierarchy)
major_ver = cv2.__version__.split('.')[0]
contours = contours[1] if major_ver == '3' else contours[0]
return contours
# set non-ROI areas to BLACK
def removeNonROI(img,mask):
img[np.where(mask==0)]=0
return img
# examine a masked ROI, and guess where the object is by finding contours (not great)
def guessTheObject(roiSlice,maskSlice):
# invert image so that objects are light, background dark
inverted = 255-roiSlice
# set area outside of roi to black
inverted = removeNonROI(inverted,maskSlice)
# threshold the image
_,th = cv2.threshold(inverted,170,255,cv2.THRESH_BINARY)
# find contours
contours = findROIsOnMask(th)
if len(contours) > 0:
# Find the index of the largest contour
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cnt=contours[max_index]
# find center coordinates of largest contour
M = cv2.moments(cnt)
try:
cX = int(M["m10"] / M["m00"])
except:
cX = 0
try:
cY = int(M["m01"] / M["m00"])
except:
cY = 0
else:
# can't find a good object. Set x,y as CENTER of roiSlice
xspan,yspan = np.shape(roiSlice)
cX = int(round(xspan/2))
cY = int(round(yspan/2))
# cX = 0
# cY = 0
return (cX,cY)
# make and save a SINGLE circular ROI
def makeCircularROIMask(gray,center,radius):
# img should be grayscale
blank = np.zeros(np.shape(gray))
cv2.circle(blank, center, radius, (255), -1)
cv2.imwrite('mask.png',blank.astype('uint8'))
return blank
# setup a single, circular ROI on a video file
# wish: code to select (on an image) center and radius of circle
def singleCircle(vidFile, center=(45,45), radius = 35):
fr = getFirstFrame(vidFile)
fr = grayBlur(fr)
# make single circular ROI mask for testing
mask = makeCircularROIMask(fr,center,radius)
# save mask
cv2.imwrite('mask.png',mask)
# subtract out nonROI
subt = removeNonROI(fr,mask)
# show the masked image
quickShow(subt)
return mask
# find all of the pixels that are selected by the user (GREEN rectangle(s))
def greenThreshImage(mask):
# everything that is not GREEN in mask is set to black
mask[mask != (0,255,0)] = 0
# convert to thresholded image
gmask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
_,gmask = cv2.threshold(gmask,12,255,0)
return gmask
# return the # of ROIs in a mask
def countROIs():
return len(findROIsOnMask(cv2.imread('mask.png',0)))
# find the ROIs in a thresholded image and assign each a different number
def findAndNumberROIs():
# input needs to be grayscale, as uint8 ... read from file
mask = cv2.imread('mask.png',0)
# cv2.imread returns None (it does not raise) when the file cannot be read
if mask is None:
exit('Cannot open mask file')
# find shapes in mask
contours = findROIsOnMask(mask)
# assign pixel values to each ROI evenly across 255 (np.floor(x)) and save mask
pixVals = np.floor(np.linspace(10,255, len(contours) + 2))
# or assign pixel values sequentially
#pixVals = range(len(contours)+1)
# draw the contours on the mask
for h,cnt in enumerate(reversed(contours)):
cv2.drawContours(mask,[cnt],0,pixVals[h+1],-1)
#cv2.drawContours(mask,cnt,0,pixVals[h+1],-1)
# save the new mask that is numbered by ROI
cv2.imwrite('mask.png',mask)
# return mask, number of contours
return mask, len(contours)
# assign an integer value to each ROI in a mask
def convertMaskToWeights(mask):
vals = np.unique(mask)
for i in range(len(vals)):
mask[mask==vals[i]]=i
mask = mask.astype(int)
w = mask.ravel() # convert to single row for weights in bincount
return mask,w
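# Presumed usage of the weights returned above (an assumption, not shown in this file):
# combined with a 0/1 difference image from diffImage, np.bincount gives the number of
# changed pixels per ROI in a single call, e.g.
#     mask, w = convertMaskToWeights(cv2.imread('mask.png', 0))
#     counts = np.bincount(w, weights=diff.ravel())  # counts[i] = changed pixels in ROI i
# where counts[0] corresponds to the background (pixel value 0 in the mask).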
# load an ROI mask and get a list of corner coordinates
# of the inscribing rectangle for each ROI
def getROICornersFromMask():
corners=[]
m = cv2.imread('mask.png',0)
contours = findROIsOnMask(m)
for cnt in range(len(contours)):
coords = contours[cnt]
xPoints = [c[0][0] for c in coords]
yPoints = [c[0][1] for c in coords]
minX = np.min(xPoints)
maxX = np.max(xPoints)
minY = np.min(yPoints)
maxY = np.max(yPoints)
lowerLeft = (minX,maxY)
upperRight = (maxX,minY)
corners.append([lowerLeft,upperRight])
return list(reversed(corners))
# Generate a figure showing the ROImask on image
def showROI():
(img,m) = loadImageAndMask()
f = plt.figure(num=None, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')
pic = plt.subplot(1,1,1)
pic.imshow(img) # show video image
#pic.hold(True) # deprecated
# need to find and number ROI
map = pic.imshow(m,alpha=0.3) # superimpose ROI mask
numROI = str(len(np.unique(m)) - 1)
plt.title('Number of ROI = ' + numROI)
plt.xticks([]),plt.yticks([]) # clear axis ticks and labels
# color Bar
labs = ['background']
for a in range(1,len(np.unique(m))):
labs.append(str(a))
# labels for color bar; wishlist: reduce if > 16
cbar = f.colorbar(map,ticks=np.unique(m))
cbar.ax.get_yaxis().labelpad=20
cbar.ax.set_yticklabels(labs)
cbar.ax.invert_yaxis()
cbar.set_label('ROI #', rotation = 270, size = 16)
deleteData('roiMask*.png')
savedRoiMask = 'roiMask' + numROI + '.png'
plt.savefig(savedRoiMask)
plt.show()
return
# find the corners of the user-selected GREEN rectangle
# as [x y] where 0 0 = lower left of image]
def getCornersOfRectangle(m):
ret,thresh = cv2.threshold(m,1,255,0)
contours = findROIsOnMask(m)
lowerLeft = tuple(contours[0][1][0]) # as (x,y) where 0 0 = lower left of image]
upperRight = tuple(contours[0][3][0])
return (lowerLeft, upperRight)
# given a number of wells in a plate, return the # columns and # rows
def getRowsColsFromNumWells(numWells):
# first get a list of the factors of the number
factors = []
for i in np.arange(numWells)+1:
if numWells % i == 0:
factors.append(i)
# if factor list is even, get the numbers around middle
if len(factors) % 2 == 0:
mid = int(len(factors)/2)
rows = factors[mid-1]
cols = factors[mid]
else:
# if factor list is odd, get median
cols = int(np.median(factors))
rows = cols
return rows, cols
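# Worked examples (derived from the factor logic above):
#     getRowsColsFromNumWells(96) -> (8, 12)   # standard 96-well plate
#     getRowsColsFromNumWells(24) -> (4, 6)
#     getRowsColsFromNumWells(9)  -> (3, 3)    # odd number of factors: use the median factor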
def getXspanYspanFromRectangle(lowerLeft,upperRight):
xspan = upperRight[0] - lowerLeft[0]
yspan = lowerLeft[1] - upperRight[1]
return (xspan,yspan)
# make mask of CIRCULAR ROI, loading a mask and input well#
def gridCircles(numWells):
numRows,numCols = getRowsColsFromNumWells(numWells)
m = cv2.imread('mask.png',0)
vals = np.unique(m)
if len(vals) > 2:
sys.exit('Mask has more than one shape!')
# identify enclosing rectangle for entire plate
(lowerLeft, upperRight) = getCornersOfRectangle(m)
(xspan,yspan) = getXspanYspanFromRectangle(lowerLeft,upperRight)
# find radius
wallSize = 0.1 # expressed as proportion of fullRadius
fullRadius = int(np.round(xspan / (numCols * 2)))
radius = int(np.round((1-wallSize)*fullRadius))
# find coordinate of first circle
firstX = int(np.round(fullRadius)) + lowerLeft[0]
firstY = int(np.round(fullRadius)) + upperRight[1]
# find centers
xPoints = [firstX + (2*fullRadius*i) for i in np.arange(numCols)]
yPoints = [firstY + (2*fullRadius*i) for i in np.arange(numRows)]
centers = [(x,y) for y in yPoints for x in xPoints]
# draw circles on new blank image
b = np.zeros(np.shape(m))
[cv2.circle(b, i, radius, (255), -1) for i in centers]
return b
# make mask of rectangular/square ROI, loading a mask and input well#
def gridRectangles(numWells):
numRows,numCols = getRowsColsFromNumWells(numWells)
#numRows,numCols = 1,3
m = cv2.imread('mask.png',0)
vals = np.unique(m)
if len(vals) > 2:
sys.exit('Mask has more than one shape!')
(lowerLeft, upperRight) = getCornersOfRectangle(m)
(xspan,yspan) = getXspanYspanFromRectangle(lowerLeft,upperRight)
innerRect = 0 # 1 if want inner + outer rectangles
wallSize = 0.04 # usually 0.1
# print ('xspan = ' + str(xspan))
# print ('yspan = ' + str(yspan))
# print ('numCols = ' + str(numCols))
# print ('numRows = ' + str(numRows))
# Find dimensions of ROI's and walls
xWidth = int(round (xspan / (numCols + (numCols * wallSize) - wallSize))) # algebra!
yWidth = int(round (yspan / (numRows + (numRows * wallSize) - wallSize)))
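# The algebra above comes from requiring
#     xspan = numCols*xWidth + (numCols - 1)*wallSize*xWidth
# (numCols wells separated by numCols-1 walls, each wall a fraction wallSize of a well),
# which rearranges to xWidth = xspan / (numCols + numCols*wallSize - wallSize);
# the same reasoning gives yWidth for the rows.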
xWallWidth = int(np.floor(wallSize * xWidth))
yWallWidth = int(np.floor(wallSize * yWidth))
# these will print coordinates of large rectangle
#print upperLeft
#print lowerRight
#print 'x width = ' + str(xWidth) + '; wallSize = ' + str(xWallWidth)
#print 'y width = ' + str(yWidth) + '; wallSize = ' + str(yWallWidth)
# make NEW MASK
# starting in upper left, find coordinates of each ROI
# and replace in plateMask current ROI number
# then move on to the next row and do the same thing
gridMask = np.zeros(m.shape)
roiNumber = 1
xStart
Process points in the list ``v``, with saturation at primes up to
``sat``. If ``sat`` is zero (the default), do no saturation.
INPUT:
- ``v`` (list of 3-tuples or lists of ints or Integers) -- a
list of triples of integers, which define points on the
curve.
- ``sat`` (int, default 0) -- saturate at primes up to ``sat``, or at
*all* primes if ``sat`` is zero.
OUTPUT:
None. But note that if the ``verbose`` flag is set, then there
will be some output as a side-effect.
EXAMPLES::
sage: E = mwrank_EllipticCurve([0,0,1,-7,6])
sage: E.gens()
[[1, -1, 1], [-2, 3, 1], [-14, 25, 8]]
sage: EQ = mwrank_MordellWeil(E)
sage: EQ.process([[1, -1, 1], [-2, 3, 1], [-14, 25, 8]])
P1 = [1:-1:1] is generator number 1
P2 = [-2:3:1] is generator number 2
P3 = [-14:25:8] is generator number 3
::
sage: EQ.points()
[[1, -1, 1], [-2, 3, 1], [-14, 25, 8]]
Example to illustrate the saturation parameter ``sat``::
sage: E = mwrank_EllipticCurve([0,0,1,-7,6])
sage: EQ = mwrank_MordellWeil(E)
sage: EQ.process([[1547, -2967, 343], [2707496766203306, 864581029138191, 2969715140223272], [-13422227300, -49322830557, 12167000000]], sat=20)
P1 = [1547:-2967:343] is generator number 1
...
Gained index 5, new generators = [ [-2:3:1] [-14:25:8] [1:-1:1] ]
sage: EQ.points()
[[-2, 3, 1], [-14, 25, 8], [1, -1, 1]]
Here the processing was followed by saturation at primes up to
20. Now we prevent this initial saturation::
sage: E = mwrank_EllipticCurve([0,0,1,-7,6])
sage: EQ = mwrank_MordellWeil(E)
sage: EQ.process([[1547, -2967, 343], [2707496766203306, 864581029138191, 2969715140223272], [-13422227300, -49322830557, 12167000000]], sat=0)
P1 = [1547:-2967:343] is generator number 1
P2 = [2707496766203306:864581029138191:2969715140223272] is generator number 2
P3 = [-13422227300:-49322830557:12167000000] is generator number 3
sage: EQ.points()
[[1547, -2967, 343], [2707496766203306, 864581029138191, 2969715140223272], [-13422227300, -49322830557, 12167000000]]
sage: EQ.regulator()
375.42919921875
sage: EQ.saturate(2) # points were not 2-saturated
saturating basis...Saturation index bound = 93
WARNING: saturation at primes p > 2 will not be done;
...
Gained index 2
New regulator = 93.857300720636393209
(False, 2, '[ ]')
sage: EQ.points()
[[-2, 3, 1], [2707496766203306, 864581029138191, 2969715140223272], [-13422227300, -49322830557, 12167000000]]
sage: EQ.regulator()
93.8572998046875
sage: EQ.saturate(3) # points were not 3-saturated
saturating basis...Saturation index bound = 46
WARNING: saturation at primes p > 3 will not be done;
...
Gained index 3
New regulator = 10.4285889689595992455
(False, 3, '[ ]')
sage: EQ.points()
[[-2, 3, 1], [-14, 25, 8], [-13422227300, -49322830557, 12167000000]]
sage: EQ.regulator()
10.4285888671875
sage: EQ.saturate(5) # points were not 5-saturated
saturating basis...Saturation index bound = 15
WARNING: saturation at primes p > 5 will not be done;
...
Gained index 5
New regulator = 0.417143558758383969818
(False, 5, '[ ]')
sage: EQ.points()
[[-2, 3, 1], [-14, 25, 8], [1, -1, 1]]
sage: EQ.regulator()
0.4171435534954071
sage: EQ.saturate() # points are now saturated
saturating basis...Saturation index bound = 3
Checking saturation at [ 2 3 ]
Checking 2-saturation
Points were proved 2-saturated (max q used = 11)
Checking 3-saturation
Points were proved 3-saturated (max q used = 13)
done
(True, 1, '[ ]')
"""
if not isinstance(v, list):
raise TypeError("v (=%s) must be a list"%v)
sat = int(sat)
for P in v:
if not isinstance(P, (list,tuple)) or len(P) != 3:
raise TypeError("v (=%s) must be a list of 3-tuples (or 3-element lists) of ints"%v)
self.__mw.process(P, sat)
def regulator(self):
"""
Return the regulator of the points in this subgroup of
the Mordell-Weil group.
.. note::
``eclib`` can compute the regulator to arbitrary precision,
but the interface currently returns the output as a ``float``.
OUTPUT:
(float) The regulator of the points in this subgroup.
EXAMPLES::
sage: E = mwrank_EllipticCurve([0,-1,1,0,0])
sage: E.regulator()
1.0
sage: E = mwrank_EllipticCurve([0,0,1,-7,6])
sage: E.regulator()
0.417143558758384
"""
return self.__mw.regulator()
def rank(self):
"""
Return the rank of this subgroup of the Mordell-Weil group.
OUTPUT:
(int) The rank of this subgroup of the Mordell-Weil group.
EXAMPLES::
sage: E = mwrank_EllipticCurve([0,-1,1,0,0])
sage: E.rank()
0
A rank 3 example::
sage: E = mwrank_EllipticCurve([0,0,1,-7,6])
sage: EQ = mwrank_MordellWeil(E)
sage: EQ.rank()
0
sage: EQ.regulator()
1.0
The preceding output is correct, since we have not yet tried
to find any points on the curve either by searching or
2-descent::
sage: EQ
Subgroup of Mordell-Weil group: []
Now we do a very small search::
sage: EQ.search(1)
P1 = [0:1:0] is torsion point, order 1
P1 = [-3:0:1] is generator number 1
saturating up to 20...Checking 2-saturation
...
P4 = [12:35:27] = 1*P1 + -1*P2 + -1*P3 (mod torsion)
sage: EQ
Subgroup of Mordell-Weil group: [[1:-1:1], [-2:3:1], [-14:25:8]]
sage: EQ.rank()
3
sage: EQ.regulator()
0.4171435534954071
We do in fact now have a full Mordell-Weil basis.
"""
return self.__mw.rank()
def saturate(self, max_prime=-1, odd_primes_only=False):
r"""
Saturate this subgroup of the Mordell-Weil group.
INPUT:
- ``max_prime`` (int, default -1) -- saturation is performed for
all primes up to ``max_prime``. If `-1` (the default), an
upper bound is computed for the primes at which the subgroup
may not be saturated, and this is used; however, if the
computed bound is greater than a value set by the ``eclib``
library (currently 97) then no saturation will be attempted
at primes above this.
- ``odd_primes_only`` (bool, default ``False``) -- only do
saturation at odd primes. (If the points have been found
via :meth:`two_descent()` they should already be 2-saturated.)
OUTPUT:
(3-tuple) (``ok``, ``index``, ``unsatlist``) where:
- ``ok`` (bool) -- ``True`` if and only if the saturation was
provably successful at all primes attempted. If the default
was used for ``max_prime`` and no warning was output about
the computed saturation bound being too high, then ``True``
indicates that the subgroup is saturated at *all*
primes.
- ``index`` (int) -- the index of the group generated by the
original points in their saturation.
- ``unsatlist`` (list of ints) -- list of primes at which
saturation could not be proved or achieved. Increasing the
decimal precision should correct this, since it happens when
a linear combination of the points appears to be a multiple
of `p` but cannot be divided by `p`. (Note that ``eclib``
uses floating point methods based on elliptic logarithms to
divide points.)
.. note::
We emphasize that if this function returns ``True`` as the
first return argument (``ok``), and if the default was used for the
parameter ``max_prime``, then the points in the basis after
calling this function are saturated at *all* primes,
i.e., saturating at the primes up to ``max_prime`` is
sufficient to saturate at all primes. Note that the
function might not have needed to saturate at all primes up
to ``max_prime``. It has worked out what prime you need to
saturate up to, and that prime might be smaller than ``max_prime``.
.. note::
Currently (May 2010), this does not remember the result of
calling :meth:`search()`. So calling :meth:`search()` up
to height 20 then calling :meth:`saturate()` results in
another search up to height 18.
EXAMPLES::
sage: E = mwrank_EllipticCurve([0,0,1,-7,6])
sage: EQ = mwrank_MordellWeil(E)
We initialise with three points which happen to be 2, 3 and 5
times the generators of this rank 3 curve. To prevent
automatic saturation at this stage we set the parameter
``sat`` to 0 (which is in fact the default)::
sage: EQ.process([[1547, -2967, 343], [2707496766203306, 864581029138191, 2969715140223272], [-13422227300, -49322830557, 12167000000]], sat=0)
P1 = [1547:-2967:343] is generator number 1
P2 = [2707496766203306:864581029138191:2969715140223272] is generator number 2
P3 = [-13422227300:-49322830557:12167000000] is generator number 3
sage: EQ
Subgroup of Mordell-Weil group: [[1547:-2967:343], [2707496766203306:864581029138191:2969715140223272], [-13422227300:-49322830557:12167000000]]
sage: EQ.regulator()
375.42919921875
Now we saturate at `p=2`, and gain index 2::
sage: EQ.saturate(2) # points were not 2-saturated
saturating basis...Saturation index bound = 93
WARNING: saturation at primes p > 2 will not be done;
...
Gained index 2
New regulator = 93.857300720636393209
(False, 2, '[ ]')
sage: EQ
Subgroup of Mordell-Weil group: [[-2:3:1], [2707496766203306:864581029138191:2969715140223272], [-13422227300:-49322830557:12167000000]]
sage: EQ.regulator()
93.8572998046875
Now we saturate at `p=3`, and gain index 3::
sage: EQ.saturate(3) # points were not 3-saturated
saturating basis...Saturation index bound = 46
WARNING: saturation at primes p > 3 will not be done;
...
Gained index 3
listaips.append(str(i))
# check whether any fixed IP falls inside the DHCP range
checkfixosdhcp = set(listaips).intersection(set(dhcprange))
if checkfixosdhcp:
raise ValidationError("Existe um Ip fixo configurado dentro do range de DHCP, por favor corrija.")
def __unicode__(self):
# return self.name
#return u'%s (%s) - VLAN: %s' % (self.name, self.address, self.vln)
return u'%s (%s)' % (self.name, self.address)
class Ownerid(models.Model):
class Meta:
verbose_name = u'Patrimônio'
verbose_name_plural = u'Patrimônios'
ordering = ['num']
num = models.CharField(u'Número', max_length=100, unique=True)
def __unicode__(self):
return self.num
class Devicemodel(models.Model):
class Meta:
verbose_name = u'Modelo de equipamento'
verbose_name_plural = u'Modelos de equipamento'
ordering = ['name']
name = models.CharField(max_length=200, verbose_name='Nome', unique=True)
comments = models.TextField(u'Observações', max_length=2000, blank=True, null=True)
def __unicode__(self):
return self.name
class Device(models.Model):
class Meta:
verbose_name = 'Device'
verbose_name_plural = 'Devices'
ordering = ['name']
name = models.CharField(u'Nome/Identificação', max_length=200, unique=True)
active = models.BooleanField(u'Ativo/Em uso', default=True)
ownerid = models.OneToOneField(Ownerid, verbose_name=u'Patrimônio', unique=True, null=True, blank=True,
help_text=u"Patrimônio")
devicemodel = models.ForeignKey(Devicemodel, verbose_name='Modelo do equipamento', on_delete=models.PROTECT,
blank=True, null=True)
url = models.URLField('URL', blank=True, null=True, help_text=u"Endereço de interface de administração web")
admuser = models.CharField(u'Usuário administrador', max_length=100, blank=True, null=True)
admpass = models.CharField(u'Senha do usuário administrador', max_length=50, blank=True, null=True)
modification_date = models.DateTimeField(u'Data de modificação', editable=False, blank=True, null=True)
groups = models.ManyToManyField(Group, verbose_name="Grupos", blank=True,
help_text=u"Grupo de usuários relacionado. Aqueles que terão permissão de edição")
comments = models.TextField(u'Observações', max_length=2000, blank=True, null=True)
def save(self):
# type: () -> object
self.modification_date = datetime.datetime.today()
super(Device, self).save()
def __unicode__(self):
return unicode(self.name)
class Ip(models.Model):
class Meta:
verbose_name = 'IP fixo'
verbose_name_plural = 'IPs fixos'
ordering = ['address']
address = models.GenericIPAddressField(verbose_name=u"Endereço", unique=True, help_text="Ex: 10.0.0.1")
network = models.ForeignKey(Network, verbose_name=u"Rede", on_delete=models.PROTECT, blank=True, null=True)
# device = models.ForeignKey(Device,verbose_name='Dispositivo - Host,Switch ou Pilha',on_delete=models.PROTECT)
device = models.ForeignKey(Device, verbose_name='Dispositivo - Host,Switch ou Pilha')
modification_date = models.DateTimeField(u'Data de modificação', editable=False, blank=True, null=True)
comments = models.TextField(u'Observações', max_length=2000, blank=True, null=True)
# validation
def clean(self):
try:
e = self.address
checkip = ipaddress.IPv4Address(e)
except:
raise ValidationError("Endereço informado não é um endereço IP válido")
e = self.address
r = self.network.address
m = str(self.network.mask)
rm = r + "/" + m
verificarede = ipaddress.ip_address(e) in ipaddress.ip_network(rm)
dhcp = self.network.dhcp
if e and r:
if e == r:
raise ValidationError("Endereço de rede não pode ser cadastrado como IP fixo.")
else:
raise ValidationError("erro")
if verificarede == False:
raise ValidationError("Endereço IP não pertence à rede selecionada. Por favor, corrija.")
# broadcast check
rede = ipaddress.ip_network(rm)
broadcast = rede.broadcast_address
if (ipaddress.ip_address(e) == broadcast):
raise ValidationError("Endereço informado é o endereço de broadcast da rede. Por favor, corrija.")
if dhcp == True:
redeid = self.network.id
qtdipsrede = len(list(ipaddress.ip_network(rm).hosts()))
qtdfixos = Ip.objects.filter(network=redeid).count()
ip1 = self.network.dhcp_start
ip2 = self.network.dhcp_end
rede = ipaddress.ip_network(rm)
broadcast = rede.broadcast_address
start = re.split(r'(\.|/)', ip1)
end = re.split(r'(\.|/)', ip2)
ipstart = int(start[-1])
ipend = int(end[-1])
dhcprange = range(ipstart, ipend + 1)
a = re.split(r'(\.|/)', e)
ip = int(a[-1])
verificaip = ip in dhcprange
qtddhcp = len(range(ipstart, ipend + 1))
if (qtddhcp + qtdfixos) >= qtdipsrede:
raise ValidationError("Não é possível cadastrar mais IPs nesta rede - Todos os IPs já estão em uso")
if verificaip == True:
raise ValidationError(
"Este endereço IP faz parte do range do DHCP da rede. Por favor cadastre um outro IP fixo.")
def save(self):
# type: () -> object
self.modification_date = datetime.datetime.today()
super(Ip, self).save()
def __unicode__(self):
return self.address
class Os(models.Model):
class Meta:
verbose_name = 'Sistema operacional'
verbose_name_plural = 'Sistemas operacionais'
ordering = ['name']
name = models.CharField(u'Nome', max_length=30)
version = models.CharField(u'Versão', max_length=20, blank=True, null=True)
def __unicode__(self):
return u'%s - %s' % (self.name, self.version)
#class Hosttype(models.Model):
# class Meta:
# verbose_name = 'Equipamento - Tipo'
# verbose_name_plural = 'Equipamento - Tipos'
# ordering = ['name']
# name = models.CharField('Nome', max_length=50)
# def __unicode__(self):
# return unicode(self.name)
class Host(Device):
class Meta:
verbose_name = 'Equipamento '
verbose_name_plural = 'Equipamento'
ordering = ['name']
LINUX = 'linux'
WINDOWS = 'windows'
BSD = 'bsd'
OTHER = 'other'
OS_PLAT_CHOICES = (
('', '---------'),
(LINUX, 'Linux'),
(WINDOWS, 'Windows'),
(BSD, 'BSD/MacOS X'),
(OTHER, 'Outros'),
)
AP = 'ap'
CAM = 'cam'
CFTV = 'cftv'
DESKTOP = 'desktop'
FW = 'fw'
MEDIA = 'media'
PHONE = 'voip'
PRINTER = 'printer'
ROUTER = "router"
SERVER = 'server'
STORAGE = 'storage'
VIRT = 'virt'
WIFI = 'wifi'
OTHER = 'other'
HWTYPE_CHOICES = (
('', '---------'),
(AP, 'Access Point'),
(CAM, 'Camera IP'),
(CFTV, 'Equipamento de CFTV'),
(DESKTOP, 'Desktop'),
(FW, 'Firewall/UTM'),
(MEDIA, 'Servidor de Media/Audio/Video'),
(PHONE, 'Telefone ou Equipamento VOIP'),
(PRINTER, 'Impressora/Scanner'),
(ROUTER, 'Roteador'),
(SERVER, 'Servidor de rede'),
(STORAGE, 'Storage/Equipamento de armazenamento/Backup'),
(VIRT, 'Servidor Virtualização'),
(WIFI, 'Controladora WIFI'),
(OTHER, 'Outros'),
)
supplierhw = models.BooleanField(u'Equipamento de terceiros', default=False)
vm = models.BooleanField(u'Máquina Virtual', default=False)
serial_number = models.CharField(u'Num de série', max_length=30, blank=True, null=True, unique=True)
os = models.ForeignKey(Os, verbose_name=u'Sistema Operacional', blank=True, null=True)
hwtype = models.CharField(u'Tipo de equipamento', blank=True, max_length=20, choices=HWTYPE_CHOICES,
default='other')
osplatform = models.CharField(u'Plataforma de sistema operacional', max_length=20, choices=OS_PLAT_CHOICES,
default='other')
manufactorer = models.ForeignKey(Manufactorer, blank=True, null=True, verbose_name=u'Fabricante')
mem = models.CharField(u'Memória instalada. Ex: 2GB', max_length=20, blank=True, null=True)
cpu = models.IntegerField(u'Quantidade de CPUS', blank=True, null=True)
place = models.ForeignKey(Place, verbose_name=u'Localização', null=True, blank=True,
help_text=u"Onde o equipamento está instalado (Não se aplica à máquinas virtuais)")
# tags = TaggableManager()
# diskspace
# lastupdate = = models.DateTimeField('Ultima atualização', blank=True, null=True)
# warranty = models.DateTimeField('Garantia', blank=True, null=True)
# os_key = models.CharField(u'Chave/Registro do S.O.,max_length=100, blank=True, null=True)
changed_by = models.ForeignKey(User, related_name="host_changed_by", null=True, blank=True)
history = HistoricalRecords()
@property
def _history_user(self):
return self.changed_by
@_history_user.setter
def _history_user(self, value):
self.changed_by = value
def clean(self):
v = self.vm
s = self.serial_number
p = self.ownerid
sh = self.supplierhw
if v == False:
if p == None and sh == False:
raise ValidationError("Campo Patrimônio é obrigatório para máquinas físicas")
if v == True:
if p != None:
raise ValidationError(
"Campo Patrimônio só deve ser usado em máquinas físicas e de propriedade da instituição")
def save(self, *args, **kwargs):
# type: (object, object) -> object
if not self.serial_number:
self.serial_number = None
super(Host, self).save(*args, **kwargs)
#Log.objects.create(record_name=self.name, event_date=self.modification_date, record_type="host", actor="user")
def __unicode__(self):
return unicode(self.name)
class Hostupdate(models.Model):
class Meta:
verbose_name = u'Atualizacão'
verbose_name_plural = u'Atualizações'
ordering = ['aplication_date']
name = models.CharField(u'Nome/Identificação', max_length=100, help_text=u"Identificação da atualização aplicada")
aplication_date = models.DateField(u'Data de aplicação', blank=True, null=True)
register_date = models.DateTimeField(u'Data de registro', blank=True, null=True, editable=False, auto_now_add=True)
host = models.ForeignKey(Host, verbose_name='Dispositivo', on_delete=models.PROTECT)
comments = models.CharField(u'Observações', max_length=300, blank=True)
def __unicode__(self):
return unicode(self.name)
class Printer(Device):
class Meta:
verbose_name = 'Impressora'
verbose_name_plural = 'Impressoras'
ordering = ['name']
PRINT = '1'
MULTI = '2'
SCAN = '3'
OTHER = '4'
TYPE_CHOICES = (
('', '---------'),
(PRINT, 'Impressora'),
(MULTI, 'Multifuncional'),
(SCAN, 'Scanner'),
(OTHER, 'Outros'),
)
serial_number = models.CharField(u'Num de série', max_length=30, blank=True, null=True, unique=True)
supplierhw = models.BooleanField(u'Equipamento de terceiros', default=False)
manufactorer = models.ForeignKey(Manufactorer, blank=True, null=True, verbose_name=u'Fabricante')
# model = models.CharField(u'Modelo', max_length=30, blank=True, null=True)
place = models.ForeignKey(Place, verbose_name=u'Localização', null=True, blank=True)
printer_type = models.CharField(u'Tipo de equipamento', max_length=20, choices=TYPE_CHOICES, default='1',
help_text=u"Obrigatório")
changed_by = models.ForeignKey(User, related_name="printer_changed_by", null=True, blank=True)
history = HistoricalRecords()
@property
def _history_user(self):
return self.changed_by
@_history_user.setter
def _history_user(self, value):
self.changed_by = value
def clean(self):
if not self.ownerid and not self.supplierhw:
raise ValidationError("Campo Patrimônio é obrigatório")
# def save(self, *args, **kwargs):
# if not self.serial_number:
# self.serial_number = None
# super(self, Printer).save(*args, **kwargs)
def __unicode__(self):
return unicode(self.name)
class Servicecategory(models.Model):
class Meta:
verbose_name = u'Serviços - categoria'
verbose_name_plural = u'Serviços - categorias'
ordering = ['name']
name = models.CharField('Nome', max_length=100)
def __unicode__(self):
return unicode(self.name)
class Service(models.Model):
class Meta:
verbose_name = 'Serviço'
verbose_name_plural = 'Serviços'
ordering = ['name']
name = models.CharField('Nome', max_length=100)
network = models.ForeignKey(Network, verbose_name="Rede", blank=True, null=True)
ip = models.ForeignKey(Ip, verbose_name=u"Endereço IP", blank=True, null=True)
category = models.ManyToManyField(Servicecategory, verbose_name="Categoria", blank=True)
modification_date = models.DateTimeField(u'Data de modificação', editable=False, blank=True, null=True)
obs = models.CharField(u'Observações', max_length=100, blank=True)
def save(self):
# type: () -> object
self.modification_date = datetime.datetime.today()
super(Service, self).save()
def __unicode__(self):
return unicode(self.name)
class Stack(Device):
class Meta:
verbose_name = 'Switches - Pilha'
verbose_name_plural = 'Switches - Pilhas'
ordering = ['name']
def __unicode__(self):
return unicode(self.name)
class SwitchManager(models.Manager):
def get_queryset(self):
query_set = super(SwitchManager, self).get_queryset()
return query_set.extra(
select={
'_ports_total': 'SELECT COUNT(*) FROM ace_switchport where ace_switchport.switch_id = ace_switch.device_ptr_id',
},
)
class Switch(Device):
class Meta:
verbose_name = 'Switch'
verbose_name_plural = 'Switches'
ordering = ['name']
#model = models.CharField('Modelo', max_length=200, help_text=u"Obrigatório")
serial = models.CharField('Num. Serie', max_length=30, blank=True, null=True)
place = models.ForeignKey(Place, verbose_name=u'Localização', blank=True, null=True)
rack = models.ForeignKey(Rack, blank=True, null=True)
ports = models.IntegerField('Num. Portas', help_text=u"Obrigatório")
manageable = models.BooleanField(verbose_name=u'Gerenciável (Individualmente)', default=False)
manufactorer = models.ForeignKey(Manufactorer, blank=True, null=True, verbose_name=u'Fabricante')
stacked = models.BooleanField('Empilhado',
# Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#!/usr/bin/env python
import unittest
import torch
import torch.nn as nn
class PaddingLayerTests(unittest.TestCase):
def setUp(self):
self.filling_value = 5.0
fv = self.filling_value
self.symmetric_padding_result = torch.tensor(
[
[
[
[fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
[fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
[fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
[fv, fv, fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv, fv],
[fv, fv, fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv, fv],
[fv, fv, fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv, fv],
[fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
[fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
[fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
]
]
]
)
self.asymmetric_padding_result = torch.tensor(
[
[
[
[fv, fv, fv, fv, fv, fv, fv, fv],
[fv, fv, fv, fv, fv, fv, fv, fv],
[fv, fv, fv, fv, fv, fv, fv, fv],
[fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv],
[fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv],
[fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv],
[fv, fv, fv, fv, fv, fv, fv, fv],
[fv, fv, fv, fv, fv, fv, fv, fv],
[fv, fv, fv, fv, fv, fv, fv, fv],
[fv, fv, fv, fv, fv, fv, fv, fv],
]
]
]
)
self.asymmetric_padding_for_W_result = torch.tensor(
[
[
[
[fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv],
[fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv],
[fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv],
]
]
]
)
self.delta1_for_constant_pad = torch.tensor(
[
[
[
[fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
[fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
[fv, fv, fv, fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv, fv],
[fv, fv, fv, fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv, fv],
[fv, fv, fv, fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv, fv],
[fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
]
]
]
)
self.constant_pad_backward_pass_result1 = torch.tensor(
[
[
[
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
]
]
]
)
self.delta2_for_constant_pad = torch.tensor(
[
[
[
[fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
[fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
[fv, fv, fv, fv, 2.0, 2.0, 2.0, 2.0, 2.0, fv, fv, fv],
[fv, fv, fv, fv, 2.0, 2.0, 2.0, 2.0, 2.0, fv, fv, fv],
[fv, fv, fv, fv, 2.0, 2.0, 2.0, 2.0, 2.0, fv, fv, fv],
[fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
]
]
]
)
# self.constant_pad_backward_pass_result2 is filled with 3.0 because torch.autograd
# accumulates the gradient as the sum of the results of all backward() calls
self.constant_pad_backward_pass_result2 = torch.tensor(
[
[
[
[3.0, 3.0, 3.0, 3.0, 3.0],
[3.0, 3.0, 3.0, 3.0, 3.0],
[3.0, 3.0, 3.0, 3.0, 3.0],
]
]
]
)
self.symmetric_reflection_padding_result = torch.tensor(
[
[
[
[10.0, 9.0, 8.0, 9.0, 10.0, 11.0, 10.0, 9.0],
[6.0, 5.0, 4.0, 5.0, 6.0, 7.0, 6.0, 5.0],
[2.0, 1.0, 0.0, 1.0, 2.0, 3.0, 2.0, 1.0],
[6.0, 5.0, 4.0, 5.0, 6.0, 7.0, 6.0, 5.0],
[10.0, 9.0, 8.0, 9.0, 10.0, 11.0, 10.0, 9.0],
[6.0, 5.0, 4.0, 5.0, 6.0, 7.0, 6.0, 5.0],
[2.0, 1.0, 0.0, 1.0, 2.0, 3.0, 2.0, 1.0],
]
]
]
)
self.asymmetric_reflection_padding_result = torch.tensor(
[
[
[
[7.0, 6.0, 5.0, 4.0, 5.0, 6.0, 7.0, 6.0, 5.0],
[3.0, 2.0, 1.0, 0.0, 1.0, 2.0, 3.0, 2.0, 1.0],
[7.0, 6.0, 5.0, 4.0, 5.0, 6.0, 7.0, 6.0, 5.0],
[11.0, 10.0, 9.0, 8.0, 9.0, 10.0, 11.0, 10.0, 9.0],
]
]
]
)
self.reflection_pad_backward_pass_result = torch.tensor(
[
[
[
[1.0, 3.0, 3.0, 3.0, 2.0],
[3.0, 9.0, 9.0, 9.0, 6.0],
[2.0, 6.0, 6.0, 6.0, 4.0],
]
]
]
)
self.symmetric_replication_padding_result = torch.tensor(
[
[
[
[0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 3.0],
[0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 3.0],
[0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 3.0],
[4.0, 4.0, 4.0, 5.0, 6.0, 7.0, 7.0, 7.0],
[8.0, 8.0, 8.0, 9.0, 10.0, 11.0, 11.0, 11.0],
[8.0, 8.0, 8.0, 9.0, 10.0, 11.0, 11.0, 11.0],
[8.0, 8.0, 8.0, 9.0, 10.0, 11.0, 11.0, 11.0],
]
]
]
)
self.asymmetric_replication_padding_result = torch.tensor(
[
[
[
[0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 3.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 3.0],
[4.0, 4.0, 4.0, 4.0, 5.0, 6.0, 7.0, 7.0, 7.0],
[8.0, 8.0, 8.0, 8.0, 9.0, 10.0, 11.0, 11.0, 11.0],
]
]
]
)
self.replication_pad_backward_pass_result = torch.tensor(
[
[
[
[15.0, 3.0, 3.0, 3.0, 12.0],
[5.0, 1.0, 1.0, 1.0, 4.0],
[10.0, 2.0, 2.0, 2.0, 8.0],
]
]
]
)
def test_symmetric_padding_for_each_side_of_H_and_W(self):
common_padding = 3
input = torch.ones(1, 1, 3, 5, dtype=torch.float)
model = nn.ConstantPad2d(common_padding, self.filling_value)
output = model(input)
added_from_left = common_padding
added_from_right = common_padding
added_from_top = common_padding
added_from_bottom = common_padding
self.assertEqual(input.size(), torch.Size([1, 1, 3, 5]))
self.assertEqual(
output.size(),
torch.Size(
[
1,
1,
3 + added_from_top + added_from_bottom,
5 + added_from_left + added_from_right,
]
),
)
self.assertTrue(torch.equal(output, self.symmetric_padding_result))
def test_asymmetric_padding_for_each_side_of_H_and_W(self):
paddings = [1, 2, 3, 4]
input = torch.ones(1, 1, 3, 5, dtype=torch.float)
model = nn.ConstantPad2d(paddings, self.filling_value)
output = model(input)
added_from_left = paddings[0]
added_from_right = paddings[1]
added_from_top = paddings[2]
added_from_bottom = paddings[3]
self.assertEqual(input.size(), torch.Size([1, 1, 3, 5]))
self.assertEqual(
output.size(),
torch.Size(
[
1,
1,
3 + added_from_top + added_from_bottom,
5 + added_from_left + added_from_right,
]
),
)
self.assertTrue(torch.equal(output, self.asymmetric_padding_result))
def test_assymmetric_padding_only_for_W(self):
paddings = [1, 2]
input = torch.ones(1, 1, 3, 5, dtype=torch.float)
model = nn.ConstantPad2d(paddings, self.filling_value)
output = model(input)
added_from_left = paddings[0]
added_from_right = paddings[1]
self.assertEqual(input.size(), torch.Size([1, 1, 3, 5]))
self.assertEqual(
output.size(), torch.Size([1, 1, 3, 5 + added_from_left + added_from_right])
)
self.assertTrue(torch.equal(output, self.asymmetric_padding_for_W_result))
def test_setting_no_paddings_for_each_side_of_H_and_W(self):
no_padding = 0
input = torch.ones(1, 1, 3, 5, dtype=torch.float)
model = nn.ConstantPad2d(no_padding, 0.0)
output = model(input)
self.assertEqual(input.size(), torch.Size([1, 1, 3, 5]))
self.assertEqual(output.size(), input.size())
self.assertTrue(torch.equal(output, input))
no_paddings = [0, 0, 0, 0]
model = nn.ConstantPad2d(no_paddings, 0.0)
output = model(input)
self.assertEqual(input.size(), torch.Size([1, 1, 3, 5]))
self.assertEqual(output.size(), input.size())
self.assertTrue(torch.equal(output, input))
def test_that_cannot_be_set_different_paddings_only_for_left_right_of_W_and_top_of_H(
self,
):
input = torch.ones(1, 1, 3, 5, dtype=torch.float)
model = nn.ConstantPad2d([1, 2, 3], 0.0)
self.assertRaises(AssertionError, model, input)
def test_that_cannot_be_set_different_paddings_only_for_left_of_W(self):
input = torch.ones(1, 1, 3, 5, dtype=torch.float)
model = nn.ConstantPad2d([1], 0.0)
self.assertRaises(AssertionError, model, input)
def test_that_cannot_be_set_more_then_4_paddings(self):
input = torch.ones(1, 1, 3, 5, dtype=torch.float)
model = nn.ConstantPad2d([1, 2, 3, 4, 5], 0.0)
self.assertRaises(AssertionError, model, input)
def test_backward_computations_of_ConstantPad2d(self):
# Since ConstantPad2d is a functional layer (it has no weights), the gradient of its
# input is simply ConstantPad2d_derivative(backward_input): a tensor with the same
# shape as the input, where
# grad[n][c][h][w] = backward_input[n][c][h + padding_top][w + padding_left]
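# Concretely for this test: paddings = [4, 3, 2, 1] means (left=4, right=3, top=2,
# bottom=1), so the expected input gradient is the slice delta[:, :, 2:2+3, 4:4+5] of
# the incoming 1x1x6x12 delta tensor, i.e. the 3x5 block of ones (or twos) in
# self.delta1_for_constant_pad / self.delta2_for_constant_pad.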
paddings = [4, 3, 2, 1]
input = torch.ones(1, 1, 3, 5, dtype=torch.float, requires_grad=True)
model = nn.ConstantPad2d(paddings, self.filling_value)
output = model(input)
output.backward(self.delta1_for_constant_pad)
self.assertEqual(input.grad.size(), input.size())
self.assertTrue(
torch.equal(input.grad, self.constant_pad_backward_pass_result1)
)
output.backward(self.delta2_for_constant_pad)
self.assertEqual(input.grad.size(), input.size())
self.assertTrue(
torch.equal(input.grad, self.constant_pad_backward_pass_result2)
)
log = dict()
log['func_name'] = func.__name__
if self.current_queries:
best_query = self.current_queries[0]
log['old_best_query'] = best_query
log['old_precision'] = best_query.precision
log['old_recall'] = best_query.recall
log['old_score'] = best_query.score
else:
log['old_best_query'] = None
log['old_precision'] = None
log['old_recall'] = None
log['old_score'] = None
res = func(self, *args, **kwargs)
# self._sort_queries()
self._metrics_and_sort()
if self.current_queries:
best_query = self.current_queries[0]
log['best_query'] = best_query
log['precision'] = best_query.precision
log['recall'] = best_query.recall
log['score'] = best_query.score
else:
log['best_query'] = None
log['precision'] = None
log['recall'] = None
log['score'] = None
try:
self.log
except:
self.log = []
self.log.append(log)
return res
return wrapper
@time_in
@print_name
def _prune_analyzers(self, remove_duplicates=False):
"""Prune analzyers on each individual query and remove duplicate queries.
"""
import time
a = time.time()
for q in self.current_queries:
q.prune_analyzers()
b = time.time()
c = b
if remove_duplicates:
self.current_queries = list(set(self.current_queries))
c = time.time()
print('step A: {}s / step B: {}s'.format(b-a, c-a))
@time_in
@print_name
@_log_wrapper
@_query_counter_wrapper
def filter_by_extended_core(self):
"""Keep the best of each query template for all distinct extended_cores.
Keep the best combination of `boost_level`'s for each `extended_core`.
"""
queries_by_extended_core = defaultdict(list)
for query in self.current_queries:
queries_by_extended_core[query.extended_core].append(query)
self.current_queries = [sorted(queries, key=lambda x: x.score)[-1] \
for queries in queries_by_extended_core.values()]
self._metrics_and_sort()
@time_in
def filter_(self):
"""Apply filtering on current_queries."""
FILTER_BY_CORE_IDXS = [10, 20]
if self._nprl() > 1:
print('FILTERING !')
self.filter_by_precision()
self.filter_by_num_keys()
if self._nprl() in FILTER_BY_CORE_IDXS:
self.filter_by_core()
# TODO: re-score ?
self._re_score_history(call_next_row=False)
if not self.current_queries:
self.status = 'NO_QUERIES'
logging.warning('No more queries after filtering')
@time_in
def expand(self):
"""Use current state to determin whether or not to use query expansion
and which method to use.
"""
EXPAND_BY_CORE_IDXS = {11, 17}
EXPAND_BY_BOOST_IDXS = {14, 22, 30, 60, 120, 240} # TODO: smarter than that
assert not bool(EXPAND_BY_CORE_IDXS & EXPAND_BY_BOOST_IDXS)
try:
self.already_expanded
except:
self.already_expanded = set()
if self._nprl() in self.already_expanded: # TODO: all this is ugly just to have 1 expansion
return
if self._nprl() in EXPAND_BY_CORE_IDXS:
self.expand_by_core()
elif self._nprl() in EXPAND_BY_BOOST_IDXS:
self.expand_by_boost()
else:
return
self.already_expanded.add(self._nprl())
assert self.current_source_idx == self.labelled_pairs_match[-1][0]
def _nrl(self):
"""Return the current number of rows labelled."""
# TODO: num rows_labelled, take care of this
if self.num_rows_labelled:
return self.num_rows_labelled[-1]
else:
return 0
def _nprl(self):
"""Return current number of positive matches labelled."""
# TODO: num rows_labelled, take care of this
if self.num_rows_labelled:
return self.num_positive_rows_labelled[-1]
else:
return 0
@time_in
@print_name
@_log_wrapper
@_query_counter_wrapper
def filter_by_core(self):
"""Restrict each individual current query to their essential core
queries.
Remove queries for which the core score is too low
"""
MIN_SCORE = 0.1
cores = [q.core for q in self.single_core_queries if q.score <= MIN_SCORE]
self.current_queries = list({query.new_template_restricted(cores, ['must', 'should']) \
for query in self.current_queries})
self.current_queries = [x for x in self.current_queries if x is not None]
@time_in
@print_name
@_log_wrapper
@_query_counter_wrapper
def filter_by_precision(self):
"""Filter current_queries based on their precision."""
MIN_PRECISION_TAB = [(20, 0.5), (10, 0.4), (5, 0.3)]
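# Read top-down: with at least 20 positive labels require precision >= 0.5,
# with at least 10 require 0.4, with at least 5 require 0.3, otherwise 0.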
def _min_precision(self):
"""
Return the minimum precision to keep a query template, according to
the number of rows currently labelled
"""
for min_idx, min_precision in MIN_PRECISION_TAB:
if self._nprl() >= min_idx:
break
else:
min_precision = 0
return min_precision
precisions = [x.precision for x in self.current_queries]
sorted_indices = sorted(range(len(precisions)), key=lambda x: precisions[x], reverse=True)
indices_to_keep = [i for i, x in enumerate(self.current_queries) \
if x.precision >= _min_precision(self)]
indices_to_keep += sorted_indices[len(indices_to_keep): self.MIN_NUM_QUERIES]
# Complex maneuver to avoid copying self.current_queries #TODO: change this ?
self.current_queries = [x for i, x in enumerate(self.current_queries) \
if i in indices_to_keep]
@time_in
@print_name
@_log_wrapper
@_query_counter_wrapper
def filter_by_num_keys(self):
"""Keep only the best queries.
Keep only the N best queries. N depends on the number of rows with
a match found (aka the number of positive labels). The more labels we
have, the less queries we keep.
"""
MAX_NUM_KEYS_TAB = [(20, 10), (10, 50), (7, 200), (5, 500), (0, 4000)]
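# Read top-down: with at least 20 positive labels keep 10 queries, with at
# least 10 keep 50, with at least 7 keep 200, with at least 5 keep 500,
# otherwise keep up to 4000.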
def _max_num_queries(self):
"""
Maximum number of queries to keep, based on the number of positive rows currently labelled
"""
for min_idx, max_num_keys in MAX_NUM_KEYS_TAB[:-1]:
if self._nprl() >= min_idx:
break
else:
max_num_keys = MAX_NUM_KEYS_TAB[-1][1]
return max_num_keys
# Remove queries according to max number of keys
self.current_queries = self.current_queries[:_max_num_queries(self)]
@time_in
@print_name
@_log_wrapper
@_query_counter_wrapper
def expand_by_core(self):
"""Add queries to current_queries by adding fields to current_queries."""
print('EXPANDING BY CORE')
MIN_SCORE = 0.7
cores = [q for q in self.single_core_queries if q.score >= MIN_SCORE]
self.current_queries = list({x for query in self.current_queries \
for x in query.multiply_by_core(cores, ['must'])})
# NB: query analyzer pruning should be done in multiply_by_cores
# TODO: Move back into expand
self._re_score_history(call_next_row=False) # Also sorts results
self.filter_by_extended_core()
self.filter_()
@time_in
@print_name
@_log_wrapper
@_query_counter_wrapper
def expand_by_boost(self):
"""Add queries to current_queries by varying boost levels."""
print('EXPANDING BY BOOST')
self.current_queries = list({x for query in self.current_queries \
for x in query.multiply_by_boost(2)})
# TODO: Move back into expand
self._re_score_history(call_next_row=False) # Also sorts results
self.filter_by_extended_core()
self.filter_()
@print_name
def export_best_params(self):
"""Return the parameters for the best query for matching (use in `es_linker`)."""
params = dict()
params['index_name'] = self.ref_index_name
params['queries'] = [{'template': q._as_tuple(),
'thresh': 0,
'best_thresh': q.thresh if q.thresh else 0,
'expected_precision': q.precision,
'expected_recall': q.recall} \
for q in self.current_queries[:self.num_queries_sorted]]
# Sort queries by precision if possible
assert all(x['expected_precision'] is None for x in params['queries']) \
or all(x['expected_precision'] is not None for x in params['queries'])
if params['queries'][0]['expected_precision'] is not None:
params['queries'] = sorted(params['queries'],
key=lambda x: x['expected_precision'], reverse=True)
params['must'] = self.must_filters
params['must_not'] = self.must_not_filters
params['exact_pairs'] = [p for p in self.labelled_pairs if self.labels[p] == 'y']
params['non_matching_pairs'] = [p for p in self.labelled_pairs if self.labels[p] == 'n']
params['forgotten_pairs'] = [p for p in self.labelled_pairs if self.labels[p] == 'f']
return params
def write_training(self, file_path): # DONE
params = self.export_best_params()
encoder = MyEncoder()
with open(file_path, 'w') as w:
w.write(encoder.encode(params))
@print_name
def update_musts(self, must_filters, must_not_filters):
if (not isinstance(must_filters, dict)) or (not isinstance(must_not_filters, dict)):
raise ValueError('Variables "must" and "must_not" should be dicts ' \
'with keys being column names and values a list of strings')
self.must_filters = must_filters
self.must_not_filters = must_not_filters
self.status = 'ACTIVE' # If not 'ACTIVE' this will be fixed in _next_row
self._re_score_history(call_next_row=True)
self._sanity_check()
@print_name
def update_targets(self, t_p, t_r):
self.TARGET_PRECISION = t_p
self.TARGET_RECALL = t_r
# Re-score metrics
self._metrics_and_sort()
# def next_items(self, max_num_items):
def to_emit(self):
"""Creates a dict to be sent to the template."""#TODO: fix this
dict_to_emit = dict()
# Status
dict_to_emit['status'] = self.status
# Info on labeller
dict_to_emit['t_p'] = self.TARGET_PRECISION
dict_to_emit['t_r'] = self.TARGET_RECALL
dict_to_emit['has_previous'] = bool(len(self.labels))
dict_to_emit['must_filters'] = self.must_filters
dict_to_emit['must_not_filters'] = self.must_not_filters
# Info on labeller (counts)
dict_to_emit['num_pos'] = sum(self.VALID_ANSWERS[x]=='y' for x in self.labels.values())
dict_to_emit['num_neg'] = sum(self.VALID_ANSWERS[x]=='n' for x in self.labels.values())
dict_to_emit['num_unc'] = sum(self.VALID_ANSWERS[x]=='u' for x in self.labels.values())
dict_to_emit['num_for'] = sum(self.VALID_ANSWERS[x]=='f' for x in self.labels.values())
# Info on current query
# TODO: on previous, current_query is no longer valid
dict_to_emit['query_ranking'] = self.current_query_ranking
if self.current_query_ranking != -1:
# best_query = self.current_queries[0]
# dict_to_emit['query'] = best_query._as_tuple()
#dict_to_emit['estimated_score'] = best_query.score
# dict_to_emit['thresh'] = best_query.thresh
current_query = self.current_query
dict_to_emit['c_query'] = current_query._as_tuple()
dict_to_emit['c_estimated_precision'] = current_query.precision
dict_to_emit['c_estimated_recall'] = current_query.recall
dict_to_emit['c_estimated_score'] = current_query.score
dict_to_emit['c_thresh'] = current_query.thresh
dict_to_emit['estimated_precision'] = self.estimated_precision
dict_to_emit['estimated_recall'] = self.estimated_recall
dict_to_emit['num_queries_sorted'] = self.num_queries_sorted
# Info on pair
dict_to_emit['source_idx'] = self.current_source_idx
dict_to_emit['ref_idx'] = self.current_ref_idx
if isinstance(self.current_source_item, dict):
csi = self.current_source_item
else:
csi = self.current_source_item.to_dict()
dict_to_emit['source_item'] = {'_id': self.current_source_idx,
'_source': csi}
dict_to_emit['ref_item'] = {'_id': self.current_ref_idx,
'_score': self.current_es_score,
'_source': self.current_ref_item}
dict_to_emit['top_ref_items'] = self._ref_rows_for_current_source_row(20, 10**9)
dict_to_emit['es_score'] = self.current_es_score
dict_to_emit['majority_vote'] = self.majority_vote(10)
# Estimate if the current pair is considered to be a match or not
# (only if we have access to es_score and current query threshold)
if self.current_query_ranking != -1:
if (self.current_es_score is not None) and (current_query.thresh is not None):
dict_to_emit['estimated_is_match'] = self.current_es_score >= current_query.thresh
else:
dict_to_emit['estimated_is_match'] = None
return dict_to_emit
class SearchLabeller(BasicLabeller):
"""
Extends the BasicLabeller class by providing tools for custom search.
"""
def to_dict(self):
"""Returns a dict representation of the instance."""
custom_searches = | |
SLHA_TABLE = '''
# ISAJET SUSY parameters in SUSY Les Houches Accord 2 format
# Created by ISALHA 2.0 Last revision: <NAME> 27 May 2014
Block SPINFO # Program information
1 ISASUGRA/ISASUSY from ISAJET # Spectrum Calculator
2 7.88 02-JAN-2018 11:01:14 # Version number
Block MODSEL # Model selection
1 2 # Minimal gauge mediated (GMSB) model
Block SMINPUTS # Standard Model inputs
1 1.28000000E+02 # alpha_em^(-1)
2 1.16570000E-05 # G_Fermi
3 1.19999997E-01 # alpha_s(M_Z)
4 9.11699982E+01 # m_{Z}(pole)
5 4.19999981E+00 # m_{b}(m_{b})
6 1.73100006E+02 # m_{top}(pole)
7 1.77699995E+00 # m_{tau}(pole)
Block MINPAR # SUSY breaking input parameters
1 4.50000000E+05 # Lambda scale of soft SSB
2 9.00000000E+05 # M_mess overall messenger scale
3 1.50000000E+01 # tan(beta)
4 1.00000000E+00 # sign(mu)
5 1.00000000E+00 # N_5 messenger index
6 6.51833008E+02 # c_grav gravitino mass factor
51 1.00000000E+00 # N5_1 U(1)_Y messenger index
52 1.00000000E+00 # N5_2 SU(2)_L messenger index
53 1.00000000E+00 # N5_3 SU(3)_C messenger index
101 1.00000000E+00 # Rsl
102 0.00000000E+00 # dmH_d^2
103 0.00000000E+00 # dmH_u^2
104 0.00000000E+00 # d_Y
Block MASS # Scalar and gaugino mass spectrum
# PDG code mass particle
6 1.73100006E+02 # top
24 8.04229965E+01 # W^+
25 1.20551857E+02 # h^0
35 2.12422729E+03 # H^0
36 2.11030371E+03 # A^0
37 2.12573218E+03 # H^+
1000001 4.51387402E+03 # dnl
1000002 4.51315869E+03 # upl
1000003 4.51387402E+03 # stl
1000004 4.51315869E+03 # chl
1000005 4.24461133E+03 # b1
1000006 3.88973877E+03 # t1
1000011 1.57330334E+03 # el-
1000012 1.56308667E+03 # nuel
1000013 1.57330334E+03 # mul-
1000014 1.56308667E+03 # numl
1000015 7.88488892E+02 # tau1
1000016 1.55446924E+03 # nutl
1000021 3.26672119E+03 # glss
1000022 6.49607178E+02 # z1ss
1000023 1.23292749E+03 # z2ss
1000024 1.23316174E+03 # w1ss
1000025 -1.47747803E+03 # z3ss
1000035 1.48980066E+03 # z4ss
1000037 1.49091223E+03 # w2ss
1000039 6.35066899E-05 # gvss
2000001 4.26391797E+03 # dnr
2000002 4.28836816E+03 # upr
2000003 4.26391797E+03 # str
2000004 4.28836816E+03 # chr
2000005 4.34371777E+03 # b2
2000006 4.37019678E+03 # t2
2000011 7.86332153E+02 # er-
2000013 7.86332153E+02 # mur-
2000015 1.56533508E+03 # tau2
Block ALPHA # Effective Higgs mixing parameter
-6.68973103E-02 # alpha
Block STOPMIX # stop mixing matrix
1 1 2.27555223E-02 # O_{11}
1 2 9.99741077E-01 # O_{12}
2 1 -9.99741077E-01 # O_{21}
2 2 2.27555223E-02 # O_{22}
Block SBOTMIX # sbottom mixing matrix
1 1 8.59610587E-02 # O_{11}
1 2 9.96298492E-01 # O_{12}
2 1 -9.96298492E-01 # O_{21}
2 2 8.59610587E-02 # O_{22}
Block STAUMIX # stau mixing matrix
1 1 1.81620419E-02 # O_{11}
1 2 9.99835074E-01 # O_{12}
2 1 -9.99835074E-01 # O_{21}
2 2 1.81620419E-02 # O_{22}
Block NMIX # neutralino mixing matrix
1 1 9.99189913E-01 #
1 2 -2.43789563E-03 #
1 3 3.62787545E-02 #
1 4 -1.72411501E-02 #
2 1 -1.09320292E-02 #
2 2 -9.75383639E-01 #
2 3 1.67687014E-01 #
2 4 -1.42787591E-01 #
3 1 1.34054422E-02 #
3 2 -1.80797763E-02 #
3 3 -7.06560373E-01 #
3 4 -7.07294643E-01 #
4 1 -3.63336578E-02 #
4 2 2.19758555E-01 #
4 3 6.86540365E-01 #
4 4 -6.92133248E-01 #
Block UMIX # chargino U mixing matrix
1 1 -9.73248541E-01 # U_{11}
1 2 2.29754820E-01 # U_{12}
2 1 -2.29754820E-01 # U_{21}
2 2 -9.73248541E-01 # U_{22}
Block VMIX # chargino V mixing matrix
1 1 -9.80782092E-01 # V_{11}
1 2 1.95106342E-01 # V_{12}
2 1 -1.95106342E-01 # V_{21}
2 2 -9.80782092E-01 # V_{22}
Block GAUGE Q= 3.99986890E+03 #
1 3.57524991E-01 # g`
2 6.52378619E-01 # g_2
3 1.21928000E+00 # g_3
Block YU Q= 3.99986890E+03 #
3 3 8.26207161E-01 # y_t
Block YD Q= 3.99986890E+03 #
3 3 1.80895194E-01 # y_b
Block YE Q= 3.99986890E+03 #
3 3 1.53140575E-01 # y_tau
Block HMIX Q= 3.99986890E+03 # Higgs mixing parameters
1 1.45950818E+03 # mu(Q)
2 1.42720318E+01 # tan(beta)(Q)
3 2.52109192E+02 # Higgs vev at Q
4 4.45338200E+06 # m_A^2(Q)
Block MSOFT Q= 3.99986890E+03 # DRbar SUSY breaking parameters
1 6.61775879E+02 # M_1(Q)
2 1.19799866E+03 # M_2(Q)
3 2.97602686E+03 # M_3(Q)
21 2.20187325E+06 # MHd^2(Q)
22 -1.85443900E+06 # MHu^2(Q)
31 1.56049634E+03 # MeL(Q)
32 1.56049634E+03 # MmuL(Q)
33 1.55207776E+03 # MtauL(Q)
34 7.86154358E+02 # MeR(Q)
35 7.86154358E+02 # MmuR(Q)
36 7.74571350E+02 # MtauR(Q)
41 4.36809131E+03 # MqL1(Q)
42 4.36809131E+03 # MqL2(Q)
43 4.20987500E+03 # MqL3(Q)
44 4.14014160E+03 # MuR(Q)
45 4.14014160E+03 # McR(Q)
46 3.80033887E+03 # MtR(Q)
47 4.11510986E+03 # MdR(Q)
48 4.11510986E+03 # MsR(Q)
49 4.10062988E+03 # MbR(Q)
Block AU Q= 3.99986890E+03 #
1 1 -8.62451172E+02 # A_u
2 2 -8.62451172E+02 # A_c
3 3 -8.62451172E+02 # A_t
Block AD Q= 3.99986890E+03 #
1 1 -9.58785889E+02 # A_d
2 2 -9.58785889E+02 # A_s
3 3 -9.58785889E+02 # A_b
Block AE Q= 3.99986890E+03 #
1 1 -1.18693092E+02 # A_e
2 2 -1.18693092E+02 # A_mu
3 3 -1.18693092E+02 # A_tau
# ISAJET decay tables in SUSY Les Houches accord format
# Created by ISALHD. Last revision: <NAME>, 2005 May 25
Block DCINFO # Program information
1 ISASUGRA from ISAJET # Spectrum Calculator
2 7.88 02-JAN-2018 11:01:14 # Version number
# PDG Width
DECAY 6 1.48575687E+00 # TP decays
# BR NDA ID1 ID2 ID3 ID4
3.33333313E-01 3 2 -1 5 # TP --> UP DB BT
3.33333313E-01 3 4 -3 5 # TP --> CH SB BT
1.11111097E-01 3 -11 12 5 # TP --> E+ NUE BT
1.11111097E-01 3 -13 14 5 # TP --> MU+ NUM BT
1.11111097E-01 3 -15 16 5 # TP --> TAU+ NUT BT
# PDG Width
DECAY 1000021 7.99780861E-02 # GLSS decays
# BR NDA ID1 ID2 ID3 ID4
4.72715348E-02 3 1000024 1 -2 # GLSS --> W1SS+ DN UB
4.72715348E-02 3 -1000024 2 -1 # GLSS --> W1SS- UP DB
4.72715348E-02 3 1000024 3 -4 # GLSS --> W1SS+ ST CB
4.72715348E-02 3 -1000024 4 -3 # GLSS --> W1SS- CH SB
5.42371161E-02 3 1000024 5 -6 # GLSS --> W1SS+ BT TB
5.42371161E-02 3 -1000024 6 -5 # GLSS --> W1SS- TP BB
1.50775688E-03 3 1000037 1 -2 # GLSS --> W2SS+ DN UB
1.50775688E-03 3 -1000037 2 -1 # GLSS --> W2SS- UP DB
1.50775688E-03 3 1000037 3 -4 # GLSS --> W2SS+ ST CB
1.50775688E-03 3 -1000037 4 -3 # GLSS --> W2SS- CH SB
1.02795750E-01 3 1000037 5 -6 # GLSS --> W2SS+ BT TB
1.02795750E-01 3 -1000037 6 -5 # GLSS --> W2SS- TP BB
1.14910990E-05 2 1000022 21 # GLSS --> Z1SS GL
3.16029377E-02 3 1000022 2 -2 # GLSS --> Z1SS UP UB
9.26471874E-03 3 1000022 1 -1 # GLSS --> Z1SS DN DB
9.26471874E-03 3 1000022 3 -3 # GLSS --> Z1SS ST SB
3.16029377E-02 3 1000022 4 -4 # GLSS --> Z1SS CH CB
9.83522926E-03 3 1000022 5 -5 # GLSS --> Z1SS BT BB
5.09000644E-02 3 1000022 6 -6 # GLSS --> Z1SS TP TB
9.82445272E-05 2 1000023 21 # GLSS --> Z2SS GL
2.36736257E-02 3 1000023 2 -2 # GLSS --> Z2SS UP UB
2.34574024E-02 3 1000023 1 -1 # GLSS --> Z2SS DN DB
2.34574024E-02 3 1000023 3 -3 # GLSS --> Z2SS ST SB
2.36736257E-02 3 1000023 4 -4 # GLSS --> Z2SS CH CB
2.96215620E-02 3 1000023 5 -5 # GLSS --> Z2SS BT BB
2.48120874E-02 3 1000023 6 -6 # GLSS --> Z2SS TP TB
1.50513893E-03 2 1000025 21 # GLSS --> Z3SS GL
2.76464721E-06 3 1000025 2 -2 # GLSS --> Z3SS UP UB
3.33884441E-06 3 1000025 1 -1 # GLSS --> Z3SS DN DB
3.33884441E-06 3 1000025 3 -3 # GLSS --> Z3SS ST SB
2.76464721E-06 3 1000025 4 -4 # GLSS --> Z3SS CH CB
3.97400465E-03 3 1000025 | |
<gh_stars>0
#!/usr/bin/env python
import argparse
import cPickle
import traceback
import logging
import time
import sys
import numpy
import experiments.nmt
from experiments.nmt import \
RNNEncoderDecoder, \
prototype_phrase_state, \
parse_input
from experiments.nmt.numpy_compat import argpartition
logger = logging.getLogger(__name__)
class Timer(object):
def __init__(self):
self.total = 0
def start(self):
self.start_time = time.time()
def finish(self):
self.total += time.time() - self.start_time
class BeamSearch(object):
def __init__(self, enc_dec):
self.enc_dec = enc_dec
state = self.enc_dec.state
self.eos_id = state['null_sym_target']
self.unk_id = state['unk_sym_target']
def compile(self):
self.comp_repr = self.enc_dec.create_representation_computer()
self.comp_init_states = self.enc_dec.create_initializers()
self.comp_next_probs = self.enc_dec.create_next_probs_computer()
self.comp_next_states = self.enc_dec.create_next_states_computer()
def search(self, seq, n_samples, prefix=None, ignore_unk=False, minlen=1, verbose=False):
c = self.comp_repr(seq)[0]
states = map(lambda x: x[None, :], self.comp_init_states(c))
dim = states[0].shape[1]
num_levels = len(states)
fin_trans = []
fin_costs = []
trans = [[]]
costs = [0.0]
for k in range(3 * len(seq)):
if n_samples == 0:
break
# Compute probabilities of the next words for
# all the elements of the beam.
beam_size = len(trans)
last_words = (numpy.array(map(lambda t: t[-1], trans))
if k > 0
else numpy.zeros(beam_size, dtype="int64"))
log_probs = numpy.log(self.comp_next_probs(c, k, last_words, *states)[0])
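# Hypothesis costs are accumulated negative log-probabilities
# (cost = previous cost - log P(next word)); lower cost means more likely.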
# Adjust log probs according to search restrictions
if ignore_unk:
log_probs[:, self.unk_id] = -numpy.inf
if k < minlen:
log_probs[:, self.eos_id] = -numpy.inf
if prefix is not None and k < len(prefix):
log_probs[:, :] = -numpy.inf
log_probs[:, prefix[k]] = 0.
# Find the best options by calling argpartition on the flattened array
next_costs = numpy.array(costs)[:, None] - log_probs
flat_next_costs = next_costs.flatten()
best_costs_indices = argpartition(
flat_next_costs.flatten(),
n_samples)[:n_samples]
# Decipher flattened indices
voc_size = log_probs.shape[1]
trans_indices = best_costs_indices / voc_size
word_indices = best_costs_indices % voc_size
costs = flat_next_costs[best_costs_indices]
# Form a beam for the next iteration
new_trans = [[]] * n_samples
new_costs = numpy.zeros(n_samples)
new_states = [numpy.zeros((n_samples, dim), dtype="float32") for level
in range(num_levels)]
inputs = numpy.zeros(n_samples, dtype="int64")
for i, (orig_idx, next_word, next_cost) in enumerate(
zip(trans_indices, word_indices, costs)):
new_trans[i] = trans[orig_idx] + [next_word]
new_costs[i] = next_cost
for level in range(num_levels):
new_states[level][i] = states[level][orig_idx]
inputs[i] = next_word
new_states = self.comp_next_states(c, k, inputs, *new_states)
# Filter the sequences that end with end-of-sequence character
trans = []
costs = []
indices = []
for i in range(n_samples):
if new_trans[i][-1] != self.enc_dec.state['null_sym_target']:
trans.append(new_trans[i])
costs.append(new_costs[i])
indices.append(i)
else:
n_samples -= 1
fin_trans.append(new_trans[i])
fin_costs.append(new_costs[i])
states = map(lambda x: x[indices], new_states)
# Dirty tricks to obtain any translation
if not len(fin_trans):
if ignore_unk:
logger.warning("Did not manage without UNK")
return self.search(seq, n_samples, prefix=prefix, ignore_unk=False, minlen=minlen)
elif n_samples < 500:
logger.warning("Still no translations: try beam size {}".format(n_samples * 2))
return self.search(seq, n_samples * 2, prefix=prefix, ignore_unk=ignore_unk, minlen=minlen)
else:
logger.error("Translation failed")
fin_trans = numpy.array(fin_trans)[numpy.argsort(fin_costs)]
fin_costs = numpy.array(sorted(fin_costs))
return fin_trans, fin_costs
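# Typical usage sketch (mirrors main() below; the beam size 10 is an
# assumed example value): build the search object from an encoder-decoder,
# compile it once, then decode a parsed source sequence.
#     beam_search = BeamSearch(enc_dec)
#     beam_search.compile()
#     trans, costs = beam_search.search(seq, n_samples=10)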
def indices_to_words(i2w, seq):
sen = []
for k in xrange(len(seq)):
if i2w[seq[k]] == '<eol>':
break
sen.append(i2w[seq[k]])
return sen
def sample(lm_model, seq, n_samples, prefix=None,
sampler=None, beam_search=None,
ignore_unk=False, normalize=False,
alpha=1, verbose=False):
if beam_search:
sentences = []
trans, costs = beam_search.search(seq, n_samples, prefix=prefix,
ignore_unk=ignore_unk, minlen=len(seq) / 2, verbose=verbose)
if normalize:
counts = [len(s) for s in trans]
costs = [co / cn for co, cn in zip(costs, counts)]
for i in range(len(trans)):
sen = indices_to_words(lm_model.target_language.indx_word, trans[i])
sentences.append(" ".join(sen))
for i in range(len(costs)):
if verbose:
logger.log(2,"{}: {}".format(costs[i], sentences[i]))
return sentences, costs, trans
elif sampler:
sentences = []
all_probs = []
costs = []
values, cond_probs = sampler(n_samples, 3 * (len(seq) - 1), alpha, seq)
for sidx in xrange(n_samples):
sen = []
for k in xrange(values.shape[0]):
if lm_model.target_language.indx_word[values[k, sidx]] == '<eol>':
break
sen.append(lm_model.target_language.indx_word[values[k, sidx]])
sentences.append(" ".join(sen))
probs = cond_probs[:, sidx]
probs = numpy.array(cond_probs[:len(sen) + 1, sidx])
all_probs.append(numpy.exp(-probs))
costs.append(-numpy.sum(probs))
if normalize:
counts = [len(s.strip().split(" ")) for s in sentences]
costs = [co / cn for co, cn in zip(costs, counts)]
sprobs = numpy.argsort(costs)
if verbose:
for pidx in sprobs:
logger.log(2, "Hypotheses {}: {} {} {}\n".format(pidx, -costs[pidx], all_probs[pidx], sentences[pidx]))
return sentences, costs, None
else:
raise Exception("I don't know what to do")
def parse_args():
parser = argparse.ArgumentParser(
"Sample (of find with beam-serch) translations from a translation model")
parser.add_argument("--state",
required=True, help="State to use")
parser.add_argument("--beam-search",
action="store_true", help="Beam size, turns on beam-search")
parser.add_argument("--beam-size",
type=int, help="Beam size")
parser.add_argument("--ignore-unk",
default=False, action="store_true",
help="Ignore unknown words")
parser.add_argument("--source",
help="File of source sentences")
parser.add_argument("--trans",
help="File to save translations in")
parser.add_argument("--normalize",
action="store_true", default=False,
help="Normalize log-prob with the word count")
parser.add_argument("--verbose",
action="store_true", default=False,
help="Be verbose")
parser.add_argument("model_path",
help="Path to the model")
parser.add_argument("--interactive",
default=False, action="store_true",
help="Interactive post-editing?")
parser.add_argument("--references",
help="Reference sentence (for computing WSR)")
parser.add_argument("--save-original",
default=False, action="store_true",
help="Interactive post-editing?")
parser.add_argument("--save-original-to",
help="Save original hypotheses to")
parser.add_argument("changes",
nargs="?", default="",
help="Changes to state")
return parser.parse_args()
def main():
args = parse_args()
state = prototype_phrase_state()
with open(args.state) as src:
state.update(cPickle.load(src))
state.update(eval("dict({})".format(args.changes)))
logging.basicConfig(level=getattr(logging, state['level']),
format=" %(asctime)s: %(name)s: %(levelname)s: %(message)s")
if args.verbose:
logger.setLevel(level=logging.DEBUG)
logger.debug("I'm being verbose!")
else:
logger.setLevel(level=logging.INFO)
rng = numpy.random.RandomState(state['seed'])
enc_dec = RNNEncoderDecoder(state, rng, skip_init=True)
enc_dec.build()
lm_model = enc_dec.create_lm_model()
lm_model.load(args.model_path)
indx_word = cPickle.load(open(state['word_indx'], 'rb'))
sampler = None
beam_search = None
if args.beam_search:
beam_search = BeamSearch(enc_dec)
beam_search.compile()
else:
sampler = enc_dec.create_sampler(many_samples=True)
idict_src = cPickle.load(open(state['indx_word'], 'r'))
idict_trg = cPickle.load(open(state['word_indx_trgt'], 'r'))
unk_id = state['unk_sym_target']
if args.source and args.trans:
# Actually only beam search is currently supported here
assert beam_search
assert args.beam_size
try:
fsrc = open(args.source, 'r')
ftrans = open(args.trans, 'w')
logger.info("Storing corrected hypotheses into: %s" % str(args.trans))
if args.save_original:
logger.info("Storing original hypotheses into: %s" % str(args.save_original_to))
ftrans_ori = open(args.save_original_to, 'w')
if not args.interactive:
assert args.references is not None, "Automatic mode requires a reference file!"
ftrg = open(args.references, 'r')
target_lines = ftrg.read().split('\n')
if target_lines[-1] == '':
target_lines = target_lines[:-1]
start_time = time.time()
n_samples = args.beam_size
total_cost = 0.0
logging.info("Beam size: {}".format(n_samples))
total_errors = 0
total_words = 0
if args.interactive:
for n_line, line in enumerate(fsrc):
errors_sentence = 0
index_prefix = None
seqin = line.strip()
seq, parsed_in = parse_input(state, indx_word, seqin, idx2word=idict_src)
hypothesis_number = 0
correct_word = -1
while correct_word != 0:
trans, costs, _ = sample(lm_model, seq, n_samples, prefix=index_prefix, sampler=sampler,
beam_search=beam_search, ignore_unk=args.ignore_unk,
normalize=args.normalize, verbose=args.verbose)
best = numpy.argmin(costs)
hypothesis = trans[best].split()
print "Sentence %d. Hypothesis %d: %s" % (n_line, hypothesis_number, " ".join(hypothesis))
correct_word = int(raw_input('Select word to correct (1 - %d).'
' Word 0 means that the sentence is correct: ' % len(hypothesis)))
if correct_word == 0:
print >> ftrans, hypothesis
else:
errors_sentence += 1
hypothesis_number += 1
new_word = raw_input('Substitute %s by: ' % hypothesis[correct_word - 1])
prefix = hypothesis[:correct_word - 1] + [new_word]
print "New prefix: %s" % (" ".join(prefix))
index_prefix = map(lambda x: idict_trg[x], prefix)
else:
for n_line, line in enumerate(fsrc):
errors_sentence = 0
index_prefix = None
seqin = line.strip()
seq, parsed_in = parse_input(state, indx_word, seqin, idx2word=idict_src)
if args.verbose:
logger.debug("\n \n Processing sentence %d" % (n_line + 1))
logger.debug("Source: %s" % line[:-1])
logger.debug("Desired translation: %s\n" % target_lines[n_line])
reference = target_lines[n_line].split()
checked_index = 0
unk_words = []
unk_indices = []
first_hypo = True
prefix = None
while checked_index < len(reference):
trans, costs, _ = sample(lm_model, seq, n_samples, prefix=index_prefix, sampler=sampler,
beam_search=beam_search, ignore_unk=args.ignore_unk,
normalize=args.normalize, verbose=args.verbose)
best = numpy.argmin(costs)
hypothesis = trans[best].split()
if args.verbose:
if first_hypo:
logger.debug("Hypothesis %d: %s" % (errors_sentence, " ".join(hypothesis)))
else:
logger.debug("\t prefix : %s" % (" ".join(prefix)))
logger.debug("\t new hyp: %s" % (" ".join(hypothesis)))
if args.save_original and first_hypo:
print >> ftrans_ori, " ".join(hypothesis)
first_hypo = False
if len(unk_indices) > 0: # If we added some UNK word
if len(hypothesis) < len(unk_indices): # The full hypothesis will be made up of UNK words:
for i, index in enumerate(range(0, len(hypothesis))):
hypothesis[index] = unk_words[unk_indices[i]]
for ii in range(i+1, len(unk_words)):
hypothesis.append(unk_words[ii])
else: # We put each unknown word in the corresponding gap
for i, index in enumerate(unk_indices):
if index < len(hypothesis):
hypothesis[index] = unk_words[i]
else:
hypothesis.append(unk_words[i])
while checked_index < len(reference): # We check all words in the reference
if checked_index >= len(hypothesis):
errors_sentence += 1
new_word = reference[checked_index]
prefix = hypothesis + [new_word]
index_prefix = map(lambda x: idict_trg[x] if idict_trg.get(x) is not None
else unk_id, prefix)
if idict_trg.get(new_word) is None:
unk_words.append(new_word)
unk_indices.append(checked_index)
logger.debug('Error case 0! -> Add new word "%s" to the end of the hypothesis. '
'Errors: %d' % (new_word, errors_sentence))
break
elif hypothesis[checked_index] != reference[checked_index]:
correct_prefix = checked_index
errors_sentence += 1
new_word = reference[checked_index]
prefix = hypothesis[:correct_prefix] + [new_word]
logger.debug('Error case 1! -> Substitute word "%s" in hypothesis by word "%s".'
' Errors: %d' % (hypothesis[checked_index], new_word, errors_sentence))
index_prefix = map(lambda x: idict_trg[x] if idict_trg.get(x) is | |
import os
import shutil
import time
import glob
import subprocess
import web
from libs import utils, form_utils
from libs.logger import logger
import settings
subscription_versions = ['normal', 'nomail', 'digest']
def __get_ml_dir(mail):
"""Get absolute path of the root directory of mailing list account."""
if not utils.is_email(mail):
return None
mail = str(mail).lower()
(_username, _domain) = mail.split('@', 1)
return os.path.join(settings.MLMMJ_SPOOL_DIR, _domain, _username)
def __get_ml_subscribers_dir(mail, subscription):
"""
Get absolute path of the directory used to store subscribers which
subscribed to given subscription version.
@mail -- mail address of mailing list account
@subscription -- subscription version: normal, nomail, digest.
"""
if subscription == 'digest':
return os.path.join(__get_ml_dir(mail=mail), 'digesters.d')
elif subscription == 'nomail':
return os.path.join(__get_ml_dir(mail=mail), 'nomailsubs.d')
else:
# subscription == 'normal'
return os.path.join(__get_ml_dir(mail=mail), 'subscribers.d')
def __remove_ml_sub_dir(mail, dirname):
if not dirname:
return (True, )
_ml_dir = __get_ml_dir(mail=mail)
_sub_dir = os.path.join(_ml_dir, dirname)
if os.path.exists(_sub_dir):
try:
shutil.rmtree(_sub_dir)
logger.debug("[{0}] {1}, removed sub-directory: {2}".format(web.ctx.ip, mail, _sub_dir))
except Exception as e:
logger.error("[{0}] {1}, error while removing sub-directory: {2}".format(web.ctx.ip, mail, _sub_dir))
return (False, repr(e))
return (True, )
def __set_file_permission(path):
_uid = os.getuid()
_gid = os.getgid()
try:
os.chown(path, _uid, _gid)
return (True, )
except Exception as e:
return (False, repr(e))
def __copy_dir_files(src, dest, create_dest=True):
"""Copy all regular files under source directory to dest directory."""
if create_dest:
if not os.path.exists(dest):
try:
os.makedirs(dest, mode=settings.MLMMJ_FILE_PERMISSION)
except Exception as e:
return (False, repr(e))
for fn in os.listdir(src):
_src_file = os.path.join(src, fn)
if os.path.isfile(_src_file):
shutil.copy(_src_file, dest)
return (True, )
def __has_ml_dir(mail, path=None):
if path:
_ml_dir = path
else:
_ml_dir = __get_ml_dir(mail=mail)
if os.path.exists(_ml_dir):
return True
else:
return False
def __has_param_file(f):
if os.path.exists(f):
return True
else:
return False
def __get_param_file(mail, param):
"""Get path to the file used to control parameter setting.
Sample value: /var/spool/mlmmj/<domain>/<username>/control/<param>
"""
if not utils.is_email(mail):
return None
(_username, _domain) = mail.split('@', 1)
return os.path.join(settings.MLMMJ_SPOOL_DIR,
_domain,
_username,
'control',
param)
def __remove_file(path):
if os.path.exists(path):
try:
os.remove(path)
except Exception as e:
logger.error("[{0}] error while removing parameter file: {1}, {2}".format(web.ctx.ip, path, e))
return (False, repr(e))
return (True, )
def __remove_param_file(mail, param):
_path = __get_param_file(mail=mail, param=param)
return __remove_file(_path)
def __get_param_type(param):
"""Get parameter type.
The returned type is one of: boolean, list, normal, text, or None (no such
parameter).
"""
for (_type, _param_dict) in list(settings.MLMMJ_PARAM_TYPES.items()):
if param in list(_param_dict.values()):
return _type
return None
def __get_boolean_param_value(mail, param):
_param_file = __get_param_file(mail=mail, param=param)
if __has_param_file(_param_file):
return 'yes'
else:
return 'no'
def __get_list_param_value(mail, param, is_email=False, param_file=None):
if not param_file:
param_file = __get_param_file(mail=mail, param=param)
_values = []
if __has_param_file(param_file):
try:
with open(param_file, "r", encoding="utf-8") as f:
_lines = f.readlines()
_lines = [_line.strip() for _line in _lines] # remove line breaks
_values = [_line for _line in _lines if _line] # remove empty values
if is_email:
_values = [str(i).lower() for i in _values]
except IOError:
# No such file.
pass
except Exception as e:
logger.error('Error while getting (list) parameter value: {0} -> {1}'.format(param, e))
_values.sort()
return _values
def __get_normal_param_value(mail, param, param_file=None):
# Only first line is used by mlmmj.
if not param_file:
param_file = __get_param_file(mail=mail, param=param)
try:
with open(param_file, 'r', encoding='utf-8') as f:
# Remove newline but keep spaces.
value = f.readline().rstrip('\n')
return value
except IOError:
# No such file.
return ''
except Exception as e:
logger.error("[{0}] {1}, error while getting parameter value: {2}, {3}".format(web.ctx.ip, mail, param, e))
return ''
def __get_text_param_value(mail, param, param_file=None):
# Full content is used by mlmmj.
if not param_file:
param_file = __get_param_file(mail=mail, param=param)
try:
with open(param_file, 'r', encoding='utf-8') as f:
value = f.read().rstrip('\n')
return value
except IOError:
# No such file.
return ''
except Exception as e:
logger.error("[{0}] {1}, error while getting parameter value: {2}, {3}".format(web.ctx.ip, mail, param, e))
return ''
def __get_other_param_value(mail, param):
if param in settings.MLMMJ_OTHER_PARAM_MAP:
_v = settings.MLMMJ_OTHER_PARAM_MAP[param]
_param_type = _v['type']
_mlmmj_param = _v['mlmmj_param']
_is_email = _v.get('is_email', False)
if _param_type == 'boolean':
return __get_boolean_param_value(mail=mail, param=_mlmmj_param)
elif _param_type == 'list':
return __get_list_param_value(mail, param=_mlmmj_param, is_email=_is_email)
elif _param_type == 'normal':
return __get_normal_param_value(mail, param=_mlmmj_param)
elif _param_type == 'text':
return __get_text_param_value(mail, param=_mlmmj_param)
return 'INVALID_PARAM'
def __get_param_value(mail, param):
"""Get value of given mailing list parameter.
Possible returned values:
- (False, <error_reason>)
- (True, {'type': 'boolean', 'value': 'yes|no'})
- (True, {'type': 'list', 'value': [...]})
- (True, {'type': 'normal', 'value': '...'})
- (True, {'type': 'text', 'value': '...'})
"""
if param in settings.MLMMJ_OTHER_WEB_PARAMS:
_v = settings.MLMMJ_OTHER_PARAM_MAP[param]
_param_type = _v['type']
_value = __get_other_param_value(mail=mail, param=param)
return (True, {'type': _param_type, 'value': _value})
if param not in settings.MLMMJ_PARAM_NAMES:
logger.error("[{0}] {1}, unknown parameter: {2}".format(web.ctx.ip, mail, param))
return (False, 'INVALID_PARAM')
_param_file = __get_param_file(mail=mail, param=param)
_param_type = __get_param_type(param=param)
_ret = {'type': _param_type, 'value': None}
# control file doesn't exist
if not __has_param_file(_param_file):
if _param_type == 'list':
_ret['value'] = []
elif _param_type == 'boolean':
_ret['value'] = 'no'
else:
_ret['value'] = ''
return (True, _ret)
if _param_type == 'boolean':
_ret['value'] = 'yes'
else:
if _param_type == 'text':
_func = __get_text_param_value
elif _param_type == 'list':
_func = __get_list_param_value
else:
# _param_type == 'normal':
_func = __get_normal_param_value
_ret['value'] = _func(mail=mail, param=param, param_file=_param_file)
return (True, _ret)
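# Illustrative example (the list address is hypothetical, and it is assumed
# that 'moderated' is registered as a boolean parameter in settings):
#   __get_param_value('mylist@example.com', 'moderated')
#   -> (True, {'type': 'boolean', 'value': 'yes'}) when control/moderated exists
#   -> (True, {'type': 'boolean', 'value': 'no'}) otherwise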
def __update_boolean_param(mail,
param,
value,
param_file=None,
touch_instead_of_create=False):
"""Create or remove parameter file for boolean type parameter.
@touch_instead_of_create - touch parameter file instead of re-create it.
"""
if not param_file:
param_file = __get_param_file(mail=mail, param=param)
if value == 'yes':
try:
if touch_instead_of_create:
open(param_file, 'a', encoding='utf-8').close()
else:
open(param_file, 'w', encoding='utf-8').close()
# Avoid some conflicts
if param == 'subonlypost':
__remove_param_file(mail=mail, param='modonlypost')
if param == 'modonlypost':
__remove_param_file(mail=mail, param='subonlypost')
# Create 'control/moderated' also
_f = __get_param_file(mail=mail, param='moderated')
open(_f, 'a', encoding='utf-8').close()
except Exception as e:
logger.error("[{0}] {1}, error while updating (boolean) parameter: {2} -> {3}, {4}".format(
web.ctx.ip, mail, param, value, e))
return (False, repr(e))
else:
qr = __remove_file(path=param_file)
if not qr[0]:
return qr
logger.info("[{0}] {1}, updated (boolean) parameter: {2} -> {3}".format(web.ctx.ip, mail, param, value))
return (True, )
def __update_normal_param(mail, param, value, param_file=None, is_email=False):
# Although we write the full given value, only the first line is used by mlmmj.
if not param_file:
param_file = __get_param_file(mail=mail, param=param)
if param == 'maxmailsize':
try:
value = int(value)
except:
value = 0
if not value:
# Remove param file.
qr = __remove_file(path=param_file)
return qr
if value:
if is_email:
value = str(value).lower()
if not utils.is_email(value):
return (False, 'INVALID_EMAIL')
try:
if isinstance(value, int):
value = str(value)
with open(param_file, 'w', encoding='utf-8') as f:
f.write(value + '\n')
except Exception as e:
logger.error("[{0}] {1}, error while updating (normal) parameter: {2} -> {3}, {4}".format(
web.ctx.ip, mail, param, value, e))
return (False, repr(e))
else:
qr = __remove_file(path=param_file)
if not qr[0]:
return qr
logger.info("[{0}] {1}, updated (normal) parameter: {2} -> {3}".format(web.ctx.ip, mail, param, value))
return (True, )
def __update_list_param(mail, param, value, param_file=None, is_email=False):
if not param_file:
param_file = __get_param_file(mail=mail, param=param)
if isinstance(value, str):
_values = __convert_web_param_value_to_list(value=value, is_email=is_email)
else:
_values = value
if _values:
try:
param_file = __get_param_file(mail=mail, param=param)
if param == 'listaddress':
# Remove primary address(es)
_values = [v for v in _values if v != mail]
# Prepend primary address (must be first one)
_values = [mail] + _values
with open(param_file, 'w', encoding='utf-8') as f:
f.write('\n'.join(_values) + '\n')
logger.info("[{0}] {1}, updated: {2} -> {3}".format(web.ctx.ip, mail, param, ', '.join(_values)))
except Exception as e:
logger.error("[{0}] {1}, error while updating (list) parameter: {2} -> {3}, {4}".format(
web.ctx.ip, mail, param, value, e))
return (False, repr(e))
else:
qr = __remove_file(path=param_file)
if not qr[0]:
return qr
logger.info("[{0}] {1}, updated (list) parameter: {2} -> {3}".format(web.ctx.ip, mail, param, value))
return (True, )
def __update_text_param(mail,
param,
value,
param_file=None,
create_if_empty=False):
if not param_file:
param_file = __get_param_file(mail=mail, param=param)
if value:
try:
if isinstance(value, int):
value = str(value)
else:
value = value.strip()
# Footer text/html must end with an empty line, otherwise
# the trailing characters will be garbled.
with open(param_file, 'w', encoding='utf-8') as f:
f.write(value + '\n')
except Exception as e:
logger.error("[{0}] {1}, error while updating (normal) parameter: {2} -> {3}, {4}".format(
web.ctx.ip, mail, param, value, e))
return (False, repr(e))
else:
if create_if_empty:
# Footer text/html must end with an empty line, otherwise
# the trailing characters will be garbled.
with open(param_file, 'w', encoding='utf-8') as f:
f.write('\n')
else:
qr = __remove_file(path=param_file)
if not qr[0]:
return qr
logger.info("[{0}] {1}, updated (text) parameter: {2} -> {3}".format(web.ctx.ip, mail, param, value))
return (True, )
def __update_other_param(mail, param, value):
"""Update parameters which cannot be simply mapped to a mlmmj parameter."""
if param in settings.MLMMJ_OTHER_PARAM_MAP:
_v = settings.MLMMJ_OTHER_PARAM_MAP[param]
_param_type = _v['type']
_mlmmj_param = _v['mlmmj_param']
_is_email | |
size = sig.size
plt.imshow(sig.reshape((N_FRAMES, size//N_FRAMES)).T)
plt.clim(min_val_exp - diff/10, max_val_exp + diff/10)
plt.title(titles[i])
plt.savefig(plotdir + '{}.png'.format(name), dpi=400)
plt.close()
async def do_pcm_test_fft_correction(dut, pdm_fname, test_num):
'''Correct for fft inaccuracies.'''
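# Flow: run the signal through once to capture the DUT's FFT output, then
# recompute the reference ACO features with that FFT (fft_override) so the
# later pipeline stages are checked against data that matches the hardware.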
print('Starting pcm test with fft correction.')
x = np.load(pdm_fname, allow_pickle=True)
x = parse_mic_data.pad_pdm(x) # pad half-second signal to 1 second
x = pdm.pdm_to_pcm(x, 2)
y = aco.aco(x)
cocotb.fork(write_pcm_input(dut, x)) # change on falling edge of pdm clk
fft_out = await check_fft(dut, y[2], test_num)
wake_bad = await read_wake_no_assert(dut)
print('Running through with known fft value.')
for i in range(8000):
await FallingEdge(dut.clk_i)
y = aco.aco(x, fft_override=fft_out)
cocotb.fork(write_pcm_input(dut, x)) # change on falling edge of pdm clk
# fft_out = await check_fft(dut, y[2], test_num)
cocotb.fork(check_final(dut, y[8], test_num))
aco_out = y[-1]
aco_out = aco_out.reshape((int(aco_out.size / 13), 13))
wrd_out = na.get_numpy_pred(aco_out)[0]
wake_expected = (wrd_out[0] > wrd_out[1])
await read_wake(dut, wake_expected)
print('Finished test.')
return wake_expected # assert ensured expected is observed
async def do_pcm_test(dut, pdm_fname):
x = np.load(pdm_fname, allow_pickle=True)
x = parse_mic_data.pad_pdm(x) # pad half-second signal to 1 second
x = pdm.pdm_to_pcm(x, 2)
cocotb.fork(write_pcm_input(dut, x)) # change on falling edge of pdm clk
wake = await read_wake_no_assert(dut)
return wake
@cocotb.test()
async def test_wakey_wakey(dut):
# Create a 10us period clock on port clk
clock = Clock(dut.clk_i, 10, units="us")
cocotb.fork(clock.start())
# Reset DUT
await FallingEdge(dut.clk_i)
dut.rst_n_i <= 0
dut.wbs_stb_i <= 0
dut.wbs_cyc_i <= 0
dut.wbs_we_i <= 0
dut.wbs_sel_i <= 0
dut.wbs_dat_i <= 0
dut.wbs_adr_i <= 0
# dut.pdm_data_i <= 0
dut.dfe_data <= 0
dut.dfe_valid <= 0
dut.vad_i <= 0
dut.la_data_in_i <= 0
dut.la_oenb_i <= 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
# wait long enough for reset to be effective
for _ in range(50):
await FallingEdge(dut.clk_i)
dut.rst_n_i <= 1
dut.vad_i <= 1
await FallingEdge(dut.clk_i)
'''
print('=' * 100)
print('Beginning Load/Store Test')
print('=' * 100)
# Store Test
# Sequential Store - Conv 1 Memory Bank 0 (Weight - 104b)
for i in range(8):
await cfg_store(dut, i, i + 3, i + 2, i + 1, i)
# Sequential Store - Conv 1 Memory Bank 1 (Weight - 104b)
for i in range(8):
await cfg_store(dut, i + 0x10, i + 3, i + 2, i + 1, i)
# Sequential Store - Conv 1 Memory Bank 2 (Weight - 104b)
for i in range(8):
await cfg_store(dut, i + 0x20, i + 3, i + 2, i + 1, i)
# Sequential Store - Conv 1 Memory Bank 3 (Bias - 32b)
for i in range(8):
await cfg_store(dut, i + 0x30, i + 3, i + 2, i + 1, i)
# Sequential Store - Conv 1 Memory Bank 4 (Shift - 5b)
await cfg_store(dut, 0x40, i + 3, i + 2, i + 1, i)
# Sequential Store - Conv 2 Memory Bank 0 (Weight - 64b)
for i in range(16):
await cfg_store(dut, i + 0x50, i + 3, i + 2, i + 1, i)
# Sequential Store - Conv 2 Memory Bank 1 (Weight - 64b)
for i in range(16):
await cfg_store(dut, i + 0x60, i + 3, i + 2, i + 1, i)
# Sequential Store - Conv 2 Memory Bank 2 (Weight - 64b)
for i in range(16):
await cfg_store(dut, i + 0x70, i + 3, i + 2, i + 1, i)
# Sequential Store - Conv 2 Memory Bank 3 (Bias - 32b)
for i in range(16):
await cfg_store(dut, i + 0x80, i + 3, i + 2, i + 1, i)
# Sequential Store - Conv 2 Memory Bank 4 (Shift - 5b)
await cfg_store(dut, 0x90, i + 3, i + 2, i + 1, i)
# Sequential Store - FC Memory Bank 0 (Weight - 8b)
for i in range(208):
await cfg_store(dut, i + 0x100, i + 3, i + 2, i + 1, i)
# Sequential Store - FC Memory Bank 1 (Weight - 8b)
for i in range(208):
await cfg_store(dut, i + 0x200, i + 3, i + 2, i + 1, i)
# Sequential Store - FC Memory Bank 3 (Bias - 32b)
await cfg_store(dut, 0x300, i + 3, i + 2, i + 1, i)
# Sequential Store - FC Memory Bank 3 (Bias - 32b)
await cfg_store(dut, 0x400, i + 3, i + 2, i + 1, i)
# Load Test
# Sequential Load - Conv 1 Memory Bank 0 (Weight - 104b)
for i in range(8):
observed = await cfg_load(dut, i)
expected = [i + 3, i + 2, i + 1, i]
assert observed == expected
# Sequential Load - Conv 1 Memory Bank 1 (Weight - 104b)
for i in range(8):
observed = await cfg_load(dut, i + 0x10)
expected = [i + 3, i + 2, i + 1, i]
assert observed == expected
# Sequential Load - Conv 1 Memory Bank 2 (Weight - 104b)
for i in range(8):
observed = await cfg_load(dut, i + 0x20)
expected = [i + 3, i + 2, i + 1, i]
assert observed == expected
# Sequential Load - Conv 1 Memory Bank 3 (Bias - 32b)
for i in range(8):
observed = await cfg_load(dut, i + 0x30)
expected = [0, 0, 0, i]
assert observed == expected
# Sequential Load - Conv 1 Memory Bank 4 (Shift - 5b)
observed = await cfg_load(dut, 0x40)
expected = [0, 0, 0, i]
assert observed == expected
# Sequential Load - Conv 2 Memory Bank 0 (Weight - 64b)
for i in range(16):
observed = await cfg_load(dut, i + 0x50)
expected = [0, 0, i + 1, i]
assert observed == expected
# Sequential Load - Conv 2 Memory Bank 1 (Weight - 64b)
for i in range(16):
observed = await cfg_load(dut, i + 0x60)
expected = [0, 0, i + 1, i]
assert observed == expected
# Sequential Load - Conv 2 Memory Bank 2 (Weight - 64b)
for i in range(16):
observed = await cfg_load(dut, i + 0x70)
expected = [0, 0, i + 1, i]
assert observed == expected
# Sequential Load - Conv 2 Memory Bank 3 (Bias - 32b)
for i in range(16):
observed = await cfg_load(dut, i + 0x80)
expected = [0, 0, 0, i]
assert observed == expected
# Sequential Load - Conv 2 Memory Bank 4 (Shift - 5b)
observed = await cfg_load(dut, 0x90)
expected = [0, 0, 0, i]
assert observed == expected
# Sequential Load - FC Memory Bank 0 (Weight - 8b)
for i in range(208):
observed = await cfg_load(dut, i + 0x100)
expected = [0, 0, 0, i]
assert observed == expected
# Sequential Load - FC Memory Bank 1 (Weight - 8b)
for i in range(208):
observed = await cfg_load(dut, i + 0x200)
expected = [0, 0, 0, i]
assert observed == expected
# Sequential Load - FC Memory Bank 3 (Bias - 32b)
observed = await cfg_load(dut, 0x300)
expected = [0, 0, 0, i]
assert observed == expected
# Sequential Load - FC Memory Bank 3 (Bias - 32b)
observed = await cfg_load(dut, 0x400)
expected = [0, 0, 0, i]
assert observed == expected
'''
# n_fixed_tests = 4 # number of different types of fixed tests
# for i in range(n_fixed_tests):
# print('=' * 100)
# print('Beginning fixed test {}/{}.'.format(i+1, n_fixed_tests))
# print('=' * 100)
# await do_fixed_test(dut, i)
#
# n_random_tests = 3 # number of different types of random tests
# n_repeats = 5 # how many times to repeat each random test
# for i in range(n_random_tests):
# for j in range(n_repeats):
# print('=' * 100)
# print('Beginning random test {}/{} repeat num {}/{}.' \
# .format(i+1, n_random_tests, j+1, n_repeats))
# print('=' * 100)
# await do_random_test(dut, i)
#
# n_mfcc_tests = 10 # number of tests to do with real MFCC features
# for i in range(n_mfcc_tests):
# print('=' * 100)
# print('Beginning MFCC test {}/{} '.format(i+1, n_mfcc_tests))
# print('=' * 100)
# params = na.get_params()
# await write_mem_params(dut, params)
# await do_mfcc_test(dut)
| |
[[float(i == j) for j in range(n)] for i in range(n)]
#pivot
if piv:
for j in range(len(A)):
A, P, Q = pivot(A, P, Q, j, piv)
for k in range(n):
for i in range(k, n):
L[i][k] = A[i][k] - sum(L[i][p]*U[p][k] for p in range(k))
for i in range(k+1, n):
if L[k][k] == 0:
exit('Partial pivoting must be used')
U[k][i] = (A[k][i] - sum(L[k][p]*U[p][i] for p in range(k))) / float(L[k][k])
if piv == 2:
return P, Q, L, U
return P, L, U
def croutL1U(A, piv=0):
"""Calcula el metodo L1U"""
n = len(A)
U = [[0.0]*n for j in range(n)];
L = [[float(i == j) for j in range(n)] for i in range(n)]
P = [[float(i == j) for j in range(n)] for i in range(n)]
Q = [[float(i == j) for j in range(n)] for i in range(n)]
#pivot
if piv:
for j in range(len(A)):
A, P, Q = pivot(A, P, Q, j, piv)
for k in range(n):
for i in range(k, n):
U[k][i] = A[k][i] - sum(L[k][p]*U[p][i] for p in range(k))
for i in range(k+1, n):
if U[k][k] == 0:
exit('Partial pivoting must be used')
L[i][k] = (A[i][k] - sum(L[i][p]*U[p][k] for p in range(k))) / float(U[k][k])
if piv == 2:
return P, Q, L, U
return P, L, U
def Doolittle(A, piv=0):
n = len(A)
U = [[0.0]*n for j in range(n)]
L = [[float(i == j) for j in range(n)] for i in range(n)]
P = [[float(i == j) for j in range(n)] for i in range(n)]
Q = [[float(i == j) for j in range(n)] for i in range(n)]
#pivot
if piv:
for j in range(len(A)):
A, P, Q = pivot(A, P, Q, j, piv)
for k in range(n):
for i in range(n):
U[k][i] = A[k][i] - sum(L[k][p]*U[p][i] for p in range(k))
for i in range(k, n):
if U[k][k] == 0:
exit('Partial pivoting must be used')
L[i][k] = (A[i][k] - sum(L[i][p]*U[p][k] for p in range(k))) / float(U[k][k])
if piv == 2:
return P, Q, L, U
return P, L, U
def LDMt(A, piv=0):
""""""
if piv == 2:
P, I, L, U = croutLU1(A, piv)
else:
P, L, U = croutLU1(A, piv)
D, M = diagonalDMt(U)
return L, D, M
def diagonalDMt(U):
n = len(U)
D = [[float(i == j) for j in range(n)] for i in range(n)]
for i in range(n):
D[i][i] = float(U[i][i])
for i in range(n):
for j in range(n):
U[i][j] /= (D[i][i])
return D, U
def cholesky(A):
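"""Cholesky factorization: overwrites A with the upper-triangular factor G
such that A = G^T * G (A must be symmetric positive definite)."""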
if not symmetricMatrix(A):
exit('The matrix is not symmetric')
n = len(A)
G = [[0.0]*n for j in range(n)]
for i in range(n):
suma = A[i][i]
for k in range(i):
suma -= A[k][i]**2
if suma < 0:
exit('The matrix is not positive definite')
A[i][i] = sqrt(suma)
for j in range(i+1, n):
suma = A[i][j]
for k in range(i):
suma -= A[k][i]*A[k][j]
A[i][j] = suma / A[i][i]
for j in range(n):
for i in range(n):
if(i > j):
A[i][j] = 0.0
return A
def norm(x):
"""Norma euclideana del vector x, ie, ||x||2"""
return sqrt(sum([x_i**2 for x_i in x]))
def Q_i(Q_min, i, j, k):
"""Rellenando elementos de la matrix Q_t"""
if i < k or j < k:
return float(i == j)
else:
return Q_min[i-k][j-k]
def Householder(A):
""" Transformacion por Householder, donde Hn...H2*H1*A = R, Q_t*A = R
Q = H1*H2 ... Hn, Q es ortogonal (Q * Qt = I)
Retorna Q y R, donde A = Q*R
"""
n = len(A)
R = A
Q = [[0.0] * n for i in range(n)]
for k in range(n-1):
I = [[float(i == j) for i in range(n)] for j in range(n)]
# Build the vectors x, e and the scalar alpha
x = [row[k] for row in R[k:]]
e = [row[k] for row in I[k:]]
# The function cmp(a, b) returns -1 if a<b, 1 if a>b, 0 if a==b
alpha = -cmp(x[0],0) * norm(x)
# Build the vectors u, v
u = map(lambda p,q: p + alpha * q, x, e)
norm_u = norm(u)
v = map(lambda p: p/norm_u, u)
# Build the minor matrix Q_min
Q_min = [ [float(i==j) - 2.0 * v[i] * v[j] for i in range(n-k)] for j in range(n-k) ]
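# Q_min is the Householder reflector I - 2*v*v^T acting on the trailing
# (n-k)-dimensional block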
# Embed the minor matrix Q_min into the full-size Q_t
Q_t = [[ Q_i(Q_min,i,j,k) for i in range(n)] for j in range(n)]
# On the first iteration compute Q_t*A = R
# Afterwards, Q_tn* ... *Q_t1*A = R
if k == 0:
Q = Q_t
R = matrixMulti(Q_t,A)
else:
Q = matrixMulti(Q_t,Q)
R = matrixMulti(Q_t,R)
# Return the transpose of the product of the Q_t's, i.e. trans(Q_tn* ... *Q_t1) = Q
return trans(Q), R
def givens(A):
""" Gn* ... G2*G1*A = R
Q_t = Gn* ... G2*G1
A = Q*R, from the property Q_t * Q = I
"""
n = len(A)
An = A
Gn = [[float(i == j) for j in range(n)] for i in range(n)]
Q_t = [[float(i == j) for j in range(n)] for i in range(n)]
a = An[0][n-2]
b = An[0][n-1]
index = 1
for i in range(n):
for j in range(n-1, i, -1):
a = An[j-1][i]
b = An[j][i]
if a*a + b*b == 0:
continue
cosX = a / (sqrt(a*a + b*b))
sinX = -b / (sqrt(a*a + b*b))
Gn[j][j] = cosX
Gn[j][j-1] = sinX
Gn[j-1][j] = -sinX
Gn[j-1][j-1] = cosX
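# This rotation mixes rows j-1 and j of An and zeroes out the entry An[j][i]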
print 'G' +str(index) + ':'
printMatrix(Gn)
An = matrixMulti(Gn, An)
print 'A' +str(index) + ':'
printMatrix(An)
Q_t = matrixMulti(Gn, Q_t)
# Reset the matrix Gn back to the identity
Gn = [[float(k == l) for l in range(n)] for k in range(n)]
index += 1
return trans(Q_t), An
def normaInfVector(L):
""" Calcula la norma infinita de un vector:
||x|| = max {|xi|}, i = 0, 1, ... n.
"""
maximum = fabs(L[0])
for i in range(1, len(L)):
maximum = max(maximum, fabs(L[i]))
return maximum
def jacobi(A, b, prec=1e-7):
"""Metodo que calcula la solucion Ax = b, usando tecnicas iterativas"""
n = len(A)
Xk = [0.0]*n
sumation = 0.0
for i in range(n):
if A[i][i] == 0:
exit('The elements A[i][i] must be nonzero')
Xk1 = [b[i]/float(A[i][i]) for i in range(n)]
minus = lambda x, y: [x[i]-y[i] for i in range(n)]
# Column diagonal-dominance check used as a convergence heuristic
for j in range(n):
dominancia = 0.0
for i in range(n):
if j != i:
dominancia += fabs(A[i][j])
if fabs(A[j][j]) < dominancia:
exit('The matrix does not converge')
while (normaInfVector(minus(Xk1,Xk)) / float(normaInfVector(Xk1))) > prec:
Xk[:] = Xk1[:]
for i in range(n):
sumation = sum(A[i][j]*Xk1[j] if i!=j else 0 for j in range(n))
Xk1[i] = (1.0/A[i][i])*(b[i] - sumation)
print Xk1
return Xk1
def gaussSeidel(A, b, prec=1e-7):
"""Metodo que calcula la solucion Ax = b, usando tecnicas iterativas"""
n = len(A)
Xk = [0.0]*n
sumation = 0.0
for i in range(n):
if A[i][i] == 0:
exit('The elements A[i][i] must be nonzero')
Xk1 = [b[i]/float(A[i][i]) for i in range(n)]
minus = lambda x, y: [x[i]-y[i] for i in range(n)]
# Column diagonal-dominance check used as a convergence heuristic
for j in range(n):
dominancia = 0.0
for i in range(n):
if j != i:
dominancia += fabs(A[i][j])
if fabs(A[j][j]) < dominancia:
exit('The matrix does not converge')
while (normaInfVector(minus(Xk1,Xk)) / float(normaInfVector(Xk1))) > prec:
Xk[:] = Xk1[:]
for i in range(n):
sumation1 = sum(A[i][j]*Xk1[j] for j in range(i))
sumation2 = sum(A[i][j]*Xk1[j] for j in range(i+1, n))
Xk1[i] = (1.0/A[i][i])*(b[i] - sumation1 - sumation2)
print Xk1
return Xk1
def sor(A, b, prec=1e-7, w=1.5):
""" Metodo SOR, calcula la solucion de un sistema de ecuaciones
usando el parametro de relajacion w, que necesariamente varia
de <0, 2> solo en el caso que la matriz converja.
"""
n = len(A)
Xk = [0.0]*n
sumation = 0.0
# Ensure the main-diagonal elements are nonzero
for i in range(n):
if A[i][i] == 0:
exit('The elements A[i][i] must be nonzero')
Xk1 = [b[i]/float(A[i][i]) for i in range(n)]
minus = lambda x, y: [x[i]-y[i] for i in range(n)]
# for j in range(n):
# dominancia = 0.0
# for i in range(n):
# if j != i:
# dominancia += fabs(A[i][j])
# if A[i][i] < dominancia:
# exit('La matriz no converge')
while (normaInfVector(minus(Xk1,Xk)) / float(normaInfVector(Xk1))) > prec:
Xk[:] = Xk1[:]
for i in range(n):
sumation1 = sum(A[i][j]*Xk1[j] for j in range(i))
sumation2 = sum(A[i][j]*Xk1[j] for j in range(i+1, n))
Xk1[i] = (float(w)/A[i][i])*(b[i] - sumation1 - sumation2) + (1-w)*Xk[i]
# print Xk1
return Xk1
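# Illustrative usage sketch (A_demo and b_demo are assumed example values,
# chosen only to show the calling convention of the iterative solvers above;
# each call returns the approximate solution vector):
#     A_demo = [[4.0, 1.0, 0.0],
#               [1.0, 5.0, 2.0],
#               [0.0, 2.0, 6.0]]
#     b_demo = [6.0, 3.0, 1.0]
#     x_j = jacobi(A_demo, b_demo, prec=1e-7)
#     x_gs = gaussSeidel(A_demo, b_demo, prec=1e-7)
#     x_sor = sor(A_demo, b_demo, prec=1e-7, w=1.2)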
def menu():
print "\t\t\t Bienvenidos al Programa de"
print "\t\t\tResolucion de Ecuaciones Lineales"
opc = 1
while(opc):
print "Elija el metodo a usar:\n"
print """
0. Definiciones
1. Gauss
2. Gauss-Jordan
3. Crout LU1
4. Crout L1U
5. Doolittle
6. LDMt
7. Cholesky
8. <NAME> (missing)
9. Aasen (missing)
10. Householder
11. Givens
12. Jacobi
13. Gauss-Seidel
14. SOR
15. Conditioning (incomplete)
16. Exit
"""
opc = input('Enter option\n')
if type(opc) != type(1):
exit('You must enter an integer value, please try again')
if opc == 16:
exit('Thank you for using the program')
if opc < 0 or opc > 16:
print 'Invalid option'
continue
# Show the documentation
if opc == 0:
print __doc__
opc = 1
continue
Matrix = inputMatrix()
#Matrix = [
# [1, -4, 2, 1],
# [2, -6, 1, 4],
# [-1, 2, 3, 4],
# [0, -1, 1, 1]
# ]
# Conditioning does not need the vector b
if opc != 15:
b = inputVector(Matrix)
#b = [-4, 1, 12, 0]
restric = [7, 10, 11, 12, 13, 14]
if opc not in restric:
print "Elija el pivoteo a usar:\n"
print """
0. Sin pivoteo
1. Pivoteo parcial
2. Pivoteo total
"""
piv = int(raw_input('Ingrese opcion\n'))
#Gauss
if opc == 1:
x = gauss(Matrix, b, piv)
print 'x:'
print x
#Gauss-Jordan
elif opc == 2:
x = gaussJordan(Matrix, b, piv)
print 'x:'
print x
#croutLU1, croutL1U, Doolittle
elif opc == 3 or |